diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 636c70e40e4..92d95bfe34c 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,51 +1,81 @@
-bootstrap.sh @deepthi
+* @deepthi
+bootstrap.sh @ajm188 @deepthi @frouioui @vmg
+go.mod @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui
+go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @frouioui
+/.github/ @deepthi @mattlord @rohit-nayak-ps @frouioui
+/.github/ISSUE_TEMPLATE/ @deepthi @frouioui @mattlord
+/.github/workflows/ @deepthi @frouioui @mattlord @rohit-nayak-ps
/config/mycnf/ @deepthi @shlomi-noach @mattlord
-/docker/ @derekperkins @dkhenry @mattlord
-/examples/compose @shlomi-noach
-/examples/demo @sougou @mattlord
-/examples/legacy_local @deepthi
-/examples/local @rohit-nayak-ps @frouioui @mattlord
-/examples/operator @GuptaManan100 @frouioui
-/examples/region_sharding @deepthi
+/doc/ @deepthi @frouioui @GuptaManan100 @rsajwani
+/docker/ @deepthi @derekperkins @dkhenry @mattlord @GuptaManan100 @frouioui
+/examples/compose @shlomi-noach @GuptaManan100 @frouioui
+/examples/demo @mattlord @rohit-nayak-ps
+/examples/local @rohit-nayak-ps @frouioui @mattlord @GuptaManan100
+/examples/operator @GuptaManan100 @frouioui @mattlord
+/examples/region_sharding @deepthi @mattlord
/java/ @harshit-gangal
/go/cache @vmg
-/go/cmd @ajm188
-/go/cmd/vtadmin @ajm188 @doeg @notfelineit
+/go/cmd @ajm188 @deepthi
+/go/cmd/vtadmin @ajm188 @notfelineit
/go/cmd/vtctldclient @ajm188 @notfelineit
-/go/internal/flag @ajm188
+/go/internal/flag @ajm188 @rohit-nayak-ps
/go/mysql @harshit-gangal @systay @mattlord
-/go/protoutil @ajm188
-/go/test/endtoend/onlineddl @shlomi-noach
-/go/test/endtoend/vtorc @deepthi @shlomi-noach @GuptaManan100
+/go/pools @deepthi @harshit-gangal
+/go/protoutil @ajm188 @deepthi
+/go/sqltypes @harshit-gangal @shlomi-noach @vmg
+/go/test/endtoend/onlineddl @rohit-nayak-ps @shlomi-noach
/go/test/endtoend/messaging @mattlord @rohit-nayak-ps @derekperkins
/go/test/endtoend/vtgate @harshit-gangal @systay @frouioui
-/go/vt/discovery @deepthi
-/go/vt/mysqlctl @deepthi @mattlord
-/go/vt/vtorc @deepthi @shlomi-noach @GuptaManan100
-/go/vt/proto/vtadmin @ajm188 @doeg @notfelineit
-/go/vt/schema @shlomi-noach
+/go/test/endtoend/vtorc @deepthi @shlomi-noach @GuptaManan100 @rsajwani
+/go/tools/ @frouioui @systay
+/go/vt/dbconnpool @harshit-gangal @mattlord
+/go/vt/discovery @deepthi @frouioui
+/go/vt/discovery/*tablet_picker* @rohit-nayak-ps @mattlord
+/go/vt/mysqlctl @deepthi @mattlord @rsajwani
+/go/vt/proto @deepthi @harshit-gangal @mattlord
+/go/vt/proto/vtadmin @ajm188 @notfelineit
+/go/vt/schema @mattlord @shlomi-noach
/go/vt/servenv @deepthi @ajm188
/go/vt/sqlparser @harshit-gangal @systay @GuptaManan100
-/go/vt/srvtopo @rafael
-/go/vt/topo @deepthi @rafael
-/go/vt/vtadmin @ajm188 @doeg @notfelineit @rohit-nayak-ps
-/go/vt/vtctl @deepthi
-/go/vt/vtctl/vtctl.go @ajm188 @notfelineit
+/go/vt/srvtopo @deepthi @mattlord
+/go/vt/sysvars @harshit-gangal @systay
+/go/vt/topo @deepthi @mattlord @rsajwani
+/go/vt/topotools @deepthi @mattlord @rsajwani
+/go/vt/vitessdriver @harshit-gangal
+/go/vt/vtadmin @ajm188 @notfelineit @rohit-nayak-ps
+/go/vt/vtctl @ajm188 @deepthi @rohit-nayak-ps
+/go/vt/vtctl/vtctl.go @notfelineit @rohit-nayak-ps
/go/vt/vtctl/grpcvtctldclient @ajm188 @notfelineit
/go/vt/vtctl/grpcvtctldserver @ajm188 @notfelineit
+/go/vt/vtctl/reparentutil @ajm188 @GuptaManan100 @deepthi
/go/vt/vtctl/vtctldclient @ajm188 @notfelineit
/go/vt/vtctld @ajm188 @deepthi @notfelineit @rohit-nayak-ps
-/go/vt/vtexplain @systay
-/go/vt/vtgate @harshit-gangal @systay @frouioui
-/go/vt/vttablet/tabletmanager @deepthi @shlomi-noach
+/go/vt/vterrors @harshit-gangal @systay
+/go/vt/vtexplain @systay @harshit-gangal
+/go/vt/vtgate @harshit-gangal @systay @frouioui @GuptaManan100
+/go/vt/vtgate/endtoend/*vstream* @rohit-nayak-ps @mattlord
+/go/vt/vtgate/planbuilder @harshit-gangal @systay @frouioui @GuptaManan100 @arthurschreiber
+/go/vt/vtgate/*vstream* @rohit-nayak-ps @mattlord
+/go/vt/vtorc @deepthi @shlomi-noach @GuptaManan100 @rsajwani
+/go/vt/vttablet/*conn* @harshit-gangal @systay
+/go/vt/vttablet/endtoend @harshit-gangal @mattlord @rohit-nayak-ps @systay
+/go/vt/vttablet/grpc* @ajm188 @rohit-nayak-ps @rsajwani @shlomi-noach @harshit-gangal
+/go/vt/vttablet/onlineddl @mattlord @rohit-nayak-ps @shlomi-noach
+/go/vt/vttablet/queryservice @harshit-gangal @systay
+/go/vt/vttablet/tabletmanager @deepthi @GuptaManan100 @rohit-nayak-ps @rsajwani @shlomi-noach
/go/vt/vttablet/tabletmanager/vreplication @rohit-nayak-ps @mattlord
/go/vt/vttablet/tabletmanager/vstreamer @rohit-nayak-ps @mattlord
-/go/vt/vttablet/tabletserver @harshit-gangal @systay @shlomi-noach
+/go/vt/vttablet/tabletserver* @harshit-gangal @systay @shlomi-noach @rohit-nayak-ps
/go/vt/vttablet/tabletserver/messager @mattlord @rohit-nayak-ps @derekperkins
-/go/vt/wrangler @deepthi @rohit-nayak-ps @mattlord
-/go/vt/workflow @rohit-nayak-ps @mattlord
-/proto/vtadmin.proto @ajm188 @doeg @notfelineit
+/go/vt/vttablet/*tmclient* @ajm188 @GuptaManan100 @rohit-nayak-ps @rsajwani @shlomi-noach
+/go/vt/vttablet/vexec @mattlord @rohit-nayak-ps @shlomi-noach
+/go/vt/wrangler @deepthi @mattlord @rohit-nayak-ps
+/go/vt/workflow @mattlord @rohit-nayak-ps
+/proto/ @deepthi @harshit-gangal
+/proto/vtadmin.proto @ajm188 @notfelineit
/proto/vtctldata.proto @ajm188 @notfelineit
/proto/vtctlservice.proto @ajm188 @notfelineit
-/web/vtadmin @ajm188 @doeg @notfelineit
-/web/vtctld2 @notfelineit @rohit-nayak-ps
+/test/ @GuptaManan100 @frouioui @rohit-nayak-ps @deepthi @mattlord @harshit-gangal
+/tools/ @frouioui @rohit-nayak-ps
+/web/vtadmin @ajm188 @notfelineit
+/web/vtadmin/src/proto @deepthi @harshit-gangal @mattlord
diff --git a/.github/docker/cluster_test_vtorc/Dockerfile b/.github/docker/cluster_test_vtorc/Dockerfile
deleted file mode 100644
index 8042d96eb1a..00000000000
--- a/.github/docker/cluster_test_vtorc/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
-
-FROM "${image}"
-
-USER root
-
-# Re-copy sources from working tree
-RUN rm -rf /vt/src/vitess.io/vitess/*
-COPY . /vt/src/vitess.io/vitess
-
-# Set the working directory
-WORKDIR /vt/src/vitess.io/vitess
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-
-USER vitess
-
-# Set environment variables
-ENV VTROOT /vt/src/vitess.io/vitess
-# Set the vtdataroot such that it uses the volume mount
-ENV VTDATAROOT /vt/vtdataroot
-
-# create the vtdataroot directory
-RUN mkdir -p $VTDATAROOT
-
-# install goimports
-RUN go install golang.org/x/tools/cmd/goimports@latest
-
-# sleep for 50 minutes
-CMD sleep 3000
diff --git a/.github/docker/cluster_test_vtorc_mysql57/Dockerfile b/.github/docker/cluster_test_vtorc_mysql57/Dockerfile
deleted file mode 100644
index 0497e4112d9..00000000000
--- a/.github/docker/cluster_test_vtorc_mysql57/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
-
-FROM "${image}"
-
-USER root
-
-# Re-copy sources from working tree
-RUN rm -rf /vt/src/vitess.io/vitess/*
-COPY . /vt/src/vitess.io/vitess
-
-# Set the working directory
-WORKDIR /vt/src/vitess.io/vitess
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-
-USER vitess
-
-# Set environment variables
-ENV VTROOT /vt/src/vitess.io/vitess
-# Set the vtdataroot such that it uses the volume mount
-ENV VTDATAROOT /vt/vtdataroot
-
-# create the vtdataroot directory
-RUN mkdir -p $VTDATAROOT
-
-# install goimports
-RUN go install golang.org/x/tools/cmd/goimports@latest
-
-# sleep for 50 minutes
-CMD sleep 3000
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 8acc1f57f72..d32f01a6668 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -18,7 +18,7 @@
## Checklist
-- [ ] "Backport me!" label has been added if this change should be backported
+- [ ] "Backport to:" labels have been added if this change should be back-ported
- [ ] Tests were added or are not required
- [ ] Documentation was added or is not required
diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml
new file mode 100644
index 00000000000..626d3ce14a2
--- /dev/null
+++ b/.github/workflows/assign_milestone.yml
@@ -0,0 +1,30 @@
+name: Assign Milestone
+
+on:
+ pull_request_target:
+ types: [opened]
+
+permissions: read-all
+
+env:
+ GH_TOKEN: ${{ github.token }}
+
+jobs:
+ build:
+ name: Assign Milestone
+ runs-on: ubuntu-22.04
+ permissions:
+ pull-requests: write
+
+ steps:
+ - name: Set up Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.20.8
+
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Assign Milestone
+ run: |
+ gh pr edit ${{ github.event.number }} --milestone "v$(sed -n 's/.*versionName.*\"\([[:digit:]\.]*\).*\"/\1/p' ./go/vt/servenv/version.go)"
diff --git a/.github/workflows/check_label.yml b/.github/workflows/check_label.yml
new file mode 100644
index 00000000000..81e093ba637
--- /dev/null
+++ b/.github/workflows/check_label.yml
@@ -0,0 +1,72 @@
+name: Check Pull Request labels
+on:
+ pull_request:
+ types: [opened, labeled, unlabeled, synchronize]
+
+concurrency:
+ group: format('{0}-{1}', ${{ github.ref }}, 'Check Pull Request labels')
+ cancel-in-progress: true
+
+jobs:
+ check_pull_request_labels:
+ name: Check Pull Request labels
+ timeout-minutes: 10
+ runs-on: ubuntu-22.04
+ if: github.repository == 'vitessio/vitess'
+ steps:
+ - name: Release Notes label
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'release notes (needs details)')}}" == "true" ]]; then
+ echo The "release notes (needs details)" label is set. The changes made in this Pull Request need to be documented in the release notes summary "('./changelog/16.0/16.0.1/summary.md')". Once documented, the "release notes (needs details)" label can be removed.
+ exit 1
+ fi
+
+ - name: Check type and component labels
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number }}
+ run: |
+ LABELS_JSON="/tmp/labels.json"
+ # Get labels for this pull request
+ curl -s \
+ -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
+ -H "Accept: application/vnd.github.v3+json" \
+ -H "Content-type: application/json" \
+ "https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${PR_NUMBER}/labels" \
+ > "$LABELS_JSON"
+ if ! cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Component:' ; then
+ echo "Expecting PR to have label 'Component: ...'"
+ exit 1
+ fi
+ if ! cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Type:' ; then
+ echo "Expecting PR to have label 'Type: ...'"
+ exit 1
+ fi
+
+ - name: Check NeedsWebsiteDocsUpdate and NeedsDescriptionUpdate are off
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number }}
+ run: |
+ LABELS_JSON="/tmp/labels.json"
+ # Get labels for this pull request
+ curl -s \
+ -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
+ -H "Accept: application/vnd.github.v3+json" \
+ -H "Content-type: application/json" \
+ "https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${PR_NUMBER}/labels" \
+ > "$LABELS_JSON"
+ if cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'NeedsDescriptionUpdate' ; then
+ echo "Expecting PR to not have the NeedsDescriptionUpdate label, please update the PR's description and remove the label."
+ exit 1
+ fi
+ if cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'NeedsWebsiteDocsUpdate' ; then
+ echo "Expecting PR to not have the NeedsWebsiteDocsUpdate label, please update the documentation and remove the label."
+ exit 1
+ fi
+
+
+ - name: Do Not Merge label
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Do Not Merge')}}" == "true" ]]; then
+ echo "This PR should not be merged. The 'Do Not Merge' label is set. Please unset it if you wish to merge this PR."
+ exit 1
+ fi
\ No newline at end of file
diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml
index aebd68686bc..c000f3198b0 100644
--- a/.github/workflows/check_make_vtadmin_authz_testgen.yml
+++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml
@@ -4,8 +4,15 @@ jobs:
build:
name: Check Make vtadmin_authz_testgen
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -14,11 +21,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -31,22 +38,22 @@ jobs:
- 'bootstrap.sh'
- 'tools/**'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'Makefile'
- 'go/vt/vtadmin/**'
- '.github/workflows/check_make_vtadmin_authz_testgen.yml'
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml
index 970ce9190c0..874707c67fd 100644
--- a/.github/workflows/check_make_vtadmin_web_proto.yml
+++ b/.github/workflows/check_make_vtadmin_web_proto.yml
@@ -4,8 +4,15 @@ jobs:
build:
name: Check Make VTAdmin Web Proto
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -14,11 +21,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -31,7 +38,8 @@ jobs:
- 'bootstrap.sh'
- 'tools/**'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'Makefile'
- 'go/vt/proto/**'
- 'proto/*.proto'
@@ -39,17 +47,17 @@ jobs:
- '.github/workflows/check_make_vtadmin_web_proto.yml'
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Setup Node
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
- uses: actions/setup-node@v2
+ uses: actions/setup-node@v3
with:
# node-version should match package.json
- node-version: '16.13.0'
+ node-version: '16.19.0'
- name: Install npm dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
diff --git a/.github/workflows/close_stale_pull_requests.yml b/.github/workflows/close_stale_pull_requests.yml
index 688bbbc35c7..971fcc39f06 100644
--- a/.github/workflows/close_stale_pull_requests.yml
+++ b/.github/workflows/close_stale_pull_requests.yml
@@ -10,7 +10,7 @@ permissions:
jobs:
close_stale_pull_requests:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
- uses: actions/stale@v5
with:
diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml
index c44c34d2d07..056ac512f72 100644
--- a/.github/workflows/cluster_endtoend_12.yml
+++ b/.github/workflows/cluster_endtoend_12.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (12)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml
index 97241444e19..e83444b95ef 100644
--- a/.github/workflows/cluster_endtoend_13.yml
+++ b/.github/workflows/cluster_endtoend_13.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (13)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml
index 3d8e22ea659..7e9d597df57 100644
--- a/.github/workflows/cluster_endtoend_15.yml
+++ b/.github/workflows/cluster_endtoend_15.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (15)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml
index 9913dbd59a2..aee8d491ae8 100644
--- a/.github/workflows/cluster_endtoend_18.yml
+++ b/.github/workflows/cluster_endtoend_18.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (18)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -96,7 +107,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml
index cfa90240080..f99601db059 100644
--- a/.github/workflows/cluster_endtoend_21.yml
+++ b/.github/workflows/cluster_endtoend_21.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (21)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml
index add0cbc0a54..9e6b0777821 100644
--- a/.github/workflows/cluster_endtoend_22.yml
+++ b/.github/workflows/cluster_endtoend_22.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (22)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_26.yml b/.github/workflows/cluster_endtoend_26.yml
deleted file mode 100644
index ddaab899bfb..00000000000
--- a/.github/workflows/cluster_endtoend_26.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (26)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (26)')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (26)
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_26.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
- sudo apt-get update
-
- # Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
- sudo service mysql stop
- sudo service etcd stop
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
- go mod download
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 26 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_singleton.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml
similarity index 71%
rename from .github/workflows/cluster_endtoend_onlineddl_singleton.yml
rename to .github/workflows/cluster_endtoend_backup_pitr.yml
index 054fc039cdf..ada7ccbbd34 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_singleton.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr.yml
@@ -1,9 +1,9 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (onlineddl_singleton)
+name: Cluster (backup_pitr)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_singleton)')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr)')
cancel-in-progress: true
env:
@@ -13,10 +13,17 @@ env:
jobs:
build:
- name: Run endtoend tests on Cluster (onlineddl_singleton)
- runs-on: ubuntu-20.04
+ name: Run endtoend tests on Cluster (backup_pitr)
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,30 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_singleton.yml'
+ - '.github/workflows/cluster_endtoend_backup_pitr.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +125,7 @@ jobs:
set -x
# run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_singleton | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+ eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- name: Print test output and Record test result in launchable
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
diff --git a/.github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml
similarity index 81%
rename from .github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml
rename to .github/workflows/cluster_endtoend_backup_pitr_mysql57.yml
index 3c7fca00d06..530ad9eae39 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml
@@ -1,9 +1,9 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (onlineddl_declarative) mysql57
+name: Cluster (backup_pitr) mysql57
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_declarative) mysql57')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr) mysql57')
cancel-in-progress: true
env:
@@ -13,10 +13,17 @@ env:
jobs:
build:
- name: Run endtoend tests on Cluster (onlineddl_declarative) mysql57
- runs-on: ubuntu-20.04
+ name: Run endtoend tests on Cluster (backup_pitr) mysql57
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,28 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml'
+ - '.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +96,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +113,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -128,7 +136,7 @@ jobs:
set -x
# run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_declarative | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+ eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- name: Print test output and Record test result in launchable
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
index 17ac8cac8c8..175f53b7bda 100644
--- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
+++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (ers_prs_newfeatures_heavy)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml
index 2d92a6d03d3..602c9ab999a 100644
--- a/.github/workflows/cluster_endtoend_mysql80.yml
+++ b/.github/workflows/cluster_endtoend_mysql80.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (mysql80)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
index a8fef50ea6f..c00a972dd78 100644
--- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml
+++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (mysql_server_vault)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -96,7 +107,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_declarative.yml b/.github/workflows/cluster_endtoend_onlineddl_declarative.yml
deleted file mode 100644
index 94855bfb3a1..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_declarative.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_declarative)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_declarative)')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_declarative)
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_declarative.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
- sudo apt-get update
-
- # Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
- sudo service mysql stop
- sudo service etcd stop
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
- go mod download
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_declarative | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
index 269db6e959d..4c72209f226 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_ghost)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_ghost.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
index c10dab28517..4cffa4caaef 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_ghost) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
index ca3329096d6..703eab7a8ea 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_revert)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_revert.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
index 0835553d39a..1d1429f2bfe 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_revert) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revertible.yml b/.github/workflows/cluster_endtoend_onlineddl_revertible.yml
deleted file mode 100644
index a750825d3e9..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_revertible.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_revertible)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revertible)')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_revertible)
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_revertible.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
- sudo apt-get update
-
- # Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
- sudo service mysql stop
- sudo service etcd stop
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
- go mod download
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revertible | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
index b51711df734..fc552f7e37f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_scheduler)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_scheduler.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
index dec20500256..84139168bdd 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_scheduler) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
index 1ad7306f34b..5566aa2e550 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
index b82d19217d7..97e3f002034 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
index ffe0cf531b1..55f5c2b3b7a 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
index 0e38ecf0e70..8dc65820b73 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
index 9d684ec9ea7..13509e70e3a 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
index 81c586269a0..2cfa7d12d4e 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
index 74f40632d58..8e960d3c41f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_suite)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
index 6e358a6261f..1d18b2b36a8 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
index 22ef9c7932e..3dbbba573aa 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (schemadiff_vrepl)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,31 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_schemadiff_vrepl.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +82,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +103,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
index b33417798d6..378c9609417 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (schemadiff_vrepl) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,29 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml'
+ - 'go/test/endtoend/onlineddl/vrepl_suite/testdata'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +97,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +114,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
index 47bce7c735f..d9e6653d5cf 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_consul)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -96,7 +107,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
index 89434ba98d5..e4732868da5 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_tablegc)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
index bf62b36b26b..5a2081689ea 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_tablegc) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -88,14 +96,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -105,7 +113,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml
index 8da831878a6..cc71bd5e0b0 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_throttler)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
index 2c72908622e..596f2c36e49 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_throttler_custom_config)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
similarity index 71%
rename from .github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml
rename to .github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
index cfce8ad2eab..220d45d58be 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
@@ -1,9 +1,9 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (onlineddl_revertible) mysql57
+name: Cluster (tabletmanager_throttler_topo)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revertible) mysql57')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler_topo)')
cancel-in-progress: true
env:
@@ -13,10 +13,17 @@ env:
jobs:
build:
- name: Run endtoend tests on Cluster (onlineddl_revertible) mysql57
- runs-on: ubuntu-20.04
+ name: Run endtoend tests on Cluster (tabletmanager_throttler_topo)
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,30 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml'
+ - '.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,41 +81,28 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
+
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ # Install everything else we need, and configure
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -128,7 +125,7 @@ jobs:
set -x
# run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revertible | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+ eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- name: Print test output and Record test result in launchable
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
index 49e4226c7e7..1e25a842831 100644
--- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml
+++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (topo_connection_cache)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
index 3a09023463e..3027b28c6b8 100644
--- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_across_db_versions)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml
index 5b836239342..d33783d257b 100644
--- a/.github/workflows/cluster_endtoend_vreplication_basic.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_basic)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
index fadf85c0bfe..9b245c8595f 100644
--- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_cellalias)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
index ac914da2a43..91edcb9d8e2 100644
--- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_migrate_vdiff2_convert_tz)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml
index 96427323190..d43f4664968 100644
--- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_multicell)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml
index 7e81ef4d409..5c7868b0e32 100644
--- a/.github/workflows/cluster_endtoend_vreplication_v2.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_v2)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream_failover.yml
index 77d283a46dc..e1c4699b65a 100644
--- a/.github/workflows/cluster_endtoend_vstream_failover.yml
+++ b/.github/workflows/cluster_endtoend_vstream_failover.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_failover)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
index 432a2c3ec39..42042071cf8 100644
--- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
+++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_stoponreshard_false)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
index f91b8ad230f..675b64e64be 100644
--- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
+++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_stoponreshard_true)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
index b270a350977..dff7a842287 100644
--- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
+++ b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtbackup_transform.yml b/.github/workflows/cluster_endtoend_vtbackup.yml
similarity index 73%
rename from .github/workflows/cluster_endtoend_vtbackup_transform.yml
rename to .github/workflows/cluster_endtoend_vtbackup.yml
index e7d12c33484..ff17a63b767 100644
--- a/.github/workflows/cluster_endtoend_vtbackup_transform.yml
+++ b/.github/workflows/cluster_endtoend_vtbackup.yml
@@ -1,9 +1,9 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (vtbackup_transform)
+name: Cluster (vtbackup)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtbackup_transform)')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtbackup)')
cancel-in-progress: true
env:
@@ -13,10 +13,17 @@ env:
jobs:
build:
- name: Run endtoend tests on Cluster (vtbackup_transform)
- runs-on: ubuntu-20.04
+ name: Run endtoend tests on Cluster (vtbackup)
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,30 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_vtbackup_transform.yml'
+ - '.github/workflows/cluster_endtoend_vtbackup.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +125,7 @@ jobs:
set -x
# run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtbackup_transform | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+ eatmydata -- go run test.go -docker=false -follow -shard vtbackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- name: Print test output and Record test result in launchable
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
index 3b4f432c113..bc07125f1f3 100644
--- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtctlbackup_sharded_clustertest_heavy)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
index dee1595a8c0..3aaee43af92 100644
--- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_concurrentdml)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
index be3370466ff..3161053fd68 100644
--- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_gen4)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
index 634fb1c8302..6982c6acfda 100644
--- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_general_heavy)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
index bf2fc291e33..26d1b198a4f 100644
--- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_godriver)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
index d5436a25868..13015700fe7 100644
--- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_partial_keyspace)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml
index e71808afb5c..fe8de9d9d5a 100644
--- a/.github/workflows/cluster_endtoend_vtgate_queries.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_queries)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
index f0f488a736c..de53010f685 100644
--- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_readafterwrite)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
index ef1ea86d716..be356d8c781 100644
--- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_reservedconn)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml
index 09272396754..6b59f050a63 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_schema)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
index 912df7ba811..1ec982dc81e 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_schema_tracker)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
index aa2df3b6717..05881d09d98 100644
--- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_tablet_healthcheck_cache)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml
index b7a235f70d4..8d20c0b43e0 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
index 5ea52a9c797..a2671328e77 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo_consul)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -96,7 +107,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
index 72a3f72e8ab..fca95a26844 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo_etcd)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
index d28fc5fe582..3ab6ca26a25 100644
--- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_transaction)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
index 2f8c871260a..d4ba377f7c6 100644
--- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_unsharded)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
index 6a8c96a8e69..ea65dce089c 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vindex_heavy)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,8 +124,6 @@ jobs:
set -x
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
cat <<-EOF>>./config/mycnf/mysql80.cnf
diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
index 7026c374fc6..af73b945d39 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vschema)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,16 +81,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -91,7 +102,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml
index 981446f6070..e8ded8b55be 100644
--- a/.github/workflows/cluster_endtoend_vtorc.yml
+++ b/.github/workflows/cluster_endtoend_vtorc.yml
@@ -1,76 +1,137 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (vtorc)(mysql80)
+name: Cluster (vtorc)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc)(mysql80)')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc)')
cancel-in-progress: true
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
jobs:
build:
- name: Run endtoend tests on Cluster (vtorc)(mysql80)
- runs-on: self-hosted
+ name: Run endtoend tests on Cluster (vtorc)
+ runs-on: ubuntu-22.04
steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - '.github/docker/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_vtorc.yml'
-
- - name: Build Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: docker build -f ./.github/docker/cluster_test_vtorc/Dockerfile -t cluster_test_vtorc:$GITHUB_SHA .
-
- - name: Run test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: docker run --name "cluster_test_vtorc_$GITHUB_SHA" cluster_test_vtorc:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard vtorc -- -- --keep-data=true'
-
- - name: Print Volume Used
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker inspect -f '{{ (index .Mounts 0).Name }}' cluster_test_vtorc_$GITHUB_SHA
-
- - name: Cleanup Docker Volume
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -v cluster_test_vtorc_$GITHUB_SHA
-
- - name: Cleanup Docker Container
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -f cluster_test_vtorc_$GITHUB_SHA
-
- - name: Cleanup Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker image rm cluster_test_vtorc:$GITHUB_SHA
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+ skip='true'
+ fi
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v3
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: frouioui/paths-filter@main
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/cluster_endtoend_vtorc.yml'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.20.8
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v4
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ # Install everything else we need, and configure
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
+ sudo service mysql stop
+ sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --source .
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ timeout-minutes: 45
+ run: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+ - name: Print test output and Record test result in launchable
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ run: |
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+
+ # print test output
+ cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
index cebe62cbe75..36529fc6e31 100644
--- a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
@@ -1,76 +1,148 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (vtorc)(mysql57)
+name: Cluster (vtorc) mysql57
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc)(mysql57)')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc) mysql57')
cancel-in-progress: true
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
jobs:
build:
- name: Run endtoend tests on Cluster (vtorc)(mysql57)
- runs-on: self-hosted
+ name: Run endtoend tests on Cluster (vtorc) mysql57
+ runs-on: ubuntu-22.04
steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - '.github/docker/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_vtorc_mysql57.yml'
-
- - name: Build Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: docker build -f ./.github/docker/cluster_test_vtorc_mysql57/Dockerfile -t cluster_test_vtorc_mysql57:$GITHUB_SHA .
-
- - name: Run test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: docker run --name "cluster_test_vtorc_mysql57_$GITHUB_SHA" cluster_test_vtorc_mysql57:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard vtorc -- -- --keep-data=true'
-
- - name: Print Volume Used
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker inspect -f '{{ (index .Mounts 0).Name }}' cluster_test_vtorc_mysql57_$GITHUB_SHA
-
- - name: Cleanup Docker Volume
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -v cluster_test_vtorc_mysql57_$GITHUB_SHA
-
- - name: Cleanup Docker Container
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -f cluster_test_vtorc_mysql57_$GITHUB_SHA
-
- - name: Cleanup Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker image rm cluster_test_vtorc_mysql57:$GITHUB_SHA
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+ skip='true'
+ fi
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v3
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: frouioui/paths-filter@main
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/cluster_endtoend_vtorc_mysql57.yml'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.20.8
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v4
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ sudo apt-get update
+
+ # Uninstall any previously installed MySQL first
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+
+ sudo systemctl stop apparmor
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
+ sudo apt-get -y autoremove
+ sudo apt-get -y autoclean
+ sudo deluser mysql
+ sudo rm -rf /var/lib/mysql
+ sudo rm -rf /etc/mysql
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
+ echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
+
+ sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
+ sudo service mysql stop
+ sudo service etcd stop
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --source .
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ timeout-minutes: 45
+ run: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+ - name: Print test output and Record test result in launchable
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ run: |
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+
+ # print test output
+ cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
similarity index 71%
rename from .github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml
rename to .github/workflows/cluster_endtoend_vttablet_prscomplex.yml
index 40ae6050d35..21b82d83b78 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
@@ -1,9 +1,9 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (onlineddl_singleton) mysql57
+name: Cluster (vttablet_prscomplex)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_singleton) mysql57')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vttablet_prscomplex)')
cancel-in-progress: true
env:
@@ -13,10 +13,17 @@ env:
jobs:
build:
- name: Run endtoend tests on Cluster (onlineddl_singleton) mysql57
- runs-on: ubuntu-20.04
+ name: Run endtoend tests on Cluster (vttablet_prscomplex)
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,27 +50,30 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml'
+ - '.github/workflows/cluster_endtoend_vttablet_prscomplex.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,41 +81,28 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
+
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ # Install everything else we need, and configure
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -128,7 +125,7 @@ jobs:
set -x
# run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_singleton | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+ eatmydata -- go run test.go -docker=false -follow -shard vttablet_prscomplex | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- name: Print test output and Record test result in launchable
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml
index 8c9b344bf07..3e53566f9a1 100644
--- a/.github/workflows/cluster_endtoend_xb_backup.yml
+++ b/.github/workflows/cluster_endtoend_xb_backup.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_backup)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,6 +81,7 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
+
# Setup Percona Server for MySQL 8.0
sudo apt-get update
sudo apt-get install -y lsb-release gnupg2 curl
@@ -80,7 +91,8 @@ jobs:
sudo apt-get update
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -93,7 +105,7 @@ jobs:
sudo apt-get install percona-xtrabackup-80 lz4
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
index e4bbc8c12b2..fc5a0def074 100644
--- a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
@@ -18,9 +18,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_backup) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -29,11 +36,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -47,7 +54,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -56,18 +64,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -92,14 +100,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -121,7 +129,7 @@ jobs:
fi
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml
index 88fcde631d1..39d618b51c1 100644
--- a/.github/workflows/cluster_endtoend_xb_recovery.yml
+++ b/.github/workflows/cluster_endtoend_xb_recovery.yml
@@ -14,9 +14,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_recovery)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -25,11 +32,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +50,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,18 +60,20 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -71,6 +81,7 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
+
# Setup Percona Server for MySQL 8.0
sudo apt-get update
sudo apt-get install -y lsb-release gnupg2 curl
@@ -80,7 +91,8 @@ jobs:
sudo apt-get update
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -93,7 +105,7 @@ jobs:
sudo apt-get install percona-xtrabackup-80 lz4
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
index ae656927322..fa110a24b0d 100644
--- a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
@@ -18,9 +18,16 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_recovery) mysql57
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -29,11 +36,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -47,7 +54,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -56,18 +64,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -92,14 +100,14 @@ jobs:
# Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -121,7 +129,7 @@ jobs:
fi
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
diff --git a/.github/workflows/code_freeze.yml b/.github/workflows/code_freeze.yml
index 381793fa84d..efe1d6e8856 100644
--- a/.github/workflows/code_freeze.yml
+++ b/.github/workflows/code_freeze.yml
@@ -5,7 +5,7 @@ on:
jobs:
build:
name: Code Freeze
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
- name: Fail if Code Freeze is enabled
run: |
diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml
index 862b3f36234..669231f287a 100644
--- a/.github/workflows/codeql_analysis.yml
+++ b/.github/workflows/codeql_analysis.yml
@@ -7,11 +7,12 @@ on:
- release-**.0
schedule:
- cron: '0 0 * * 1'
+ workflow_dispatch:
jobs:
analyze:
name: Analyze
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
permissions:
actions: read
contents: read
@@ -39,9 +40,9 @@ jobs:
# queries: security-extended,security-and-quality
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Get base dependencies
run: |
@@ -56,7 +57,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -78,11 +79,25 @@ jobs:
sudo apt-get update
sudo apt-get install percona-xtrabackup-24
- - name: Building last release's binaries
- timeout-minutes: 10
+ - name: Building binaries
+ timeout-minutes: 30
run: |
source build.env
make build
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
+
+ - name: Slack Workflow Notification
+ if: ${{ failure() }}
+ uses: Gamesight/slack-workflow-status@master
+ with:
+ repo_token: ${{secrets.GITHUB_TOKEN}}
+ slack_webhook_url: ${{secrets.SLACK_WEBHOOK_URL}}
+ channel: '#codeql'
+ name: 'CodeQL Workflows'
+
+ - name: Fail if needed
+ if: ${{ failure() }}
+ run: |
+ exit 1
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index b087c8e4716..677cd49a146 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -10,26 +10,20 @@ on:
jobs:
build:
name: Create Release
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies
run: |
diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml
index 5b62635be43..a236185dea7 100644
--- a/.github/workflows/docker_test_cluster_10.yml
+++ b/.github/workflows/docker_test_cluster_10.yml
@@ -4,9 +4,16 @@ jobs:
build:
name: Docker Test Cluster 10
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -15,11 +22,11 @@ jobs:
skip='true'
fi
echo Skip $skip
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -33,7 +40,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -43,22 +51,15 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
echo "value: " ${{steps.skip-workflow.outputs.skip-workflow}}
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Run tests which require docker - 1
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml
index 28fec63703a..f6b3f644914 100644
--- a/.github/workflows/docker_test_cluster_25.yml
+++ b/.github/workflows/docker_test_cluster_25.yml
@@ -4,9 +4,16 @@ jobs:
build:
name: Docker Test Cluster 25
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -15,11 +22,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -33,7 +40,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -43,21 +51,14 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Run tests which require docker - 2
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml
index 4e1a0890ead..f4b2e2635c5 100644
--- a/.github/workflows/e2e_race.yml
+++ b/.github/workflows/e2e_race.yml
@@ -4,8 +4,15 @@ jobs:
build:
name: End-to-End Test (Race)
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -14,11 +21,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -32,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -41,27 +49,28 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup MySQL 8.0
+ # Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
+
# Install everything else we need, and configure
- sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml
index 30eb503733b..fa28f46fd7e 100644
--- a/.github/workflows/endtoend.yml
+++ b/.github/workflows/endtoend.yml
@@ -4,8 +4,15 @@ jobs:
build:
name: End-to-End Test
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -14,11 +21,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -32,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -41,21 +49,14 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml
index 71ae05a7b2f..f0b04fd3947 100644
--- a/.github/workflows/local_example.yml
+++ b/.github/workflows/local_example.yml
@@ -7,10 +7,17 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest]
+ os: [ubuntu-22.04]
topo: [consul,etcd,k8s]
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -19,11 +26,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -37,7 +44,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -47,26 +55,27 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
run: |
- if [ ${{matrix.os}} = "ubuntu-latest" ]; then
- # Setup MySQL 8.0
+ if [ ${{matrix.os}} = "ubuntu-22.04" ]; then
+ # Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
+
# Install everything else we need, and configure
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml
index ef69e85bbc1..faed1845135 100644
--- a/.github/workflows/region_example.yml
+++ b/.github/workflows/region_example.yml
@@ -7,10 +7,17 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest]
+ os: [ubuntu-22.04]
topo: [etcd]
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -19,11 +26,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -37,7 +44,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -47,22 +55,23 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
run: |
- if [ ${{matrix.os}} = "ubuntu-latest" ]; then
- # Setup MySQL 8.0
+ if [ ${{matrix.os}} = "ubuntu-22.04" ]; then
+ # Get key to latest MySQL repo
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -96,5 +105,4 @@ jobs:
if [ ${{matrix.os}} = "macos-latest" ]; then
export PATH="/usr/local/opt/mysql@5.7/bin:$PATH"
fi
- sed -i 's/user\/my-vitess/runner\/work\/vitess\/vitess/g' examples/region_sharding/main_vschema_sharded.json #set correct path to countries.json
eatmydata -- go run test.go -print-log -follow -retry=1 region_example
diff --git a/.github/workflows/release_notes_label.yml b/.github/workflows/release_notes_label.yml
deleted file mode 100644
index 220bd07da30..00000000000
--- a/.github/workflows/release_notes_label.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: Check Pull Request labels
-on:
- pull_request:
- types: [opened, labeled, unlabeled, synchronize]
-
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Check Pull Request labels')
- cancel-in-progress: true
-
-jobs:
- check_pull_request_labels:
- name: Check Pull Request labels
- timeout-minutes: 10
- runs-on: ubuntu-latest
- if: github.repository == 'vitessio/vitess'
- steps:
- - uses: mheap/github-action-required-labels@v1
- name: Check release notes label
- id: required_label
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- with:
- mode: exactly
- count: 0
- labels: "release notes (needs details)"
-
- - name: Print helper
- if: failure() && steps.required_label.outcome == 'failure'
- run: |
- echo The "release notes (needs details)" label is set. The changes made in this Pull Request need to be documented in the release notes summary "('./doc/releasenotes/15_0_0_summary.md')". Once documented, the "release notes (needs details)" label can be removed.
- exit 1
-
- - name: Check type and component labels
- env:
- PR_NUMBER: ${{ github.event.pull_request.number }}
- run: |
- LABELS_JSON="/tmp/labels.json"
- # Get labels for this pull request
- curl -s \
- -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
- -H "Accept: application/vnd.github.v3+json" \
- -H "Content-type: application/json" \
- "https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${PR_NUMBER}/labels" \
- > "$LABELS_JSON"
- if ! cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Component:' ; then
- echo "Expecting PR to have label 'Component: ...'"
- exit 1
- fi
- if ! cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Type:' ; then
- echo "Expecting PR to have label 'Type: ...'"
- exit 1
- fi
diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml
deleted file mode 100644
index e71a3a9b6aa..00000000000
--- a/.github/workflows/semgrep.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-on:
- pull_request: {}
- push:
- branches:
- - main
- paths:
- - .github/workflows/semgrep.yml
- schedule:
- - cron: '0 0 * * 0'
- name: Semgrep
- jobs:
- semgrep:
- name: Scan
- runs-on: ubuntu-20.04
- env:
- SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }}
- container:
- image: returntocorp/semgrep
- steps:
- - uses: actions/checkout@v3
- - run: semgrep ci
diff --git a/.github/workflows/sonar_analysis.yml b/.github/workflows/sonar_analysis.yml
index 871e6cc7f83..c9e44063562 100644
--- a/.github/workflows/sonar_analysis.yml
+++ b/.github/workflows/sonar_analysis.yml
@@ -6,26 +6,20 @@ on:
jobs:
build:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.13
+ go-version: 1.20.8
- name: Tune the OS
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies
run: |
diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml
index 2f62e4c2c6e..5139d73d77e 100644
--- a/.github/workflows/static_checks_etc.yml
+++ b/.github/workflows/static_checks_etc.yml
@@ -7,9 +7,16 @@ on:
jobs:
build:
name: Static Code Checks Etc
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -18,11 +25,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Checkout code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Run FOSSA scan and upload build data
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -40,11 +47,12 @@ jobs:
go_files:
- '**/*.go'
- '*.go'
- - 'go.[sumod]'
- - '.github/workflows/static_checks_etc.yml'
+ - 'go.sum'
+ - 'go.mod'
parser_changes:
- 'go/vt/sqlparser/**'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'build.env'
- 'bootstrap.sh'
- 'tools/**'
@@ -53,7 +61,8 @@ jobs:
- 'bootstrap.sh'
- 'tools/**'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'Makefile'
- 'go/vt/proto/**'
- 'proto/*.proto'
@@ -63,7 +72,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'tools/**'
- 'bootstrap.sh'
- '.github/workflows/static_checks_etc.yml'
@@ -72,7 +82,8 @@ jobs:
- 'go/vt/sqlparser/**'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'tools/**'
- 'bootstrap.sh'
- 'misc/git/hooks/asthelpers'
@@ -83,25 +94,23 @@ jobs:
- 'Makefile'
- 'bootstrap.sh'
- '.github/workflows/static_checks_etc.yml'
-
+ ci_config:
+ - 'test/config.json'
+ - '.github/workflows/static_checks_etc.yml'
+ release_notes:
+ - 'changelog/**'
+ - './go/tools/releases/**'
- name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true'
- uses: actions/setup-go@v2
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true')
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Run go fmt
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
@@ -109,7 +118,7 @@ jobs:
gofmt -l . | grep -vF vendor/ && exit 1 || echo "All files formatted correctly"
- name: Install goimports
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' || steps.changes.outputs.visitor == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.visitor == 'true')
run: |
go install golang.org/x/tools/cmd/goimports@latest
@@ -120,7 +129,7 @@ jobs:
echo $out | grep go > /dev/null && echo -e "The following files are malformatted:\n$out" && exit 1 || echo "All the files are formatted correctly"
- name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.parser_changes == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.go_files == 'true')
run: |
sudo apt-get update
sudo apt-get install -y make unzip g++ etcd curl git wget
@@ -128,24 +137,24 @@ jobs:
go mod download
- name: Run make minimaltools
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.parser_changes == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.go_files == 'true')
run: |
make minimaltools
- name: check_make_parser
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.parser_changes == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.go_files == 'true')
run: |
- tools/check_make_parser.sh
+ tools/check_make_parser.sh || exit 1
- name: check_make_sizegen
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.sizegen == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.sizegen == 'true' || steps.changes.outputs.go_files == 'true')
run: |
- tools/check_make_sizegen.sh
+ tools/check_make_sizegen.sh || exit 1
- name: check_make_visitor
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.visitor == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.visitor == 'true' || steps.changes.outputs.go_files == 'true')
run: |
- misc/git/hooks/asthelpers
+ misc/git/hooks/asthelpers || exit 1
- name: run ensure_bootstrap_version
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -156,7 +165,7 @@ jobs:
- name: Install golangci-lint
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
- run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2
+ run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2
- name: Clean Env
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
@@ -168,7 +177,7 @@ jobs:
- name: Run golangci-lint
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
- run: $(go env GOPATH)/bin/golangci-lint run go/...
+ run: $(go env GOPATH)/bin/golangci-lint run go/... --timeout 10m || exit 1
- name: Run go mod tidy
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
@@ -187,4 +196,23 @@ jobs:
- name: check_make_proto
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
run: |
- tools/check_make_proto.sh
+ tools/check_make_proto.sh || exit 1
+
+ - name: Check test/config.json
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.ci_config == 'true')
+ run: |
+ go run ./go/tools/ci-config/main.go || exit 1
+
+ - name: Check changelog
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.release_notes == 'true'
+ run: |
+ set -e
+ go run ./go/tools/releases/releases.go
+ output=$(git status -s)
+ if [ -z "${output}" ]; then
+ exit 0
+ fi
+ echo 'We wish to maintain a consistent changelog directory, please run `go run ./go/tools/releases/releases.go`, commit and push again.'
+ echo 'Running `go run ./go/tools/releases/releases.go` on CI yields the following changes:'
+ echo "$output"
+ echo ""
\ No newline at end of file
diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml
index 82aa3ae6da4..f36c7190974 100644
--- a/.github/workflows/unit_race.yml
+++ b/.github/workflows/unit_race.yml
@@ -8,8 +8,15 @@ jobs:
build:
name: Unit Test (Race)
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -18,11 +25,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -36,7 +43,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -45,14 +53,14 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/.github/workflows/unit_test_mariadb103.yml b/.github/workflows/unit_test_mariadb103.yml
deleted file mode 100644
index d8af0b6fd6d..00000000000
--- a/.github/workflows/unit_test_mariadb103.yml
+++ /dev/null
@@ -1,105 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Unit Test (mariadb103)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (mariadb103)')
- cancel-in-progress: true
-
-jobs:
- test:
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- unit_tests:
- - 'go/**'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/unit_test_mariadb103.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- export DEBIAN_FRONTEND="noninteractive"
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # mariadb103
- sudo apt-get install -y software-properties-common
- sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
- sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] https://mirror.rackspace.com/mariadb/repo/10.3/ubuntu bionic main'
- sudo apt update
- sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server
-
- sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata
- sudo service mysql stop
- sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile"
-
- mkdir -p dist bin
- curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist
- mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/
-
- go mod download
- go install golang.org/x/tools/cmd/goimports@latest
-
- - name: Run make tools
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- make tools
-
- - name: Run test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- timeout-minutes: 30
- run: |
- eatmydata -- make unit_test
diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml
index 5e154874911..621068ff45f 100644
--- a/.github/workflows/unit_test_mysql57.yml
+++ b/.github/workflows/unit_test_mysql57.yml
@@ -6,11 +6,23 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (mysql57)')
cancel-in-progress: true
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
jobs:
test:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -19,11 +31,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -37,7 +49,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -46,14 +59,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -77,14 +94,14 @@ jobs:
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
# mysql57
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata
sudo service mysql stop
@@ -98,14 +115,38 @@ jobs:
go mod download
go install golang.org/x/tools/cmd/goimports@latest
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
- name: Run make tools
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
run: |
make tools
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --source .
+
- name: Run test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
timeout-minutes: 30
run: |
- eatmydata -- make unit_test
+ eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+ - name: Print test output and Record test result in launchable
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always()
+ run: |
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+
+ # print test output
+ cat output.txt
diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml
index 04db046170f..bc8b8502779 100644
--- a/.github/workflows/unit_test_mysql80.yml
+++ b/.github/workflows/unit_test_mysql80.yml
@@ -6,11 +6,23 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (mysql80)')
cancel-in-progress: true
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
jobs:
test:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -19,11 +31,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -37,7 +49,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -46,14 +59,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
# Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
@@ -77,7 +94,7 @@ jobs:
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
# mysql80
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -95,14 +112,38 @@ jobs:
go mod download
go install golang.org/x/tools/cmd/goimports@latest
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
- name: Run make tools
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
run: |
make tools
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --source .
+
- name: Run test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
timeout-minutes: 30
run: |
- eatmydata -- make unit_test
+ eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+ - name: Print test output and Record test result in launchable
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always()
+ run: |
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+
+ # print test output
+ cat output.txt
diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml
new file mode 100644
index 00000000000..a80f234f6b8
--- /dev/null
+++ b/.github/workflows/update_golang_version.yml
@@ -0,0 +1,81 @@
+name: Update Golang Version
+
+on:
+ schedule:
+ - cron: "0 0 * * *" # Runs every day at midnight UTC
+ workflow_dispatch:
+
+jobs:
+ update_golang_version:
+ strategy:
+ matrix:
+ branch: [ main, release-16.0, release-15.0, release-14.0 ]
+ name: Update Golang Version
+ runs-on: ubuntu-latest
+ steps:
+ - name: Set up Go
+ uses: actions/setup-go@v2
+ with:
+ go-version: 1.20.8
+
+ - name: Check out code
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ matrix.branch }}
+
+ - name: Detect new version and update codebase
+ id: detect-and-update
+ run: |
+ old_go_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get go-version)
+ echo "old-go-version=${old_go_version}" >> $GITHUB_OUTPUT
+
+ if [ ${{ matrix.branch }} == "main" ]; then
+ go run ./go/tools/go-upgrade/go-upgrade.go upgrade --workflow-update=false --main --allow-major-upgrade
+ else
+ go run ./go/tools/go-upgrade/go-upgrade.go upgrade --workflow-update=false
+ fi
+
+ output=$(git status -s)
+ if [ -z "${output}" ]; then
+ exit 0
+ fi
+
+ go_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get go-version)
+ bootstrap_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get bootstrap-version)
+ echo "go-version=${go_version}" >> $GITHUB_OUTPUT
+ echo "bootstrap-version=${bootstrap_version}" >> $GITHUB_OUTPUT
+
+ # Check if the PR already exists, if it does then do not create new PR.
+ gh pr list -S "is:open [${{ matrix.branch }}] Upgrade the Golang version to go${go_version}" | grep "OPEN"
+ if [ $? -eq 0 ]; then
+ exit 0
+ fi
+
+ echo "create-pr=true" >> $GITHUB_OUTPUT
+
+ - name: Create Pull Request
+ if: steps.detect-and-update.outputs.create-pr == 'true'
+ uses: peter-evans/create-pull-request@v4
+ with:
+ branch: "upgrade-go-to-${{steps.detect-and-update.outputs.go-version}}-on-${{ matrix.branch }}"
+ commit-message: "bump go version to go${{steps.detect-and-update.outputs.go-version}}"
+ signoff: true
+ delete-branch: true
+ title: "[${{ matrix.branch }}] Upgrade the Golang version to `go${{steps.detect-and-update.outputs.go-version}}`"
+ body: |
+ This Pull Request bumps the Golang version to `go${{steps.detect-and-update.outputs.go-version}}` and the bootstrap version to `${{steps.detect-and-update.outputs.bootstrap-version}}`.
+
+ > Do not trust the bot blindly. A thorough code review must be done to ensure all the files have been correctly modified.
+
+ There are a few manual steps remaining:
+ - [ ] Make sure you update the Golang version used in the previous and next release branches for the Upgrade/Downgrade tests.
+ - [ ] Build and Push the bootstrap images to Docker Hub, the bot cannot handle that.
+ - [ ] Update the `./.github/workflows/*.yml` files with the newer Golang version, the bot cannot handle that due to permissions.
+ - To accomplish this, run the following: `go run ./go/tools/go-upgrade/go-upgrade.go upgrade workflows --go-to=${{steps.detect-and-update.outputs.go-version}}`
+ base: ${{ matrix.branch }}
+ labels: |
+ Skip CI
+ go
+ Benchmark me
+ Component: General
+ Type: CI/Build
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
index 49b07e77cab..20621f503ed 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Previous Release - Backups - E2E
+ runs-on: ubuntu-22.04
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,17 +26,24 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test_e2e:
timeout-minutes: 60
if: always() && needs.get_previous_release.result == 'success'
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - E2E
+ runs-on: ubuntu-22.04
needs:
- get_previous_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -45,11 +52,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -64,7 +71,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -73,18 +81,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -109,7 +117,7 @@ jobs:
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -131,7 +139,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
index 882b09f3f53..1b57d54b1ce 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Latest Release - Backups - E2E - Next Release
+ runs-on: ubuntu-22.04
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,17 +26,24 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test_e2e:
timeout-minutes: 60
if: always() && needs.get_next_release.result == 'success'
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - E2E - Next Release
+ runs-on: ubuntu-22.04
needs:
- get_next_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -48,11 +55,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -67,7 +74,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -76,18 +84,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -112,7 +120,7 @@ jobs:
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -134,7 +142,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -177,8 +185,8 @@ jobs:
source build.env
rm -f $PWD/bin/vtbackup $PWD/bin/vttablet
- cp /tmp/vitess-build-current/bin/vtbackup $PWD/bin/vtbackup
- cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet
+ cp /tmp/vitess-build-other/bin/vtbackup $PWD/bin/vtbackup
+ cp /tmp/vitess-build-current/bin/vttablet $PWD/bin/vttablet
vtbackup --version
vttablet --version
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
index 4abc49e18e4..7bfac25cd38 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get a recent LTS release
- runs-on: ubuntu-20.04
+ name: Get Previous Release - Backups - Manual
+ runs-on: ubuntu-22.04
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,18 +26,25 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
# This job usually execute in ± 20 minutes
upgrade_downgrade_test_manual:
timeout-minutes: 40
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - Manual
+ runs-on: ubuntu-22.04
needs:
- get_previous_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -46,12 +53,12 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +73,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,25 +83,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -109,26 +110,12 @@ jobs:
sudo rm -rf /etc/mysql
# Install MySQL 8.0
- ####
- ## Temporarily pin the MySQL version at 8.0.29 as Vitess 14.0.1 does not have the fix to support
- ## backups of 8.0.30+. See: https://github.com/vitessio/vitess/pull/10847
- ## TODO: remove this pin once the above fixes are included in a v14 release (will be in v14.0.2) OR
- ## Vitess 16.0.0-SNAPSHOT becomes the dev version on vitessio/main
- #sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- #wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
- #echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
- #sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- #sudo apt-get update
- #sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
- ####
- wget -c https://cdn.mysql.com/archives/mysql-8.0/mysql-common_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-plugins_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-client_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client_8.0.28-1ubuntu20.04_amd64.deb
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y ./mysql-*.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
# Install everything else we need, and configure
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata grep
@@ -150,7 +137,7 @@ jobs:
# Checkout to the last release of Vitess
- name: Checkout to the other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -172,7 +159,7 @@ jobs:
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -197,18 +184,18 @@ jobs:
# We also insert a few rows in our three tables.
- name: Create the example Vitess cluster with all components using version N
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/start_cluster.sh
+ source build.env ; cd examples/backups
+ ./start_cluster.sh
# Taking a backup
- name: Take a backup of all the shards
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 2
+ timeout-minutes: 5
run: |
- source build.env ; cd examples/local
- ./backups/take_backups.sh
+ source build.env ; cd examples/backups
+ ./take_backups.sh
# We insert more data in every table after the backup.
# When we restore the backup made in the previous step, we do not want to see the rows we are about to insert now.
@@ -220,7 +207,7 @@ jobs:
- name: Insert more data after the backup
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples; source ./common/env.sh
echo "insert into customer(email) values('new_user_1@domain.com');" | mysql
echo "insert into product(sku, description, price) values('SKU-1009', 'description', 89);" | mysql
@@ -229,10 +216,10 @@ jobs:
# Stop all the tablets and remove their data
- name: Stop tablets
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/stop_tablets.sh
+ source build.env ; cd examples/backups
+ ./stop_tablets.sh
# We downgrade: we use the version N-1 of vttablet
- name: Downgrade - Swap binaries, use VTTablet N-1
@@ -247,18 +234,18 @@ jobs:
# Starting the tablets again, they will automatically start restoring the last backup.
- name: Start new tablets and restore
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/restart_tablets.sh
+ source build.env ; cd examples/backups
+ ./restart_tablets.sh
# give enough time to the tablets to restore the backup
- sleep 60
+ sleep 90
# Count the number of rows in each table to make sure the restoration is successful.
- name: Assert the number of rows in every table
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples ; source ./common/env.sh
echo "select count(sku) from product;" | mysql 2>&1| grep 2
echo "select count(email) from customer;" | mysql 2>&1| grep 5
@@ -268,7 +255,7 @@ jobs:
- name: Insert more rows in the tables
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples ; source ./common/env.sh
echo "insert into customer(email) values('new_user_2@domain.com');" | mysql
echo "insert into product(sku, description, price) values('SKU-1011', 'description', 111);" | mysql
@@ -277,18 +264,10 @@ jobs:
# Taking a second backup of the cluster.
- name: Take a second backup of all the shards
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 2
- run: |
- source build.env ; cd examples/local
- ./backups/take_backups.sh
-
- # Stopping the tablets so we can perform the upgrade.
- - name: Stop tablets
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/stop_tablets.sh
+ source build.env ; cd examples/backups
+ ./take_backups.sh
# We upgrade: we swap binaries and use the version N of the tablet.
- name: Upgrade - Swap binaries, use VTTablet N
@@ -303,18 +282,16 @@ jobs:
# Starting the tablets again and restoring the previous backup.
- name: Start new tablets and restore
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/restart_tablets.sh
- # give enough time to the tablets to restore the backup
- sleep 60
+ source build.env ; cd examples/backups
+ ./upgrade_cluster.sh
# We count the number of rows in every table to check that the restore step was successful.
- name: Assert the number of rows in every table
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples ; source ./common/env.sh
echo "select count(sku) from product;" | mysql 2>&1| grep 3
echo "select count(email) from customer;" | mysql 2>&1| grep 6
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
index 7adb54aaabb..b93e75f03e2 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get a recent LTS release
- runs-on: ubuntu-20.04
+ name: Get Previous Release - Backups - Manual - Next Release
+ runs-on: ubuntu-22.04
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,18 +26,25 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
# This job usually execute in ± 20 minutes
upgrade_downgrade_test_manual:
timeout-minutes: 40
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - Manual - Next Release
+ runs-on: ubuntu-22.04
needs:
- get_next_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -49,12 +56,12 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +76,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,25 +86,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
-
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -112,26 +113,12 @@ jobs:
sudo rm -rf /etc/mysql
# Install MySQL 8.0
- ####
- ## Temporarily pin the MySQL version at 8.0.29 as Vitess 14.0.1 does not have the fix to support
- ## backups of 8.0.30+. See: https://github.com/vitessio/vitess/pull/10847
- ## TODO: remove this pin once the above fixes are included in a v14 release (will be in v14.0.2) OR
- ## Vitess 16.0.0-SNAPSHOT becomes the dev version on vitessio/main
- #sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- #wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
- #echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
- #sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- #sudo apt-get update
- #sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
- ####
- wget -c https://cdn.mysql.com/archives/mysql-8.0/mysql-common_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-plugins_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-client_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client_8.0.28-1ubuntu20.04_amd64.deb
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y ./mysql-*.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
# Install everything else we need, and configure
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata grep
@@ -153,7 +140,7 @@ jobs:
# Checkout to the next release of Vitess
- name: Checkout to the other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -175,7 +162,7 @@ jobs:
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -200,18 +187,18 @@ jobs:
# We also insert a few rows in our three tables.
- name: Create the example Vitess cluster with all components using version N
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/start_cluster.sh
+ source build.env ; cd examples/backups
+ ./start_cluster.sh
# Taking a backup
- name: Take a backup of all the shards
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 2
+ timeout-minutes: 5
run: |
- source build.env ; cd examples/local
- ./backups/take_backups.sh
+ source build.env ; cd examples/backups
+ ./take_backups.sh
# We insert more data in every table after the backup.
# When we restore the backup made in the next step, we do not want to see the rows we are about to insert now.
@@ -223,7 +210,7 @@ jobs:
- name: Insert more data after the backup
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples ; source ./common/env.sh
echo "insert into customer(email) values('new_user_1@domain.com');" | mysql
echo "insert into product(sku, description, price) values('SKU-1009', 'description', 89);" | mysql
@@ -232,10 +219,10 @@ jobs:
# Stop all the tablets and remove their data
- name: Stop tablets
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/stop_tablets.sh
+ source build.env ; cd examples/backups
+ ./stop_tablets.sh
# We downgrade: we use the version N+1 of vttablet
- name: Downgrade - Swap binaries, use VTTablet N+1
@@ -250,10 +237,10 @@ jobs:
# Starting the tablets again, they will automatically start restoring the last backup.
- name: Start new tablets and restore
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/restart_tablets.sh
+ source build.env ; cd examples/backups
+ ./restart_tablets.sh
# give enough time to the tablets to restore the backup
sleep 60
@@ -261,7 +248,7 @@ jobs:
- name: Assert the number of rows in every table
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples ; source ./common/env.sh
echo "select count(sku) from product;" | mysql 2>&1| grep 2
echo "select count(email) from customer;" | mysql 2>&1| grep 5
@@ -271,7 +258,7 @@ jobs:
- name: Insert more rows in the tables
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples ; source ./common/env.sh
echo "insert into customer(email) values('new_user_2@domain.com');" | mysql
echo "insert into product(sku, description, price) values('SKU-1011', 'description', 111);" | mysql
@@ -280,18 +267,10 @@ jobs:
# Taking a second backup of the cluster.
- name: Take a second backup of all the shards
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 2
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/take_backups.sh
-
- # Stopping the tablets so we can perform the upgrade.
- - name: Stop tablets
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
- run: |
- source build.env ; cd examples/local
- ./backups/stop_tablets.sh
+ source build.env ; cd examples/backups
+ ./take_backups.sh
# We upgrade: we swap binaries and use the version N of the tablet.
- name: Upgrade - Swap binaries, use VTTablet N
@@ -306,18 +285,16 @@ jobs:
# Starting the tablets again and restoring the next backup.
- name: Start new tablets and restore
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 5
+ timeout-minutes: 10
run: |
- source build.env ; cd examples/local
- ./backups/restart_tablets.sh
- # give enough time to the tablets to restore the backup
- sleep 60
+ source build.env ; cd examples/backups
+ ./upgrade_cluster.sh
# We count the number of rows in every table to check that the restore step was successful.
- name: Assert the number of rows in every table
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- source build.env ; cd examples/local ; source ./env.sh
+ source build.env ; cd examples ; source ./common/env.sh
echo "select count(sku) from product;" | mysql 2>&1| grep 3
echo "select count(email) from customer;" | mysql 2>&1| grep 6
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
index b4965b9c254..79ab3b038e0 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Previous Release - Query Serving (Queries)
+ runs-on: ubuntu-22.04
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Query Serving (Queries)
+ runs-on: ubuntu-22.04
needs:
- get_previous_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -47,11 +54,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +73,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,18 +83,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -102,7 +110,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -127,7 +135,7 @@ jobs:
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +157,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -173,7 +181,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
- name: Use last release's VTGate
@@ -193,7 +201,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
- name: Use current version VTGate, and other version VTTablet
@@ -215,4 +223,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
index 56a86fdd8fc..a29b87c25be 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Latest Release - Query Serving (Queries) Next Release
+ runs-on: ubuntu-22.04
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Query Serving (Queries) Next Release
+ runs-on: ubuntu-22.04
needs:
- get_next_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -50,11 +57,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +76,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,18 +86,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +113,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -130,7 +138,7 @@ jobs:
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +160,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -176,7 +184,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
- name: Use next release's VTGate
@@ -196,7 +204,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
- name: Use current version VTGate, and other version VTTablet
@@ -218,4 +226,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
index b3399dd3845..5080fcfaf1d 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Previous Release - Query Serving (Schema)
+ runs-on: ubuntu-22.04
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Query Serving (Schema)
+ runs-on: ubuntu-22.04
needs:
- get_previous_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -47,11 +54,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +73,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,18 +83,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -102,7 +110,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -127,7 +135,7 @@ jobs:
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +157,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -173,7 +181,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
- name: Use last release's VTGate
@@ -193,7 +201,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
- name: Use current version VTGate, and other version VTTablet
@@ -215,4 +223,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
index c0b1052ab23..7ef97bbeae4 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Latest Release - Query Serving (Schema) Next Release
+ runs-on: ubuntu-22.04
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Query Serving (Schema) Next Release
+ runs-on: ubuntu-22.04
needs:
- get_next_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -50,11 +57,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +76,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,18 +86,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +113,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -130,7 +138,7 @@ jobs:
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +160,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -176,7 +184,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
- name: Use next release's VTGate
@@ -196,7 +204,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
- name: Use current version VTGate, and other version VTTablet
@@ -218,4 +226,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
index 57392335000..00761ba186f 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Latest Release - Reparent New Vtctl
+ runs-on: ubuntu-22.04
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent New Vtctl
+ runs-on: ubuntu-22.04
needs:
- get_next_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -50,11 +57,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +76,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,18 +86,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +113,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -130,7 +138,7 @@ jobs:
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +160,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -190,4 +198,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
index 99090a640b8..20458bd2a08 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Latest Release - Reparent New VTTablet
+ runs-on: ubuntu-22.04
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent New VTTablet
+ runs-on: ubuntu-22.04
needs:
- get_next_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -50,11 +57,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +76,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,18 +86,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +113,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -130,7 +138,7 @@ jobs:
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +160,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -187,4 +195,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
index a1fc737a459..9539f83f3b8 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Previous Release - Reparent Old Vtctl
+ runs-on: ubuntu-22.04
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent Old Vtctl
+ runs-on: ubuntu-22.04
needs:
- get_previous_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -47,11 +54,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +73,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,18 +83,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -102,7 +110,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -127,7 +135,7 @@ jobs:
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +157,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -187,4 +195,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
index 96eee1fbb99..965fb92786e 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
- runs-on: ubuntu-latest
+ name: Get Previous Release - Reparent Old VTTablet
+ runs-on: ubuntu-22.04
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,16 +29,23 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent Old VTTablet
+ runs-on: ubuntu-22.04
needs:
- get_previous_release
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -47,11 +54,11 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +73,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,18 +83,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.20.8
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -102,7 +110,7 @@ jobs:
sudo rm -rf /etc/mysql
# Install mysql80
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -127,7 +135,7 @@ jobs:
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +157,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -184,4 +192,4 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml
index 97a5ec9eca6..c0b668641ea 100644
--- a/.github/workflows/vtadmin_web_build.yml
+++ b/.github/workflows/vtadmin_web_build.yml
@@ -14,8 +14,15 @@ on:
jobs:
build:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -24,16 +31,16 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- - uses: actions/setup-node@v2
+ - uses: actions/setup-node@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
with:
# node-version should match package.json
- node-version: '16.13.0'
+ node-version: '16.19.0'
- name: Install dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false'
diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml
index 02071d72e53..9b7e9a68847 100644
--- a/.github/workflows/vtadmin_web_lint.yml
+++ b/.github/workflows/vtadmin_web_lint.yml
@@ -14,8 +14,15 @@ on:
jobs:
lint:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -24,16 +31,16 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- - uses: actions/setup-node@v2
+ - uses: actions/setup-node@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
with:
# node-version should match package.json
- node-version: '16.13.0'
+ node-version: '16.19.0'
- name: Install dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false'
diff --git a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml
index 21172a25759..bfc4a5d15ff 100644
--- a/.github/workflows/vtadmin_web_unit_tests.yml
+++ b/.github/workflows/vtadmin_web_unit_tests.yml
@@ -14,8 +14,15 @@ on:
jobs:
unit-tests:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-22.04
steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
@@ -24,16 +31,16 @@ jobs:
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- - uses: actions/setup-node@v2
+ - uses: actions/setup-node@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
with:
# node-version should match package.json
- node-version: '16.13.0'
+ node-version: '16.19.0'
- name: Install dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false'
diff --git a/.gitignore b/.gitignore
index 6e48b17eca9..881e89890cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -70,12 +70,7 @@ _test/
/vendor/*/
# release folder
-releases
-
-# Angular2 Bower Libs
-/web/vtctld2/.bowerrc~
-/web/vtctld2/bower.json~
-/web/vtctld2/public/bower_components/
+/releases/
# Local examples
/examples/local/vtdataroot
@@ -88,3 +83,6 @@ venv
.scannerwork
report
+
+# plan test output
+/go/vt/vtgate/planbuilder/testdata/plan_test*
diff --git a/.golangci.yml b/.golangci.yml
index f57fb88dea3..e2bdb5336e4 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,5 +1,5 @@
run:
- go: 1.18
+ go: 1.19
timeout: 10m
skip-dirs:
- go/vt/topo/k8stopo/client
@@ -18,13 +18,10 @@ linters:
disable-all: true
enable:
# Defaults
- - deadcode
- errcheck
- govet
- ineffassign
- - structcheck
- typecheck
- - varcheck
- staticcheck
- gosimple
@@ -32,6 +29,7 @@ linters:
- gofmt
- goimports
- exportloopref
+ - bodyclose
# revive is a replacement for golint, but we do not run it in CI for now.
# This is only enabled as a post-commit hook
@@ -44,10 +42,6 @@ issues:
- errcheck
- goimports
- - path: '^go/vt/vtadmin/cache/'
- linters:
- - structcheck
-
### BEGIN: errcheck exclusion rules. Each rule should be considered
# a TODO for removal after adding error checks to that package/file/etc,
# except where otherwise noted.
@@ -160,4 +154,4 @@ issues:
# https://github.com/golangci/golangci/wiki/Configuration
service:
- golangci-lint-version: 1.46.2 # use the fixed version to not introduce new linters unexpectedly
+ golangci-lint-version: 1.51.2 # use the fixed version to not introduce new linters unexpectedly
diff --git a/GITHUB_SELF_HOSTED_RUNNERS.md b/GITHUB_SELF_HOSTED_RUNNERS.md
index 15b89883b81..47d0f223df9 100644
--- a/GITHUB_SELF_HOSTED_RUNNERS.md
+++ b/GITHUB_SELF_HOSTED_RUNNERS.md
@@ -71,3 +71,21 @@ The logs will be stored in the `savedRuns` directory and can be copied locally v
A cronjob is already setup to empty the `savedRuns` directory every week so please download the runs
before they are deleted.
+
+## Running out of disk space in Self-hosted runners
+
+If the loads on the self-hosted runners increases due to multiple tests being moved to them or some other reason,
+they sometimes end up running out of disk space. This causes the runner to stop working all together.
+
+In order to fix this issue follow the following steps -
+1. `ssh` into the self-hosted runner by finding its address from the equinix dashboard.
+2. Clear out the disk by running `docker system prune -f --volumes --all`. This is the same command that we run on a cron on the server.
+3. Switch to the `github-runner` user
+ 1. `su github-runner`
+4. Resume an existing `screen`
+ 1. `screen -r`
+5. Start the runner again.
+ 1. `./run.sh`
+6. Verify that the runner has started accepting jobs again. Detach the screen and close the `ssh` connection.
+
+
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index 44f7e0d01fa..8adec25f8d9 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -44,15 +44,14 @@ As contributors gain experience and familiarity with the project, their profile
## Maintainers
-[Maintainers](https://github.com/vitessio/vitess/blob/main/MAINTAINERS.md) are community members who have shown that they are committed to the continued development of the project through ongoing engagement with the community. Maintainer-ship allows contributors to more easily carry on with their project related activities by giving them direct access to the project’s resources. That is, they can make changes directly to project outputs, without having to submit changes via pull requests.
+[Maintainers](https://github.com/vitessio/vitess/blob/main/MAINTAINERS.md) are community members who have shown that they are committed to the continued development of the project through ongoing engagement with the community. Maintainer-ship allows contributors to more easily carry on with their project related activities by giving them direct access to the project’s resources.
This does not mean that a maintainer is free to do what they want. In fact, maintainers have no more authority over the project than contributors. While maintainer-ship indicates a valued member of the community who has demonstrated a healthy respect for the project’s aims and objectives, their work continues to be reviewed by the community before acceptance in an official release.
-A maintainer is not allowed to merge their change without approval from another person. However, a small number of maintainers are allowed to sidestep this rule under justifiable circumstances. For example:
+A maintainer is not allowed to merge their change without approval from other maintainers. However, a small number of maintainers are allowed to sidestep this rule under justifiable circumstances. For example:
-* If a CI tool is broken, they may override the tool to still submit the change.
-* Minor typos or fixes for broken tests.
-* The change was approved through other means than the standard process.
+* If a CI tool is broken, they may override the tool to merge pull requests.
+* The change is a critical security fix and was approved through other means than the standard process.
Anyone can become a maintainer; there are no special requirements, other than to have shown a willingness and ability to participate in the project as a team player. Typically, a potential maintainer will need to show that they have an understanding of the project, its objectives and its strategy. They will also have provided valuable contributions to the project over a period of time.
diff --git a/GUIDING_PRINCIPLES.md b/GUIDING_PRINCIPLES.md
index 619480290b8..d648058a228 100644
--- a/GUIDING_PRINCIPLES.md
+++ b/GUIDING_PRINCIPLES.md
@@ -6,7 +6,7 @@ The Vitess project operates under the guiding principles described below. The [S
*“To be the most performant, scalable, and available NewSQL storage system in the Cloud.”*
-Vitess can currently run on bare-metal. However, the trend is clear that applications are moving to the cloud. For the foreseeable future, we should continue to support the ability to run on bare-metal, because it will facilitate migration to the cloud.
+Vitess can currently run on bare-metal or in the cloud. However, the trend is clear that applications are moving to the cloud. For the foreseeable future, we should continue to support the ability to run on bare-metal, because it will facilitate migration to the cloud.
The Vitess architecture is generic enough to accommodate any other RDBMS in the future. However, Vitess currently runs on MySQL and has many features that provide compatibility with it. Vitess should remain focused on MySQL until all frictions are removed for those who wish to migrate from MySQL to Vitess.
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index e7f0c9114e0..4d09f7eafb6 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -4,56 +4,69 @@ The following is the full list, alphabetically ordered.
* Andres Taylor ([systay](https://github.com/systay)) andres@planetscale.com
* Andrew Mason ([amason](https://github.com/ajm188)) andrew@planetscale.com
-* Anthony Yeh ([enisoc](https://github.com/enisoc)) enisoc@enisoc.dev
+* Arthur Schreiber ([arthurschreiber](https://github.com/arthurschreiber)) arthurschreiber@github.com
* Dan Kozlowski ([dkhenry](https://github.com/dkhenry)) dan.kozlowski@gmail.com
-* David Weitzman ([dweitzman](https://github.com/dweitzman)) dweitzman@pinterest.com
* Deepthi Sigireddi ([deepthi](https://github.com/deepthi)) deepthi@planetscale.com
* Derek Perkins ([derekperkins](https://github.com/derekperkins)) derek@nozzle.io
* Florent Poinsard ([frouioui](https://github.com/frouioui)) florent@planetscale.com
+* Frances Thai ([notfelineit](https://github.com/notfelineit)) frances@planetscale.com
* Harshit Gangal ([harshit-gangal](https://github.com/harshit-gangal)) harshit.gangal@gmail.com
-* Jon Tirsen ([tirsen](https://github.com/tirsen)) jontirsen@squareup.com
-* Mali Akmanalp ([makmanalp](https://github.com/makmanalp) makmanalp@hubspot.com
* Manan Gupta ([GuptaManan100](https://github.com/GuptaManan100)) manan@planetscale.com
* Matt Lord ([mattlord](https://github.com/mattlord)) mlord@planetscale.com
-* Michael Demmer ([demmer](https://github.com/demmer)) mdemmer@slack-corp.com
-* Michael Pawliszyn ([mpawliszyn](https://github.com/mpawliszyn)) mikepaw@squareup.com
-* Rafael Chacon ([rafael](https://github.com/rafael)) rchacon@figma.com
* Rohit Nayak ([rohit-nayak-ps](https://github.com/rohit-nayak-ps)) rohit@planetscale.com
* Shlomi Noach ([shlomi-noach](https://github.com/shlomi-noach)) shlomi@planetscale.com
-* Sugu Sougoumarane ([sougou](https://github.com/sougou)) sougou@planetscale.com
* Vicent Marti ([vmg](https://github.com/vmg)) vmg@planetscale.com
## Areas of expertise
### General Vitess
-sougou, deepthi, demmer, rafael, dweitzman, tirsen, enisoc
+deepthi, mattlord, derekperkins
### Builds
dkhenry, shlomi-noach, ajm188, vmg, GuptaManan100, frouioui
### Resharding
-sougou, rafael, tirsen, dweitzman, systay, rohit-nayak-ps, deepthi, mattlord
+rohit-nayak-ps, deepthi, mattlord
### Parser
-sougou, dweitzman, systay, harshit-gangal, vmg, GuptaManan100
+systay, harshit-gangal, vmg, GuptaManan100
### Planner
-sougou, systay, harshit-gangal, GuptaManan100, frouioui
+systay, harshit-gangal, GuptaManan100, frouioui
### Performance
vmg
### Cluster Management
-deepthi, rafael, enisoc, shlomi-noach, ajm188, GuptaManan100
+deepthi, shlomi-noach, ajm188, GuptaManan100
### Java
-mpawliszyn, makmanalp, harshit-gangal
+harshit-gangal
### Kubernetes
-derekperkins, dkhenry, enisoc
+derekperkins, dkhenry, GuptaManan100, frouioui
### VTAdmin
-ajm188
+ajm188, notfelineit
### Messaging
-derekperkins
+derekperkins, mattlord
+
+## Past Maintainers
+We thank the following past maintainers for their contributions.
+
+* Alain Jobart ([alainjobart](https://github.com/alainjobart))
+* Alkin Tezuysal ([askdba](https://github.com/askdba))
+* Anthony Yeh ([enisoc](https://github.com/enisoc))
+* David Weitzman ([dweitzman](https://github.com/dweitzman))
+* Jon Tirsen ([tirsen](https://github.com/tirsen))
+* Leo X. Lin ([leoxlin](https://github.com/leoxlin))
+* Mali Akmanalp ([makmanalp](https://github.com/makmanalp)
+* Michael Berlin ([michael-berlin](https://github.com/michael-berlin))
+* Michael Demmer ([demmer](https://github.com/demmer))
+* Michael Pawliszyn ([mpawliszyn](https://github.com/mpawliszyn))
+* Morgan Tocker ([morgo](https://github.com/morgo))
+* Paul Hemberger ([pH14](https://github.com/pH14))
+* Rafael Chacon ([rafael](https://github.com/rafael))
+* Sara Bee ([doeg](https://github.com/doeg))
+* Sugu Sougoumarane ([sougou](https://github.com/sougou))
diff --git a/Makefile b/Makefile
index 959965fae99..b69bb168707 100644
--- a/Makefile
+++ b/Makefile
@@ -46,7 +46,7 @@ export REWRITER=go/vt/sqlparser/rewriter.go
# Since we are not using this Makefile for compilation, limiting parallelism will not increase build time.
.NOTPARALLEL:
-.PHONY: all build install test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools web_bootstrap web_build web_start generate_ci_workflows
+.PHONY: all build install test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools generate_ci_workflows
all: build
@@ -92,12 +92,6 @@ endif
-ldflags "$(shell tools/build_version_flags.sh)" \
-o ${VTROOTBIN} ./go/...
- # build vtorc with CGO, because it depends on sqlite
- CGO_ENABLED=1 go build \
- -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \
- -ldflags "$(shell tools/build_version_flags.sh)" \
- -o ${VTROOTBIN} ./go/cmd/vtorc/...
-
# cross-build can be used to cross-compile Vitess client binaries
# Outside of select client binaries (namely vtctlclient & vtexplain), cross-compiled Vitess Binaries are not recommended for production deployments
# Usage: GOOS=darwin GOARCH=amd64 make cross-build
@@ -120,8 +114,6 @@ endif
echo "Missing vttablet at: ${VTROOTBIN}/${GOOS}_${GOARCH}." && exit; \
fi
- # Cross-compiling w/ cgo isn't trivial and we don't need vtorc, so we can skip building it
-
debug:
ifndef NOBANNER
echo $$(date): Building source tree
@@ -145,8 +137,7 @@ install: build
cross-install: cross-build
# binaries
mkdir -p "$${PREFIX}/bin"
- # Still no vtorc for cross-compile
- cp "${VTROOTBIN}/${GOOS}_${GOARCH}/"{mysqlctl,mysqlctld,vtadmin,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtbackup} "$${PREFIX}/bin/"
+ cp "${VTROOTBIN}/${GOOS}_${GOARCH}/"{mysqlctl,mysqlctld,vtorc,vtadmin,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtbackup} "$${PREFIX}/bin/"
# Install local install the binaries needed to run vitess locally
# Usage: make install-local PREFIX=/path/to/install/root
@@ -164,9 +155,6 @@ install-testing: build
cp "$${VTROOT}/bin/"{mysqlctld,mysqlctl,vtcombo,vttestserver} "$${PREFIX}/bin/"
# config files
cp -R config "$${PREFIX}/"
- # vtctld web UI files
- mkdir -p "$${PREFIX}/web/vtctld2"
- cp -R web/vtctld2/app "$${PREFIX}/web/vtctld2"
vtctldclient: go/vt/proto/vtctlservice/vtctlservice.pb.go
make -C go/vt/vtctl/vtctldclient
@@ -177,28 +165,23 @@ parser:
demo:
go install ./examples/demo/demo.go
-codegen: asthelpergen sizegen parser astfmtgen
+codegen: asthelpergen sizegen parser
visitor: asthelpergen
echo "make visitor has been replaced by make asthelpergen"
asthelpergen:
- go run ./go/tools/asthelpergen/main \
- --in ./go/vt/sqlparser \
- --iface vitess.io/vitess/go/vt/sqlparser.SQLNode \
- --except "*ColName"
+ go generate ./go/vt/sqlparser/...
sizegen:
go run ./go/tools/sizegen/sizegen.go \
--in ./go/... \
--gen vitess.io/vitess/go/pools.Setting \
+ --gen vitess.io/vitess/go/vt/schema.DDLStrategySetting \
--gen vitess.io/vitess/go/vt/vtgate/engine.Plan \
--gen vitess.io/vitess/go/vt/vttablet/tabletserver.TabletPlan \
--gen vitess.io/vitess/go/sqltypes.Result
-astfmtgen:
- go run ./go/tools/astfmtgen/main.go vitess.io/vitess/go/vt/sqlparser/...
-
# To pass extra flags, run test.go manually.
# For example: go run test.go -docker=false -- --extra-flag
# For more info see: go run test.go -help
@@ -290,9 +273,9 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto
# Please read docker/README.md to understand the different available images.
# This rule builds the bootstrap images for all flavors.
-DOCKER_IMAGES_FOR_TEST = mariadb mariadb103 mysql57 mysql80 percona57 percona80
+DOCKER_IMAGES_FOR_TEST = mysql57 mysql80 percona57 percona80
DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST)
-BOOTSTRAP_VERSION=11
+BOOTSTRAP_VERSION=14.7
ensure_bootstrap_version:
find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \;
sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go
@@ -328,7 +311,7 @@ endef
docker_base:
${call build_docker_image,docker/base/Dockerfile,vitess/base}
-DOCKER_BASE_SUFFIX = mysql80 mariadb mariadb103 percona57 percona80
+DOCKER_BASE_SUFFIX = mysql80 percona57 percona80
DOCKER_BASE_TARGETS = $(addprefix docker_base_, $(DOCKER_BASE_SUFFIX))
$(DOCKER_BASE_TARGETS): docker_base_%:
${call build_docker_image,docker/base/Dockerfile.$*,vitess/base:$*}
@@ -338,13 +321,16 @@ docker_base_all: docker_base $(DOCKER_BASE_TARGETS)
docker_lite:
${call build_docker_image,docker/lite/Dockerfile,vitess/lite}
-DOCKER_LITE_SUFFIX = mysql57 ubi7.mysql57 mysql80 ubi7.mysql80 mariadb mariadb103 percona57 ubi7.percona57 percona80 ubi7.percona80 alpine testing ubi8.mysql80 ubi8.arm64.mysql80
+DOCKER_LITE_SUFFIX = mysql57 ubi7.mysql57 mysql80 ubi7.mysql80 percona57 ubi7.percona57 percona80 ubi7.percona80 testing ubi8.mysql80 ubi8.arm64.mysql80
DOCKER_LITE_TARGETS = $(addprefix docker_lite_,$(DOCKER_LITE_SUFFIX))
$(DOCKER_LITE_TARGETS): docker_lite_%:
${call build_docker_image,docker/lite/Dockerfile.$*,vitess/lite:$*}
docker_lite_all: docker_lite $(DOCKER_LITE_TARGETS)
+docker_lite_push:
+ for i in $(DOCKER_LITE_SUFFIX); do echo "pushing lite image: $$i"; docker push vitess/lite:$$i || exit 1; done
+
docker_local:
${call build_docker_image,docker/local/Dockerfile,vitess/local}
@@ -362,7 +348,7 @@ $(DOCKER_VTTESTSERVER_TARGETS): docker_vttestserver_%:
docker_vttestserver: $(DOCKER_VTTESTSERVER_TARGETS)
# This rule loads the working copy of the code into a bootstrap image,
# and then runs the tests inside Docker.
-# Example: $ make docker_test flavor=mariadb
+# Example: $ make docker_test flavor=mysql80
docker_test:
go run test.go -flavor $(flavor)
@@ -383,8 +369,11 @@ release: docker_base
echo "git push origin v$(VERSION)"
echo "Also, don't forget the upload releases/v$(VERSION).tar.gz file to GitHub releases"
-do_release:
- ./tools/do_release.sh
+create_release:
+ ./tools/create_release.sh
+
+back_to_dev_mode:
+ ./tools/back_to_dev_mode.sh
tools:
echo $$(date): Installing dependencies
@@ -455,22 +444,6 @@ client_go_gen: install_k8s-code-generator
mv vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go
rm -rf vitess.io/vitess/go/vt/topo/k8stopo/
-# Check prerequisites and install dependencies
-web_bootstrap:
- ./tools/web_bootstrap.sh
-
-# Do a production build of the vtctld UI.
-# This target needs to be manually run every time any file within web/vtctld2/app
-# is modified to regenerate assets.
-web_build: web_bootstrap
- ./tools/web_build.sh
-
-# Start a front-end dev server with hot reloading on http://localhost:4200.
-# This expects that you have a vtctld API server running on http://localhost:15000.
-# Following the local Docker install guide is recommended: https://vitess.io/docs/get-started/local-docker/
-web_start: web_bootstrap
- cd web/vtctld2 && npm run start
-
vtadmin_web_install:
cd web/vtadmin && npm install
@@ -490,8 +463,5 @@ vtadmin_authz_testgen:
generate_ci_workflows:
cd test && go run ci_workflow_gen.go && cd ..
-release-notes:
- go run ./go/tools/release-notes --from "$(FROM)" --to "$(TO)" --version "$(VERSION)" --summary "$(SUMMARY)"
-
install_kubectl_kind:
./tools/get_kubectl_kind.sh
diff --git a/doc/VitessSpectrum.png b/VitessSpectrum.png
similarity index 100%
rename from doc/VitessSpectrum.png
rename to VitessSpectrum.png
diff --git a/bootstrap.sh b/bootstrap.sh
index fa93b1d4d32..868cc4b4841 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -161,9 +161,8 @@ install_etcd() {
file="etcd-${version}-${platform}-${target}.${ext}"
# This is how we'd download directly from source:
- # download_url=https://github.com/etcd-io/etcd/releases/download
- # wget "$download_url/$version/$file"
- $VTROOT/tools/wget-retry "${VITESS_RESOURCES_DOWNLOAD_URL}/${file}"
+ $VTROOT/tools/wget-retry "https://github.com/etcd-io/etcd/releases/download/$version/$file"
+ #$VTROOT/tools/wget-retry "${VITESS_RESOURCES_DOWNLOAD_URL}/${file}"
if [ "$ext" = "tar.gz" ]; then
tar xzf "$file"
else
@@ -275,10 +274,10 @@ install_all() {
fi
# etcd
- command -v etcd && echo "etcd already installed" || install_dep "etcd" "v3.5.3" "$VTROOT/dist/etcd" install_etcd
+ install_dep "etcd" "v3.5.6" "$VTROOT/dist/etcd" install_etcd
# k3s
- command -v k3s || install_dep "k3s" "v1.0.0" "$VTROOT/dist/k3s" install_k3s
+ command -v k3s || install_dep "k3s" "v1.0.0" "$VTROOT/dist/k3s" install_k3s
# consul
if [ "$BUILD_CONSUL" == 1 ] ; then
diff --git a/build.env b/build.env
index 5a37f4f41bc..ca1110e9af4 100755
--- a/build.env
+++ b/build.env
@@ -17,7 +17,7 @@
source ./tools/shell_functions.inc
go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions."
-goversion_min 1.18.5 || fail "Go version reported: `go version`. Version 1.18.5+ required. See https://vitess.io/contributing/build-from-source for install instructions."
+goversion_min 1.20.8 || echo "Go version reported: `go version`. Version 1.20.8+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
mkdir -p dist
mkdir -p bin
@@ -34,8 +34,6 @@ mkdir -p "$VTDATAROOT"
# TODO(mberlin): Which of these can be deleted?
ln -snf "$PWD/go/vt/zkctl/zksrv.sh" bin/zksrv.sh
ln -snf "$PWD/test/vthook-test.sh" vthook/test.sh
-ln -snf "$PWD/test/vthook-test_backup_error" vthook/test_backup_error
-ln -snf "$PWD/test/vthook-test_backup_transform" vthook/test_backup_transform
# install git hooks
diff --git a/doc/releasenotes/10_0_0_release_notes.md b/changelog/10.0/10.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_0_release_notes.md
rename to changelog/10.0/10.0.0/release_notes.md
diff --git a/doc/releasenotes/10_0_1_release_notes.md b/changelog/10.0/10.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_1_release_notes.md
rename to changelog/10.0/10.0.1/release_notes.md
diff --git a/doc/releasenotes/10_0_2_release_notes.md b/changelog/10.0/10.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_2_release_notes.md
rename to changelog/10.0/10.0.2/release_notes.md
diff --git a/doc/releasenotes/10_0_3_release_notes.md b/changelog/10.0/10.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_3_release_notes.md
rename to changelog/10.0/10.0.3/release_notes.md
diff --git a/doc/releasenotes/10_0_3_summary.md b/changelog/10.0/10.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/10_0_3_summary.md
rename to changelog/10.0/10.0.3/summary.md
diff --git a/doc/releasenotes/10_0_4_release_notes.md b/changelog/10.0/10.0.4/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_4_release_notes.md
rename to changelog/10.0/10.0.4/release_notes.md
diff --git a/doc/releasenotes/10_0_4_summary.md b/changelog/10.0/10.0.4/summary.md
similarity index 100%
rename from doc/releasenotes/10_0_4_summary.md
rename to changelog/10.0/10.0.4/summary.md
diff --git a/doc/releasenotes/10_0_5_release_notes.md b/changelog/10.0/10.0.5/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_5_release_notes.md
rename to changelog/10.0/10.0.5/release_notes.md
diff --git a/doc/releasenotes/10_0_5_summary.md b/changelog/10.0/10.0.5/summary.md
similarity index 100%
rename from doc/releasenotes/10_0_5_summary.md
rename to changelog/10.0/10.0.5/summary.md
diff --git a/changelog/10.0/README.md b/changelog/10.0/README.md
new file mode 100644
index 00000000000..304cc933a16
--- /dev/null
+++ b/changelog/10.0/README.md
@@ -0,0 +1,18 @@
+## v10.0
+* **[10.0.5](10.0.5)**
+ * [Release Notes](10.0.5/release_notes.md)
+
+* **[10.0.4](10.0.4)**
+ * [Release Notes](10.0.4/release_notes.md)
+
+* **[10.0.3](10.0.3)**
+ * [Release Notes](10.0.3/release_notes.md)
+
+* **[10.0.2](10.0.2)**
+ * [Release Notes](10.0.2/release_notes.md)
+
+* **[10.0.1](10.0.1)**
+ * [Release Notes](10.0.1/release_notes.md)
+
+* **[10.0.0](10.0.0)**
+ * [Release Notes](10.0.0/release_notes.md)
diff --git a/doc/releasenotes/11_0_0_release_notes.md b/changelog/11.0/11.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_0_release_notes.md
rename to changelog/11.0/11.0.0/release_notes.md
diff --git a/doc/releasenotes/11_0_1_release_notes.md b/changelog/11.0/11.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_1_release_notes.md
rename to changelog/11.0/11.0.1/release_notes.md
diff --git a/doc/releasenotes/11_0_2_release_notes.md b/changelog/11.0/11.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_2_release_notes.md
rename to changelog/11.0/11.0.2/release_notes.md
diff --git a/doc/releasenotes/11_0_2_summary.md b/changelog/11.0/11.0.2/summary.md
similarity index 100%
rename from doc/releasenotes/11_0_2_summary.md
rename to changelog/11.0/11.0.2/summary.md
diff --git a/doc/releasenotes/11_0_3_release_notes.md b/changelog/11.0/11.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_3_release_notes.md
rename to changelog/11.0/11.0.3/release_notes.md
diff --git a/doc/releasenotes/11_0_3_summary.md b/changelog/11.0/11.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/11_0_3_summary.md
rename to changelog/11.0/11.0.3/summary.md
diff --git a/doc/releasenotes/11_0_4_release_notes.md b/changelog/11.0/11.0.4/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_4_release_notes.md
rename to changelog/11.0/11.0.4/release_notes.md
diff --git a/doc/releasenotes/11_0_4_summary.md b/changelog/11.0/11.0.4/summary.md
similarity index 100%
rename from doc/releasenotes/11_0_4_summary.md
rename to changelog/11.0/11.0.4/summary.md
diff --git a/changelog/11.0/README.md b/changelog/11.0/README.md
new file mode 100644
index 00000000000..51dfb2e5648
--- /dev/null
+++ b/changelog/11.0/README.md
@@ -0,0 +1,15 @@
+## v11.0
+* **[11.0.4](11.0.4)**
+ * [Release Notes](11.0.4/release_notes.md)
+
+* **[11.0.3](11.0.3)**
+ * [Release Notes](11.0.3/release_notes.md)
+
+* **[11.0.2](11.0.2)**
+ * [Release Notes](11.0.2/release_notes.md)
+
+* **[11.0.1](11.0.1)**
+ * [Release Notes](11.0.1/release_notes.md)
+
+* **[11.0.0](11.0.0)**
+ * [Release Notes](11.0.0/release_notes.md)
diff --git a/doc/releasenotes/12_0_0_release_notes.md b/changelog/12.0/12.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_0_release_notes.md
rename to changelog/12.0/12.0.0/release_notes.md
diff --git a/doc/releasenotes/12_0_0_summary.md b/changelog/12.0/12.0.0/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_0_summary.md
rename to changelog/12.0/12.0.0/summary.md
diff --git a/doc/releasenotes/12_0_1_release_notes.md b/changelog/12.0/12.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_1_release_notes.md
rename to changelog/12.0/12.0.1/release_notes.md
diff --git a/doc/releasenotes/12_0_1_summary.md b/changelog/12.0/12.0.1/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_1_summary.md
rename to changelog/12.0/12.0.1/summary.md
diff --git a/doc/releasenotes/12_0_2_release_notes.md b/changelog/12.0/12.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_2_release_notes.md
rename to changelog/12.0/12.0.2/release_notes.md
diff --git a/doc/releasenotes/12_0_2_summary.md b/changelog/12.0/12.0.2/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_2_summary.md
rename to changelog/12.0/12.0.2/summary.md
diff --git a/doc/releasenotes/12_0_3_release_notes.md b/changelog/12.0/12.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_3_release_notes.md
rename to changelog/12.0/12.0.3/release_notes.md
diff --git a/doc/releasenotes/12_0_3_summary.md b/changelog/12.0/12.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_3_summary.md
rename to changelog/12.0/12.0.3/summary.md
diff --git a/doc/releasenotes/12_0_4_release_notes.md b/changelog/12.0/12.0.4/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_4_release_notes.md
rename to changelog/12.0/12.0.4/release_notes.md
diff --git a/doc/releasenotes/12_0_5_changelog.md b/changelog/12.0/12.0.5/changelog.md
similarity index 100%
rename from doc/releasenotes/12_0_5_changelog.md
rename to changelog/12.0/12.0.5/changelog.md
diff --git a/doc/releasenotes/12_0_5_release_notes.md b/changelog/12.0/12.0.5/release_notes.md
similarity index 92%
rename from doc/releasenotes/12_0_5_release_notes.md
rename to changelog/12.0/12.0.5/release_notes.md
index fc2c613e4da..dbff8a5aade 100644
--- a/doc/releasenotes/12_0_5_release_notes.md
+++ b/changelog/12.0/12.0.5/release_notes.md
@@ -9,7 +9,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d
> go1.17.12 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the runtime, and the runtime/metrics package. [See the Go 1.17.12 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.17.12+label%3ACherryPickApproved) on our issue tracker for details.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_5_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.5/changelog.md).
The release includes 7 commits (excluding merges)
diff --git a/doc/releasenotes/12_0_6_changelog.md b/changelog/12.0/12.0.6/changelog.md
similarity index 100%
rename from doc/releasenotes/12_0_6_changelog.md
rename to changelog/12.0/12.0.6/changelog.md
diff --git a/doc/releasenotes/12_0_6_release_notes.md b/changelog/12.0/12.0.6/release_notes.md
similarity index 93%
rename from doc/releasenotes/12_0_6_release_notes.md
rename to changelog/12.0/12.0.6/release_notes.md
index 8afbe0a4239..c9c743d95ea 100644
--- a/doc/releasenotes/12_0_6_release_notes.md
+++ b/changelog/12.0/12.0.6/release_notes.md
@@ -15,7 +15,7 @@ This change is documented on our website [here](https://vitess.io/docs/12.0/over
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_6_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.6/changelog.md).
The release includes 11 commits (excluding merges)
diff --git a/doc/releasenotes/12_0_6_summary.md b/changelog/12.0/12.0.6/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_6_summary.md
rename to changelog/12.0/12.0.6/summary.md
diff --git a/changelog/12.0/README.md b/changelog/12.0/README.md
new file mode 100644
index 00000000000..131b2df443d
--- /dev/null
+++ b/changelog/12.0/README.md
@@ -0,0 +1,23 @@
+## v12.0
+* **[12.0.6](12.0.6)**
+ * [Changelog](12.0.6/changelog.md)
+ * [Release Notes](12.0.6/release_notes.md)
+
+* **[12.0.5](12.0.5)**
+ * [Changelog](12.0.5/changelog.md)
+ * [Release Notes](12.0.5/release_notes.md)
+
+* **[12.0.4](12.0.4)**
+ * [Release Notes](12.0.4/release_notes.md)
+
+* **[12.0.3](12.0.3)**
+ * [Release Notes](12.0.3/release_notes.md)
+
+* **[12.0.2](12.0.2)**
+ * [Release Notes](12.0.2/release_notes.md)
+
+* **[12.0.1](12.0.1)**
+ * [Release Notes](12.0.1/release_notes.md)
+
+* **[12.0.0](12.0.0)**
+ * [Release Notes](12.0.0/release_notes.md)
diff --git a/doc/releasenotes/13_0_0_release_notes.md b/changelog/13.0/13.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/13_0_0_release_notes.md
rename to changelog/13.0/13.0.0/release_notes.md
diff --git a/doc/releasenotes/13_0_0_summary.md b/changelog/13.0/13.0.0/summary.md
similarity index 100%
rename from doc/releasenotes/13_0_0_summary.md
rename to changelog/13.0/13.0.0/summary.md
diff --git a/doc/releasenotes/13_0_1_release_notes.md b/changelog/13.0/13.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/13_0_1_release_notes.md
rename to changelog/13.0/13.0.1/release_notes.md
diff --git a/doc/releasenotes/13_0_2_changelog.md b/changelog/13.0/13.0.2/changelog.md
similarity index 100%
rename from doc/releasenotes/13_0_2_changelog.md
rename to changelog/13.0/13.0.2/changelog.md
diff --git a/doc/releasenotes/13_0_2_release_notes.md b/changelog/13.0/13.0.2/release_notes.md
similarity index 93%
rename from doc/releasenotes/13_0_2_release_notes.md
rename to changelog/13.0/13.0.2/release_notes.md
index 310eb5e633a..12692031e2a 100644
--- a/doc/releasenotes/13_0_2_release_notes.md
+++ b/changelog/13.0/13.0.2/release_notes.md
@@ -9,7 +9,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d
> go1.17.12 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the runtime, and the runtime/metrics package. [See the Go 1.17.12 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.17.12+label%3ACherryPickApproved) on our issue tracker for details.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/13_0_2_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/13.0/13.0.2/changelog.md).
The release includes 28 commits (excluding merges)
Thanks to all our contributors: @GuptaManan100, @aquarapid, @frouioui, @harshit-gangal, @mattlord, @rohit-nayak-ps, @systay, @vitess-bot[bot], @vmg
\ No newline at end of file
diff --git a/doc/releasenotes/13_0_2_summary.md b/changelog/13.0/13.0.2/summary.md
similarity index 100%
rename from doc/releasenotes/13_0_2_summary.md
rename to changelog/13.0/13.0.2/summary.md
diff --git a/doc/releasenotes/13_0_3_changelog.md b/changelog/13.0/13.0.3/changelog.md
similarity index 100%
rename from doc/releasenotes/13_0_3_changelog.md
rename to changelog/13.0/13.0.3/changelog.md
diff --git a/doc/releasenotes/13_0_3_release_notes.md b/changelog/13.0/13.0.3/release_notes.md
similarity index 93%
rename from doc/releasenotes/13_0_3_release_notes.md
rename to changelog/13.0/13.0.3/release_notes.md
index 3fee980f099..b04c0d69d20 100644
--- a/doc/releasenotes/13_0_3_release_notes.md
+++ b/changelog/13.0/13.0.3/release_notes.md
@@ -15,7 +15,7 @@ This change is documented on our website [here](https://vitess.io/docs/13.0/over
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/13_0_3_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/13.0/13.0.3/changelog.md).
The release includes 17 commits(excluding merges)
diff --git a/doc/releasenotes/13_0_3_summary.md b/changelog/13.0/13.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/13_0_3_summary.md
rename to changelog/13.0/13.0.3/summary.md
diff --git a/changelog/13.0/README.md b/changelog/13.0/README.md
new file mode 100644
index 00000000000..780625ef69a
--- /dev/null
+++ b/changelog/13.0/README.md
@@ -0,0 +1,14 @@
+## v13.0
+* **[13.0.3](13.0.3)**
+ * [Changelog](13.0.3/changelog.md)
+ * [Release Notes](13.0.3/release_notes.md)
+
+* **[13.0.2](13.0.2)**
+ * [Changelog](13.0.2/changelog.md)
+ * [Release Notes](13.0.2/release_notes.md)
+
+* **[13.0.1](13.0.1)**
+ * [Release Notes](13.0.1/release_notes.md)
+
+* **[13.0.0](13.0.0)**
+ * [Release Notes](13.0.0/release_notes.md)
diff --git a/doc/releasenotes/14_0_0_changelog.md b/changelog/14.0/14.0.0/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_0_changelog.md
rename to changelog/14.0/14.0.0/changelog.md
diff --git a/doc/releasenotes/14_0_0_release_notes.md b/changelog/14.0/14.0.0/release_notes.md
similarity index 98%
rename from doc/releasenotes/14_0_0_release_notes.md
rename to changelog/14.0/14.0.0/release_notes.md
index 50d02232ae1..5f88f6975db 100644
--- a/doc/releasenotes/14_0_0_release_notes.md
+++ b/changelog/14.0/14.0.0/release_notes.md
@@ -18,6 +18,7 @@
## Known Issues
- [VTOrc doesn't discover the tablets](https://github.com/vitessio/vitess/issues/10650) of a keyspace if the durability policy doesn't exist in the topo server when it comes up. This can be resolved by restarting VTOrc.
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
## Major Changes
@@ -318,7 +319,7 @@ Work has gone into making the advisory locks (`get_lock()`, `release_lock()`, et
A long time ago, the sharding column and type were specified at the keyspace level. This syntax is now deprecated and will be removed in v15.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_0_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.0/changelog.md).
The release includes 1101 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_0_summary.md b/changelog/14.0/14.0.0/summary.md
similarity index 98%
rename from doc/releasenotes/14_0_0_summary.md
rename to changelog/14.0/14.0.0/summary.md
index e02c9d9a282..6047ec7ec16 100644
--- a/doc/releasenotes/14_0_0_summary.md
+++ b/changelog/14.0/14.0.0/summary.md
@@ -17,6 +17,7 @@
## Known Issues
- [VTOrc doesn't discover the tablets](https://github.com/vitessio/vitess/issues/10650) of a keyspace if the durability policy doesn't exist in the topo server when it comes up. This can be resolved by restarting VTOrc.
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
## Major Changes
diff --git a/doc/releasenotes/14_0_1_changelog.md b/changelog/14.0/14.0.1/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_1_changelog.md
rename to changelog/14.0/14.0.1/changelog.md
diff --git a/doc/releasenotes/14_0_1_release_notes.md b/changelog/14.0/14.0.1/release_notes.md
similarity index 80%
rename from doc/releasenotes/14_0_1_release_notes.md
rename to changelog/14.0/14.0.1/release_notes.md
index 7215301939f..639af4fce96 100644
--- a/doc/releasenotes/14_0_1_release_notes.md
+++ b/changelog/14.0/14.0.1/release_notes.md
@@ -1,4 +1,8 @@
# Release of Vitess v14.0.1
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.4`
@@ -9,7 +13,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d
> go1.18.4 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the linker, the runtime, and the runtime/metrics package. [See the Go 1.18.4 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.18.4+label%3ACherryPickApproved) on our issue tracker for details.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_1_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.1/changelog.md).
The release includes 25 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_1_summary.md b/changelog/14.0/14.0.1/summary.md
similarity index 80%
rename from doc/releasenotes/14_0_1_summary.md
rename to changelog/14.0/14.0.1/summary.md
index 1f2f985baba..5a176b6af16 100644
--- a/doc/releasenotes/14_0_1_summary.md
+++ b/changelog/14.0/14.0.1/summary.md
@@ -1,3 +1,7 @@
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.4`
diff --git a/doc/releasenotes/14_0_2_changelog.md b/changelog/14.0/14.0.2/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_2_changelog.md
rename to changelog/14.0/14.0.2/changelog.md
diff --git a/doc/releasenotes/14_0_2_release_notes.md b/changelog/14.0/14.0.2/release_notes.md
similarity index 83%
rename from doc/releasenotes/14_0_2_release_notes.md
rename to changelog/14.0/14.0.2/release_notes.md
index 956ca21ef62..724673af576 100644
--- a/doc/releasenotes/14_0_2_release_notes.md
+++ b/changelog/14.0/14.0.2/release_notes.md
@@ -1,4 +1,8 @@
# Release of Vitess v14.0.2
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.5`
@@ -14,7 +18,7 @@ Since the end-of-life of MariaDB 10.2, its Docker image is unavailable, and we d
You can find more information on the list of supported databases on our documentation website, [here](https://vitess.io/docs/14.0/overview/supported-databases/).
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_2_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.2/changelog.md).
The release includes 23 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_2_summary.md b/changelog/14.0/14.0.2/summary.md
similarity index 85%
rename from doc/releasenotes/14_0_2_summary.md
rename to changelog/14.0/14.0.2/summary.md
index 05a1aac5d68..8b26887dd01 100644
--- a/doc/releasenotes/14_0_2_summary.md
+++ b/changelog/14.0/14.0.2/summary.md
@@ -1,3 +1,7 @@
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.5`
diff --git a/doc/releasenotes/14_0_3_changelog.md b/changelog/14.0/14.0.3/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_3_changelog.md
rename to changelog/14.0/14.0.3/changelog.md
diff --git a/doc/releasenotes/14_0_3_release_notes.md b/changelog/14.0/14.0.3/release_notes.md
similarity index 77%
rename from doc/releasenotes/14_0_3_release_notes.md
rename to changelog/14.0/14.0.3/release_notes.md
index d9cd4ac7a61..5d5cc9b871c 100644
--- a/doc/releasenotes/14_0_3_release_notes.md
+++ b/changelog/14.0/14.0.3/release_notes.md
@@ -1,4 +1,8 @@
# Release of Vitess v14.0.3
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Fix VTOrc Discovery
@@ -8,7 +12,7 @@ This problem could be resolved by restarting the VTOrc so that it discovers all
frequently, this posed a greater challenge, since some pods when evicted and rescheduled on a different node, would sometimes fail to be discovered by VTOrc.
This has problem has been addressed in this patch by the fix https://github.com/vitessio/vitess/pull/10662.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_3_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.3/changelog.md).
The release includes 12 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_3_summary.md b/changelog/14.0/14.0.3/summary.md
similarity index 77%
rename from doc/releasenotes/14_0_3_summary.md
rename to changelog/14.0/14.0.3/summary.md
index 9d9364f67b3..121d9054a19 100644
--- a/doc/releasenotes/14_0_3_summary.md
+++ b/changelog/14.0/14.0.3/summary.md
@@ -1,3 +1,7 @@
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Fix VTOrc Discovery
diff --git a/changelog/14.0/14.0.4/changelog.md b/changelog/14.0/14.0.4/changelog.md
new file mode 100644
index 00000000000..bad71636814
--- /dev/null
+++ b/changelog/14.0/14.0.4/changelog.md
@@ -0,0 +1,43 @@
+# Changelog of Vitess v14.0.4
+
+### Bug fixes
+#### Backup and Restore
+ * Detect redo log location dynamically based on presence [#11555](https://github.com/vitessio/vitess/pull/11555)
+#### Build/CI
+ * Fix the script `check_make_sizegen` [#11465](https://github.com/vitessio/vitess/pull/11465)
+ * Skip `TestComparisonSemantics` test [#11474](https://github.com/vitessio/vitess/pull/11474)
+ * Addition of a CI tool to detect dead links in test/config.json [#11668](https://github.com/vitessio/vitess/pull/11668)
+ * Fix files changes filtering in CI [#11714](https://github.com/vitessio/vitess/pull/11714)
+#### Query Serving
+ * fix: do not rewrite single columns in derived tables [#11419](https://github.com/vitessio/vitess/pull/11419)
+ * Push down derived tables under route when possible [#11422](https://github.com/vitessio/vitess/pull/11422)
+ * collations: fix coercion semantics according to 8.0.31 changes [#11487](https://github.com/vitessio/vitess/pull/11487)
+ * [14.0] Fix JSON functions parsing [#11624](https://github.com/vitessio/vitess/pull/11624)
+ * [bugfix] Allow VTExplain to handle shards that are not active during resharding [#11640](https://github.com/vitessio/vitess/pull/11640)
+ * [release-14.0] Do not multiply `AggregateRandom` in JOINs [#11671](https://github.com/vitessio/vitess/pull/11671)
+ * [14.0] Send errors in stream instead of a grpc error from streaming rpcs when transaction or reserved connection is acquired [#11688](https://github.com/vitessio/vitess/pull/11688)
+ * Push down derived tables under route when possible [#11786](https://github.com/vitessio/vitess/pull/11786)
+### CI/Build
+#### Build/CI
+ * [release-14.0] Remove Launchable in the workflows [#11244](https://github.com/vitessio/vitess/pull/11244)
+ * [release-14.0] Add automation to change vitess version in the docker-release script (#11682) [#11814](https://github.com/vitessio/vitess/pull/11814)
+ * Remove Tests from Self-hosted runners [#11838](https://github.com/vitessio/vitess/pull/11838)
+#### Governance
+ * codeowners: have at least two for almost every package [#11639](https://github.com/vitessio/vitess/pull/11639)
+#### VReplication
+ * update jsonparser dependency [#11694](https://github.com/vitessio/vitess/pull/11694)
+### Enhancement
+#### General
+ * [release-14.0] Upgrade to `go1.18.7` [#11510](https://github.com/vitessio/vitess/pull/11510)
+#### Query Serving
+ * Improve route merging for queries that have conditions on different vindexes, but can be merged via join predicates. [#10942](https://github.com/vitessio/vitess/pull/10942)
+### Release
+#### Documentation
+ * Prepare the release notes summary for `v14.0.4` [#11803](https://github.com/vitessio/vitess/pull/11803)
+#### General
+ * Release of v14.0.3 [#11404](https://github.com/vitessio/vitess/pull/11404)
+ * Back to dev mode after v14.0.3 [#11405](https://github.com/vitessio/vitess/pull/11405)
+### Testing
+#### Query Serving
+ * [V14] Better plan-tests [#11435](https://github.com/vitessio/vitess/pull/11435)
+
diff --git a/changelog/14.0/14.0.4/release_notes.md b/changelog/14.0/14.0.4/release_notes.md
new file mode 100644
index 00000000000..31cbbc26627
--- /dev/null
+++ b/changelog/14.0/14.0.4/release_notes.md
@@ -0,0 +1,22 @@
+# Release of Vitess v14.0.4
+## Major Changes
+
+### Upgrade to `go1.18.7`
+
+Vitess `v14.0.4` now runs on `go1.18.7`.
+The patch release of Go, `go1.18.7`, was one of the main reasons for this release as it includes important security fixes to packages used by Vitess.
+Below is a summary of this patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.18).
+
+> go1.18.7 (released 2022-10-04) includes security fixes to the archive/tar, net/http/httputil, and regexp packages, as well as bug fixes to the compiler, the linker, and the go/types package.
+
+### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
+------------
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.4/changelog.md).
+
+The release includes 24 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @dbussink, @frouioui, @harshit-gangal, @systay, @vitess-bot[bot]
+
diff --git a/changelog/14.0/14.0.4/summary.md b/changelog/14.0/14.0.4/summary.md
new file mode 100644
index 00000000000..683e4067557
--- /dev/null
+++ b/changelog/14.0/14.0.4/summary.md
@@ -0,0 +1,14 @@
+## Major Changes
+
+### Upgrade to `go1.18.7`
+
+Vitess `v14.0.4` now runs on `go1.18.7`.
+The patch release of Go, `go1.18.7`, was one of the main reasons for this release as it includes important security fixes to packages used by Vitess.
+Below is a summary of this patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.18).
+
+> go1.18.7 (released 2022-10-04) includes security fixes to the archive/tar, net/http/httputil, and regexp packages, as well as bug fixes to the compiler, the linker, and the go/types package.
+
+### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
\ No newline at end of file
diff --git a/changelog/14.0/README.md b/changelog/14.0/README.md
new file mode 100644
index 00000000000..10f12867483
--- /dev/null
+++ b/changelog/14.0/README.md
@@ -0,0 +1,20 @@
+## v14.0
+* **[14.0.4](14.0.4)**
+ * [Changelog](14.0.4/changelog.md)
+ * [Release Notes](14.0.4/release_notes.md)
+
+* **[14.0.3](14.0.3)**
+ * [Changelog](14.0.3/changelog.md)
+ * [Release Notes](14.0.3/release_notes.md)
+
+* **[14.0.2](14.0.2)**
+ * [Changelog](14.0.2/changelog.md)
+ * [Release Notes](14.0.2/release_notes.md)
+
+* **[14.0.1](14.0.1)**
+ * [Changelog](14.0.1/changelog.md)
+ * [Release Notes](14.0.1/release_notes.md)
+
+* **[14.0.0](14.0.0)**
+ * [Changelog](14.0.0/changelog.md)
+ * [Release Notes](14.0.0/release_notes.md)
diff --git a/changelog/15.0/15.0.0/changelog.md b/changelog/15.0/15.0.0/changelog.md
new file mode 100644
index 00000000000..88da2b594bf
--- /dev/null
+++ b/changelog/15.0/15.0.0/changelog.md
@@ -0,0 +1,616 @@
+# Changelog of Vitess v15.0.0
+
+### Announcement
+#### Query Serving
+ * Remove tablet query plan field caching [#10489](https://github.com/vitessio/vitess/pull/10489)
+ * delete unused flags and handling of deprecated protobuf fields [#10612](https://github.com/vitessio/vitess/pull/10612)
+### Bug fixes
+#### Backup and Restore
+ * Remove built-in decompression flag [#10670](https://github.com/vitessio/vitess/pull/10670)
+ * Fixing logic for backup progress [#10794](https://github.com/vitessio/vitess/pull/10794)
+ * Backups: Support InnoDB Redo Log Location With 8.0.30+ [#10847](https://github.com/vitessio/vitess/pull/10847)
+ * fix: objname was not logged correctly [#11038](https://github.com/vitessio/vitess/pull/11038)
+ * [release-15.0] Detect redo log location dynamically based on presence (#11555) [#11558](https://github.com/vitessio/vitess/pull/11558)
+#### Build/CI
+ * Fixed the release notes CI check helper [#10574](https://github.com/vitessio/vitess/pull/10574)
+ * Remove potential double close of channel [#10929](https://github.com/vitessio/vitess/pull/10929)
+ * Add explicit close state to memory topo connection [#11110](https://github.com/vitessio/vitess/pull/11110)
+ * Use Ubuntu 20 for vtgate and tabletmanager workflows [#11152](https://github.com/vitessio/vitess/pull/11152)
+ * Fix vtcompose and docker-compose examples [#11188](https://github.com/vitessio/vitess/pull/11188)
+ * Fix the script `check_make_sizegen` [#11465](https://github.com/vitessio/vitess/pull/11465)
+ * Skip `TestComparisonSemantics` test [#11474](https://github.com/vitessio/vitess/pull/11474)
+#### CLI
+ * [vtadmin] Update how tracing flags are registered [#11063](https://github.com/vitessio/vitess/pull/11063)
+ * CLI Pflag migration work: Fix regression caused by pflag-vreplication PR [#11127](https://github.com/vitessio/vitess/pull/11127)
+#### Cluster management
+ * Fix Online DDL Revert flakiness [#10675](https://github.com/vitessio/vitess/pull/10675)
+ * Fix pprof toggling via SIGUSR1 and waitSig flag [#10875](https://github.com/vitessio/vitess/pull/10875)
+ * BugFix: Vttablet semi-sync settings on startup [#10881](https://github.com/vitessio/vitess/pull/10881)
+ * BugFix: vtctld panic with `enable_realtime_stats` [#10902](https://github.com/vitessio/vitess/pull/10902)
+ * Fix races in memory topo and watcher [#11065](https://github.com/vitessio/vitess/pull/11065)
+ * [vtctld] Fix nil-ness in healthcheck [#11067](https://github.com/vitessio/vitess/pull/11067)
+ * Install panic handler for all grpcvtctldserver endpoints [#11184](https://github.com/vitessio/vitess/pull/11184)
+ * Fix tablet debug/env template variable name [#11348](https://github.com/vitessio/vitess/pull/11348)
+#### Evalengine
+ * evalengine: support mismatched numerical types [#10997](https://github.com/vitessio/vitess/pull/10997)
+#### Examples
+ * Fix local example scripts [#11319](https://github.com/vitessio/vitess/pull/11319)
+#### General
+ * RateLimiter: exit goroutine at Stop() [#10755](https://github.com/vitessio/vitess/pull/10755)
+ * Fix frouioui in the MAINTAINERS.md file [#11343](https://github.com/vitessio/vitess/pull/11343)
+ * Stats Flags: include stats flags in the correct binaries [#11450](https://github.com/vitessio/vitess/pull/11450)
+ * Test flags: Update logic for parsing test flags to run unit tests within GoLand and to parse test flags in vtgate to allow running unit tests [#11551](https://github.com/vitessio/vitess/pull/11551)
+#### Operator
+ * Increase the memory limit of the vitess-operator [#11548](https://github.com/vitessio/vitess/pull/11548)
+#### Query Serving
+ * fix: scalar aggregation engine primitive [#10465](https://github.com/vitessio/vitess/pull/10465)
+ * fix: aggregation empty row on join with grouping and aggregations [#10480](https://github.com/vitessio/vitess/pull/10480)
+ * Fix parsing of CAST() statements [#10512](https://github.com/vitessio/vitess/pull/10512)
+ * Add back unary single column expression check [#10514](https://github.com/vitessio/vitess/pull/10514)
+ * fix: handle planner_version and planner-version correctly [#10534](https://github.com/vitessio/vitess/pull/10534)
+ * Fix casing of vitess migration syntax and comments printing [#10535](https://github.com/vitessio/vitess/pull/10535)
+ * Fix vtgate query log table name extraction for DML statements [#10536](https://github.com/vitessio/vitess/pull/10536)
+ * VReplication: more unrecoverable error codes [#10559](https://github.com/vitessio/vitess/pull/10559)
+ * Add support for INSERT() string function [#10593](https://github.com/vitessio/vitess/pull/10593)
+ * Ignoring empty queries with MySQL dashed comments [#10634](https://github.com/vitessio/vitess/pull/10634)
+ * Online DDL: deal with autogenerated CHECK constraint names [#10638](https://github.com/vitessio/vitess/pull/10638)
+ * Inject shard name in commit-phase multi-shard errors [#10669](https://github.com/vitessio/vitess/pull/10669)
+ * Parameterize BIT types and fixes in HEX types [#10689](https://github.com/vitessio/vitess/pull/10689)
+ * BugFix: Keep predicates in join when pushing new ones [#10715](https://github.com/vitessio/vitess/pull/10715)
+ * BugFix: Gen4CompareV3 planner reverted to Gen4 on Update queries [#10722](https://github.com/vitessio/vitess/pull/10722)
+ * Fix gen4 planner handling of `<=>` operator with `NULL` operand [#10754](https://github.com/vitessio/vitess/pull/10754)
+ * BugFix: `--queryserver-config-terse-errors` shouldn't redact `Row count exceeded` error and shouldn't affect ignoring `Duplicate entry` error in lookup vindexes [#10762](https://github.com/vitessio/vitess/pull/10762)
+ * Wrong length creating the buffer needed to ask for full auth [#10767](https://github.com/vitessio/vitess/pull/10767)
+ * fix: evalengine - check compare numeric of same type [#10793](https://github.com/vitessio/vitess/pull/10793)
+ * Fix client session state tracking logic [#10871](https://github.com/vitessio/vitess/pull/10871)
+ * feat: don't use the vtgatehandler unless it is known to have been initialized [#10879](https://github.com/vitessio/vitess/pull/10879)
+ * schemadiff: ordering and applying a RenameColumn [#10912](https://github.com/vitessio/vitess/pull/10912)
+ * Fix parsing of `PARTITION BY KEY` [#10958](https://github.com/vitessio/vitess/pull/10958)
+ * Online DDL, CancelMigration: distinguish user-issued vs. internally-issued cancellation [#11011](https://github.com/vitessio/vitess/pull/11011)
+ * Use the correct error type for dependencies [#11018](https://github.com/vitessio/vitess/pull/11018)
+ * Fix AST copying of basic types [#11046](https://github.com/vitessio/vitess/pull/11046)
+ * fix: return when instructions are nil in checkThatPlanIsValid [#11070](https://github.com/vitessio/vitess/pull/11070)
+ * Fix handling zero byte string for session state changed [#11071](https://github.com/vitessio/vitess/pull/11071)
+ * Fix logging formatting mistake [#11086](https://github.com/vitessio/vitess/pull/11086)
+ * On demand heartbeats: fix race condition closing the writer [#11157](https://github.com/vitessio/vitess/pull/11157)
+ * Fix problematic watch cancellation due to context cancellation [#11170](https://github.com/vitessio/vitess/pull/11170)
+ * Fix OK packet parsing logic [#11176](https://github.com/vitessio/vitess/pull/11176)
+ * Fix: Insert using select streaming bug [#11248](https://github.com/vitessio/vitess/pull/11248)
+ * bugfix: Truncate columns even when sorting on vtgate [#11265](https://github.com/vitessio/vitess/pull/11265)
+ * Fix query list override issue on mysql restart [#11309](https://github.com/vitessio/vitess/pull/11309)
+ * Fix conditions after `<=>` operator on left joined table columns being ignored for routing purposes. [#11310](https://github.com/vitessio/vitess/pull/11310)
+ * Fix complex predicates being pulled into `ON` conditions for `LEFT JOIN` statements. [#11317](https://github.com/vitessio/vitess/pull/11317)
+ * Handle cache value type correctly [#11369](https://github.com/vitessio/vitess/pull/11369)
+ * Push down derived tables under route when possible [#11379](https://github.com/vitessio/vitess/pull/11379)
+ * Fix: DML engine multiequal support [#11395](https://github.com/vitessio/vitess/pull/11395)
+ * Allow parenthesis around derived tables [#11407](https://github.com/vitessio/vitess/pull/11407)
+ * fix: do not rewrite single columns in derived tables [#11419](https://github.com/vitessio/vitess/pull/11419)
+ * Plan order by `COUNT(X)` [#11420](https://github.com/vitessio/vitess/pull/11420)
+ * Fix #11455 - skip vindex operations for `DELETE` statements against unsharded tables [#11461](https://github.com/vitessio/vitess/pull/11461)
+ * bug fix: using self-referencing columns in HAVING should not overflow [#11499](https://github.com/vitessio/vitess/pull/11499)
+ * [release-15.0] fix: reserve connection to follow query timeout when outside of transaction (#11490) [#11505](https://github.com/vitessio/vitess/pull/11505)
+ * [15.0] Fix query list override issue on mysql restart (#11309) [#11506](https://github.com/vitessio/vitess/pull/11506)
+ * Fix `HAVING` rewriting made in #11306 [#11515](https://github.com/vitessio/vitess/pull/11515)
+ * [15.0] fix: fail over reconnect in stream execution for connection with transaction [#11527](https://github.com/vitessio/vitess/pull/11527)
+ * [15.0] Fix: concatenate engine in transaction [#11534](https://github.com/vitessio/vitess/pull/11534)
+ * Redact bind variables in mysql errors [#11540](https://github.com/vitessio/vitess/pull/11540)
+#### TabletManager
+ * Fix schema engine close and ticks race [#10386](https://github.com/vitessio/vitess/pull/10386)
+#### VReplication
+ * VStream API: Fix vtgate memory leaks when context gets cancelled [#10571](https://github.com/vitessio/vitess/pull/10571)
+ * VReplication: retry in WaitForPos when read of pos is killed off by deadlock detector [#10621](https://github.com/vitessio/vitess/pull/10621)
+ * MoveTables: use source timezone to adjust datetime columns on update statements [#10667](https://github.com/vitessio/vitess/pull/10667)
+ * VDiff2: ignore errors while attempting to purge vdiff tables [#10725](https://github.com/vitessio/vitess/pull/10725)
+ * Add drop_foreign_keys to v2 MoveTables command [#10773](https://github.com/vitessio/vitess/pull/10773)
+ * Ensure VDiff Engine is open in RPC entrypoint [#10969](https://github.com/vitessio/vitess/pull/10969)
+ * VReplication: vreplication_max_time_to_retry_on_error default to zero, no limit [#11031](https://github.com/vitessio/vitess/pull/11031)
+ * VReplication: Handle DECIMAL 0 Value Edge Case [#11212](https://github.com/vitessio/vitess/pull/11212)
+ * Don't run VDiff on frozen workflows [#11234](https://github.com/vitessio/vitess/pull/11234)
+ * VStreamer: fix deadlock when there are a lot of vschema changes at the same time as binlog events [#11325](https://github.com/vitessio/vitess/pull/11325)
+ * VDiff: Make restarting VReplication workflow more robust [#11413](https://github.com/vitessio/vitess/pull/11413)
+#### VTCombo
+ * vtcombo mutates options, make a copy to avoid this [#11223](https://github.com/vitessio/vitess/pull/11223)
+#### VTorc
+ * Fix panic in VTOrc [#10519](https://github.com/vitessio/vitess/pull/10519)
+ * Fix VTOrc Discovery to also retry discovering tablets which aren't present in database_instance table [#10662](https://github.com/vitessio/vitess/pull/10662)
+ * BugFix: VTOrc should repair replication if either replication thread is stopped [#10786](https://github.com/vitessio/vitess/pull/10786)
+ * Fix VTOrc holding locks after shutdown [#11442](https://github.com/vitessio/vitess/pull/11442)
+ * [15.0] Fix VTOrc to handle multiple failures [#11489](https://github.com/vitessio/vitess/pull/11489)
+#### vttestserver
+ * Fix flags in vttestserver run script used in the docker image [#11354](https://github.com/vitessio/vitess/pull/11354)
+### CI/Build
+#### Backup and Restore
+ * Revert: Revert temporary workflow changes made in #10847 [#10914](https://github.com/vitessio/vitess/pull/10914)
+#### Build/CI
+ * tablegc test to utilize new capability logic [#10463](https://github.com/vitessio/vitess/pull/10463)
+ * docs: add query serving features to the release notes [#10475](https://github.com/vitessio/vitess/pull/10475)
+ * Modified the Pull Request review checklist to check for descriptive Pull Request titles [#10485](https://github.com/vitessio/vitess/pull/10485)
+ * Take into account `github.ref` when doing upgrade-downgrade tests [#10504](https://github.com/vitessio/vitess/pull/10504)
+ * add vtadmin web files to all lite images [#10581](https://github.com/vitessio/vitess/pull/10581)
+ * Removed the check label in upgrade downgrade tests [#10583](https://github.com/vitessio/vitess/pull/10583)
+ * Add stale PRs action [#10603](https://github.com/vitessio/vitess/pull/10603)
+ * Allow manual workflow_dispatch for close_stale_pull_requests [#10610](https://github.com/vitessio/vitess/pull/10610)
+ * Enable stale PR closer [#10617](https://github.com/vitessio/vitess/pull/10617)
+ * fix: build [#10647](https://github.com/vitessio/vitess/pull/10647)
+ * Remove the review checklist workflow [#10656](https://github.com/vitessio/vitess/pull/10656)
+ * Add MySQL 8 Support to Backup Tests [#10691](https://github.com/vitessio/vitess/pull/10691)
+ * Remove MariaDB 10.2 Unit Test in v15 [#10700](https://github.com/vitessio/vitess/pull/10700)
+ * Auto Detect MySQL Version and Use in vtgate mysql_server_version Flag [#10701](https://github.com/vitessio/vitess/pull/10701)
+ * Reduce Flakiness of ERS/PRS e2e Tests Using Retries With a Timeout [#10720](https://github.com/vitessio/vitess/pull/10720)
+ * Flakes: Increase timeouts for upgrade_downgrade workflows [#10735](https://github.com/vitessio/vitess/pull/10735)
+ * fix: use go-unit-report fork version in ci workflow [#10757](https://github.com/vitessio/vitess/pull/10757)
+ * Add the linter for exporting a loop variable through a pointer reference [#10763](https://github.com/vitessio/vitess/pull/10763)
+ * Be explicit about capturing the pointer [#10765](https://github.com/vitessio/vitess/pull/10765)
+ * looking into onlineddl_vrepl_stress_suite flakiness in CI [#10779](https://github.com/vitessio/vitess/pull/10779)
+ * Add semgrep CI workflow [#10826](https://github.com/vitessio/vitess/pull/10826)
+ * onlineddl_vrepl flakiness: further work [#10876](https://github.com/vitessio/vitess/pull/10876)
+ * Revert temporary workflow changes made in #10847 [#10896](https://github.com/vitessio/vitess/pull/10896)
+ * Fix main in CI [#10953](https://github.com/vitessio/vitess/pull/10953)
+ * Avoid race condition in memory topo watch shutdown [#10954](https://github.com/vitessio/vitess/pull/10954)
+ * Remove accidentally added fmt.Printf from debugging [#10967](https://github.com/vitessio/vitess/pull/10967)
+ * Add more robust go version handling [#11001](https://github.com/vitessio/vitess/pull/11001)
+ * Run latest gofmt on everything & address linter warnings [#11008](https://github.com/vitessio/vitess/pull/11008)
+ * Fix mariadb103 ci [#11015](https://github.com/vitessio/vitess/pull/11015)
+ * Online DDL vrepl suite: fix auto_increment tests in 8.0 [#11019](https://github.com/vitessio/vitess/pull/11019)
+ * CI: change upgrade/downgrade tests to use vitessio fork of go-junit-report [#11023](https://github.com/vitessio/vitess/pull/11023)
+ * Add workflow file to the filter rules [#11032](https://github.com/vitessio/vitess/pull/11032)
+ * Add upgrade-downgrade tests for next releases [#11033](https://github.com/vitessio/vitess/pull/11033)
+ * fix missing vtadmin binary in docker image [#11076](https://github.com/vitessio/vitess/pull/11076)
+ * Refactor vtorc tests to run as a single test with sub-tests [#11108](https://github.com/vitessio/vitess/pull/11108)
+ * Upgrade to Ubuntu 20.04 for endtoend tests [#11113](https://github.com/vitessio/vitess/pull/11113)
+ * Move tabletmanager CI jobs to 20.04 [#11116](https://github.com/vitessio/vitess/pull/11116)
+ * Upgrade vtgate CI jobs to 20.04 [#11118](https://github.com/vitessio/vitess/pull/11118)
+ * Upgrade CI for unit tests to Ubuntu 20.04 [#11119](https://github.com/vitessio/vitess/pull/11119)
+ * Move towards MySQL 8.0 as the default template generation [#11153](https://github.com/vitessio/vitess/pull/11153)
+ * Add VTOrc and VTAdmin to Region example [#11172](https://github.com/vitessio/vitess/pull/11172)
+ * Add a CodeQL workflow to check vulnerabilities in the codebase [#11207](https://github.com/vitessio/vitess/pull/11207)
+ * Fix build errors [#11209](https://github.com/vitessio/vitess/pull/11209)
+ * Adding tablet startup check [#11251](https://github.com/vitessio/vitess/pull/11251)
+ * Move vtorc to self-hosted runner [#11255](https://github.com/vitessio/vitess/pull/11255)
+ * Move 12 and 18 back to github runners [#11273](https://github.com/vitessio/vitess/pull/11273)
+ * Flakes: Fix Backup Transform Test Flakiness [#11352](https://github.com/vitessio/vitess/pull/11352)
+ * Move vtorc-8.0 to self-hosted runner [#11384](https://github.com/vitessio/vitess/pull/11384)
+#### Cluster management
+ * Fix examples/compose/docker-compose.yml to run healthy vttablets [#10597](https://github.com/vitessio/vitess/pull/10597)
+ * Include more tests in upgrade tests [#10665](https://github.com/vitessio/vitess/pull/10665)
+ * Fixing flakiness in TestCrossCellDurability and TestHealthCheckCacheWithTabletChurn [#10961](https://github.com/vitessio/vitess/pull/10961)
+ * FlakinessFix: Reparent tests by removing `restore_from_backup` [#11064](https://github.com/vitessio/vitess/pull/11064)
+ * Augment local example to also run VTOrc [#11155](https://github.com/vitessio/vitess/pull/11155)
+#### Documentation
+ * Minor fixes to markdown and test code [#10866](https://github.com/vitessio/vitess/pull/10866)
+#### General
+ * Upgrade to `go1.18.4` [#10705](https://github.com/vitessio/vitess/pull/10705)
+ * Tweak make targets MacOS M1 xc to Linux arches [#10706](https://github.com/vitessio/vitess/pull/10706)
+ * [release-15.0] [deps] go get golang.org/x/text && go mod tidy (#11466) [#11467](https://github.com/vitessio/vitess/pull/11467)
+#### Governance
+ * Update the comment for review checklist with an item for CI workflows [#10471](https://github.com/vitessio/vitess/pull/10471)
+#### Java
+ * Bump gson from 2.8.5 to 2.8.9 in /java [#10353](https://github.com/vitessio/vitess/pull/10353)
+#### Online DDL
+ * Address additional causes of OnlineDDL test flakiness [#11047](https://github.com/vitessio/vitess/pull/11047)
+#### Operator
+ * Fix VTop Example [#10687](https://github.com/vitessio/vitess/pull/10687)
+#### Query Serving
+ * CI Fix: Collation tests [#10839](https://github.com/vitessio/vitess/pull/10839)
+ * Add additional tests for EOF packet checks [#11014](https://github.com/vitessio/vitess/pull/11014)
+#### VReplication
+ * vrepl endtoend flakiness fix via schema versioning [#10804](https://github.com/vitessio/vitess/pull/10804)
+ * Tests: AddShard should use targeted MySQL version [#11006](https://github.com/vitessio/vitess/pull/11006)
+ * Flakes: Use waits instead of checks in vrepl e2e tests [#11048](https://github.com/vitessio/vitess/pull/11048)
+ * Flakes: Prevent VDiff2 test failures when operating near the second boundary [#11054](https://github.com/vitessio/vitess/pull/11054)
+ * Flakes: Eliminate TestVreplicationCopyThrottling Flakes [#11208](https://github.com/vitessio/vitess/pull/11208)
+ * Flakes: Correct TestVReplicationCopyThrottling Logic [#11224](https://github.com/vitessio/vitess/pull/11224)
+#### VTAdmin
+ * fix building logtail, logrotate and vtadmin docker image in Dockerhub [#10968](https://github.com/vitessio/vitess/pull/10968)
+#### VTorc
+ * Flakiness Fix: Tests for GracefulPrimaryTakeover [#11355](https://github.com/vitessio/vitess/pull/11355)
+ * [release-15.0] Move vtorc runners back to normal github runners (#11482) [#11494](https://github.com/vitessio/vitess/pull/11494)
+### Dependabot
+#### Examples
+ * Build(deps): Bump async from 3.2.0 to 3.2.4 in /vitess-mixin/e2e [#10515](https://github.com/vitessio/vitess/pull/10515)
+#### Observability
+ * Bump minimist and cypress in /vitess-mixin/e2e [#11201](https://github.com/vitessio/vitess/pull/11201)
+#### VTAdmin
+ * Bump protobufjs from 6.10.2 to 6.11.3 in /web/vtadmin [#10418](https://github.com/vitessio/vitess/pull/10418)
+ * Build(deps): bump terser from 5.10.0 to 5.14.2 in /web/vtadmin [#10761](https://github.com/vitessio/vitess/pull/10761)
+### Documentation
+#### CLI
+ * [vtctldclient] Update CLI docs for usages, flags, and aliases [#10502](https://github.com/vitessio/vitess/pull/10502)
+ * [vtctldclient] Add autodoc tool for generating website docs [#10635](https://github.com/vitessio/vitess/pull/10635)
+ * [release-15.0] [vtadmin] Do not backtick binary name (#11464) [#11481](https://github.com/vitessio/vitess/pull/11481)
+#### Cluster management
+ * [main] Add the vtorc discovery bug as a known issue to 14.0 (#10711) [#10724](https://github.com/vitessio/vitess/pull/10724)
+#### Documentation
+ * Throttler stats: amendment [#10572](https://github.com/vitessio/vitess/pull/10572)
+ * Improvements to the Summary doc [#11502](https://github.com/vitessio/vitess/pull/11502)
+#### General
+ * release notes: add index to v15 summary [#10829](https://github.com/vitessio/vitess/pull/10829)
+#### Query Serving
+ * added vindex interface breaking change to summary notes [#10693](https://github.com/vitessio/vitess/pull/10693)
+#### VTAdmin
+ * [vtadmin] Document known issue with node versions 17+ [#10483](https://github.com/vitessio/vitess/pull/10483)
+ * [vtadmin] Add authzdocsgen to generate some website docs [#10513](https://github.com/vitessio/vitess/pull/10513)
+### Enhancement
+#### Backup and Restore
+ * expose vtbackup stats at --port /metrics [#11388](https://github.com/vitessio/vitess/pull/11388)
+#### Build/CI
+ * Add name to static check workflow [#10470](https://github.com/vitessio/vitess/pull/10470)
+ * Make etcd based tests more deterministic and surface errors [#10521](https://github.com/vitessio/vitess/pull/10521)
+ * Skip CI workflows on `push` for pull requests [#10768](https://github.com/vitessio/vitess/pull/10768)
+ * Run upgrade/downgrade tests on main [#11022](https://github.com/vitessio/vitess/pull/11022)
+ * Move CI workflow to use latest community version of mysql 8.0 [#11493](https://github.com/vitessio/vitess/pull/11493)
+#### CLI
+ * [cmd/*] Switch to pflag for all CLI flag parsing [#10619](https://github.com/vitessio/vitess/pull/10619)
+ * [go/mysql/*] Move all authserver–related flags off of global flagset [#10752](https://github.com/vitessio/vitess/pull/10752)
+ * [cli] [mysqlctl] Scope all backupstorage implementation flags to `pflag` and relevant binaries [#10844](https://github.com/vitessio/vitess/pull/10844)
+ * [cli] [mysqlctl] Scope `backup_storage_implementation` flag to `pflag` [#10852](https://github.com/vitessio/vitess/pull/10852)
+ * Port vtorc CLI to servenv and pflag [#10911](https://github.com/vitessio/vitess/pull/10911)
+ * [vtexplain] Switch vtexplain flags to use pflag hooks [#10938](https://github.com/vitessio/vitess/pull/10938)
+ * [cli] [vtgate] Migrate `vtgate/buffer` flags to `pflag` [#10939](https://github.com/vitessio/vitess/pull/10939)
+ * [cli] [grpcvtgateconn] Migrate `vtgate/grpcvtgateconn` flags to `pflag` [#10941](https://github.com/vitessio/vitess/pull/10941)
+ * [cli] [vtgate] Migrate `grpcvtgateservice` flags to `pflag` and scope to appropriate binaries. [#10947](https://github.com/vitessio/vitess/pull/10947)
+ * [cli] [vtgr] Migrate all `vtgr` flags to `pflag` [#10952](https://github.com/vitessio/vitess/pull/10952)
+ * [cli] Migrate `vterrors` to `pflag` [#10957](https://github.com/vitessio/vitess/pull/10957)
+ * [cli] [go/mysql/collations/...] Migrate all flags to `pflag` [#10970](https://github.com/vitessio/vitess/pull/10970)
+ * [cli] [tabletconn] Migrate `go/vt/vttablet/tabletconn` to `pflag` [#10999](https://github.com/vitessio/vitess/pull/10999)
+ * [trace] Migrate `go/trace` to use `pflag` for flag definitions [#11028](https://github.com/vitessio/vitess/pull/11028)
+ * [log] Migrate `go/vt/log` flags to `pflag` [#11036](https://github.com/vitessio/vitess/pull/11036)
+ * [cli] [logutil] Migrate flags defined in `go/vt/logutil` to `pflag` [#11044](https://github.com/vitessio/vitess/pull/11044)
+ * [cli] [tabletmanager] Migrate all tabletmanager flags to `pflag` [#11057](https://github.com/vitessio/vitess/pull/11057)
+ * [tmclient] Migrate flags to pflag [#11066](https://github.com/vitessio/vitess/pull/11066)
+ * [cli] [tabletserver/vstreamer] Migrate vstreamer's packet size flags to pflags [#11087](https://github.com/vitessio/vitess/pull/11087)
+ * [cli] [tabletserver/vreplication] Migrate vreplication flags to pflags [#11095](https://github.com/vitessio/vitess/pull/11095)
+ * [cli] [tabletserver/throttler] Migrate throttler flags to pflags [#11100](https://github.com/vitessio/vitess/pull/11100)
+ * [cli] [tabletserver/gc] Migrate gc flags to pflags [#11101](https://github.com/vitessio/vitess/pull/11101)
+ * [cli] [stats] Migrate stats/opentsdb + stats/statsd flags to pflag [#11105](https://github.com/vitessio/vitess/pull/11105)
+ * [cli] [topo/consultopo] Migrate consul flags to pflags [#11106](https://github.com/vitessio/vitess/pull/11106)
+ * [cli] [status] Migrate go/vt/status to pflag [#11107](https://github.com/vitessio/vitess/pull/11107)
+ * [cli] [tabletserver/tabletenv] Migrate tabletenv flags to pflags [#11109](https://github.com/vitessio/vitess/pull/11109)
+ * [cli] [grpc{tabletconn,tmclient}] Migrate flags to `pflag` [#11111](https://github.com/vitessio/vitess/pull/11111)
+ * [cli] [grpcclient] Migrate flags to `pflag` [#11115](https://github.com/vitessio/vitess/pull/11115)
+ * [cli] [grpccommon] Migrate flags to `pflag` [#11122](https://github.com/vitessio/vitess/pull/11122)
+ * [cli] [tabletserver/streamlog] Migrate streamlog flags to pflags [#11125](https://github.com/vitessio/vitess/pull/11125)
+ * [cli] Migrate withddl/workflow/healthstreamer flags to pflags [#11126](https://github.com/vitessio/vitess/pull/11126)
+ * [cli] [servenv] Migrate grpc auth server flags within `servenv` to `pflag` [#11146](https://github.com/vitessio/vitess/pull/11146)
+ * [cli] [servenv] Migrate flags used by grpc servers to `pflag` [#11165](https://github.com/vitessio/vitess/pull/11165)
+ * [cli] [servenv] Migrate missed auth flag to pflag [#11166](https://github.com/vitessio/vitess/pull/11166)
+ * [cli] [servenv] migrate `--service_map` and `pprof` flags to `pflag` [#11179](https://github.com/vitessio/vitess/pull/11179)
+ * [cli] [servenv] Migrate miscellaneous flags to `pflag` [#11186](https://github.com/vitessio/vitess/pull/11186)
+ * [cli] [servenv] Migrate `--version` flag to pflag, and also add to `vtctldclient` and `vtadmin` [#11189](https://github.com/vitessio/vitess/pull/11189)
+ * [cli] [servenv] Migrate `--mysql_server_version` to pflag [#11190](https://github.com/vitessio/vitess/pull/11190)
+ * [cli] Migrate flag to pflag for file/sys logger [#11274](https://github.com/vitessio/vitess/pull/11274)
+ * [cli] Misc pflag binary migrations [#11307](https://github.com/vitessio/vitess/pull/11307)
+ * [cli] [mysqlctl] Migrate mysqlctl flags to pflags [#11314](https://github.com/vitessio/vitess/pull/11314)
+ * [cli] [vtgate/vschemaacl] Migrate VschemaACL flags to pflags [#11315](https://github.com/vitessio/vitess/pull/11315)
+ * [cli] [vtctl] Migrate all vtctl commands to `pflag` [#11320](https://github.com/vitessio/vitess/pull/11320)
+ * Fix adding flags to vtctlclient and vtctldclient [#11322](https://github.com/vitessio/vitess/pull/11322)
+ * [cli] [vtctld] Migrate vtctld flags to pflags [#11326](https://github.com/vitessio/vitess/pull/11326)
+ * [cli] [topo] Migrate topo2topo flags to pflags [#11327](https://github.com/vitessio/vitess/pull/11327)
+ * [cli] [zkctld] Migrate all zkctld flags to pflag [#11329](https://github.com/vitessio/vitess/pull/11329)
+ * [cli] [zkctl] Migrate zkctl flags to pflags [#11331](https://github.com/vitessio/vitess/pull/11331)
+ * [cli] [zk] Migrate zk flags to pflags [#11332](https://github.com/vitessio/vitess/pull/11332)
+ * [cli] [vtbackup] Migrate all vtbackup flags to pflag [#11334](https://github.com/vitessio/vitess/pull/11334)
+ * Move dbconfigs to pflag and remove deprecated flags [#11336](https://github.com/vitessio/vitess/pull/11336)
+ * [cmd/vtctl] Migrate flags to `pflag` [#11339](https://github.com/vitessio/vitess/pull/11339)
+ * [vtctlclient] Migrate to pflag [#11342](https://github.com/vitessio/vitess/pull/11342)
+ * [cli] Migrate cmd/vtclient and cmd/vttablet from flag to pflag [#11349](https://github.com/vitessio/vitess/pull/11349)
+ * [cli] Migrate cmd/vtctld to pflag [#11350](https://github.com/vitessio/vitess/pull/11350)
+ * [asthelpergen] Migrate to pflags [#11363](https://github.com/vitessio/vitess/pull/11363)
+ * [vtexplain] Migrate to pflags [#11364](https://github.com/vitessio/vitess/pull/11364)
+ * Migrates `cmd/vtbench` to pflags [#11366](https://github.com/vitessio/vitess/pull/11366)
+ * [grpcclient] Migrate `--grpc_auth_static_client_creds` to pflag [#11367](https://github.com/vitessio/vitess/pull/11367)
+ * [vtctlclient] Migrate `vtctl_client_protocol` to pflag [#11368](https://github.com/vitessio/vitess/pull/11368)
+ * [flagutil] Cleanup `flag` references [#11381](https://github.com/vitessio/vitess/pull/11381)
+ * Migrate mysqlctl command and package to pflag [#11391](https://github.com/vitessio/vitess/pull/11391)
+ * Migrate ACL package to pflag [#11392](https://github.com/vitessio/vitess/pull/11392)
+ * [cli] [topo] Migrate topo flags to pflags [#11393](https://github.com/vitessio/vitess/pull/11393)
+ * [cli] [etcd2] Migrate etcd2topo flags to pflags [#11394](https://github.com/vitessio/vitess/pull/11394)
+ * [tools/rowlog] Migrate to pflag [#11412](https://github.com/vitessio/vitess/pull/11412)
+ * VTop: Adds a function to get the flag set for a given command [#11424](https://github.com/vitessio/vitess/pull/11424)
+ * Properly deprecate flags and fix default for `--cell` [#11501](https://github.com/vitessio/vitess/pull/11501)
+ * Allow version to be accessible via the -v shorthand [#11512](https://github.com/vitessio/vitess/pull/11512)
+#### Cluster management
+ * Throttler: stats in /debug/vars [#10443](https://github.com/vitessio/vitess/pull/10443)
+ * Adds RPCs to vttablet that vtorc requires [#10464](https://github.com/vitessio/vitess/pull/10464)
+ * vtctl GetSchema --table_schema_only [#10552](https://github.com/vitessio/vitess/pull/10552)
+ * Deprecate enable-semi-sync in favour of RPC parameter [#10695](https://github.com/vitessio/vitess/pull/10695)
+ * Add GetFullStatus RPC to vtctld [#10905](https://github.com/vitessio/vitess/pull/10905)
+ * Simply Replication Status proto conversions [#10926](https://github.com/vitessio/vitess/pull/10926)
+ * Improve PRS to validate new primary can make forward progress [#11308](https://github.com/vitessio/vitess/pull/11308)
+ * [cli] Topo: Migrate zk2topo and k8stopo to pflag [#11401](https://github.com/vitessio/vitess/pull/11401)
+ * remove excessive logging [#11479](https://github.com/vitessio/vitess/pull/11479)
+#### Examples
+ * Give all permissions in rbac in examples [#11463](https://github.com/vitessio/vitess/pull/11463)
+ * Fix Vitess Operator example [#11546](https://github.com/vitessio/vitess/pull/11546)
+#### General
+ * [cli] Migrate miscellaneous components from flag to pflag [#11347](https://github.com/vitessio/vitess/pull/11347)
+ * Move vttlstest to pflag and cobra [#11361](https://github.com/vitessio/vitess/pull/11361)
+ * Move vtaclcheck command to pflags [#11372](https://github.com/vitessio/vitess/pull/11372)
+ * Migrate mysqlctld from flag to pflag [#11376](https://github.com/vitessio/vitess/pull/11376)
+ * removing unncessary flags across binaries [#11495](https://github.com/vitessio/vitess/pull/11495)
+ * [release-15.0] Upgrade to `go1.18.7` [#11507](https://github.com/vitessio/vitess/pull/11507)
+ * Removing redundant flags across binaries [#11522](https://github.com/vitessio/vitess/pull/11522)
+#### Observability
+ * Add SessionUUID and transaction mark to vtgate query logs [#10427](https://github.com/vitessio/vitess/pull/10427)
+#### Online DDL
+ * [cli] [tabletserver/onlineddl] Migrate onlineddl flags to pflags [#11099](https://github.com/vitessio/vitess/pull/11099)
+#### Query Serving
+ * Refactor aggregation AST structs [#10347](https://github.com/vitessio/vitess/pull/10347)
+ * Concurrent vitess migrations [#10410](https://github.com/vitessio/vitess/pull/10410)
+ * Make vtgate streamlog buffer configurable [#10426](https://github.com/vitessio/vitess/pull/10426)
+ * fix: change planner_version to planner-version everywhere [#10453](https://github.com/vitessio/vitess/pull/10453)
+ * enable schema tracking by default [#10455](https://github.com/vitessio/vitess/pull/10455)
+ * Add support for alter table rename column [#10469](https://github.com/vitessio/vitess/pull/10469)
+ * schemadiff: `ColumnRenameStrategy` in DiffHints [#10472](https://github.com/vitessio/vitess/pull/10472)
+ * Add parsing support for performance schema functions [#10478](https://github.com/vitessio/vitess/pull/10478)
+ * schemadiff: TableRenameStrategy in DiffHints [#10479](https://github.com/vitessio/vitess/pull/10479)
+ * OnlineDDL executor: adding log entries [#10482](https://github.com/vitessio/vitess/pull/10482)
+ * Fix: handle all cases for consistent lookup unique on single transaction mode [#10493](https://github.com/vitessio/vitess/pull/10493)
+ * Cleanup: Remove 'Name' field from aggregate structure [#10507](https://github.com/vitessio/vitess/pull/10507)
+ * New explain format: VTEXPLAIN [#10556](https://github.com/vitessio/vitess/pull/10556)
+ * Insert with select using streaming call [#10577](https://github.com/vitessio/vitess/pull/10577)
+ * Add parsing support for GTID functions [#10579](https://github.com/vitessio/vitess/pull/10579)
+ * [14.0] Schema tracking acl error logging [#10591](https://github.com/vitessio/vitess/pull/10591)
+ * Update how table uses are reported [#10598](https://github.com/vitessio/vitess/pull/10598)
+ * Parse INTERVAL() function [#10599](https://github.com/vitessio/vitess/pull/10599)
+ * VReplication: throttling info for both source and target; Online DDL propagates said info [#10601](https://github.com/vitessio/vitess/pull/10601)
+ * Online DDL: increase stale migration timeout [#10614](https://github.com/vitessio/vitess/pull/10614)
+ * Online DDL: even more logging [#10615](https://github.com/vitessio/vitess/pull/10615)
+ * Parse LOCATE(), POSITION() and CHAR() functions [#10629](https://github.com/vitessio/vitess/pull/10629)
+ * Improve handling of MATCH AGAINST [#10633](https://github.com/vitessio/vitess/pull/10633)
+ * Accept geomcollection as alias for geometrycollection [#10641](https://github.com/vitessio/vitess/pull/10641)
+ * Fix stats for cache miss and add CachePlan for Vtgate [#10643](https://github.com/vitessio/vitess/pull/10643)
+ * Support lookup multi shard autocommit [#10652](https://github.com/vitessio/vitess/pull/10652)
+ * Online DDL: ALTER VITESS_MIGRATION COMPLETE ALL [#10694](https://github.com/vitessio/vitess/pull/10694)
+ * Improve performance of `information_schema` queries on MySQL 8. [#10703](https://github.com/vitessio/vitess/pull/10703)
+ * ApplySchema: renew keyspace lock while iterating SQLs [#10727](https://github.com/vitessio/vitess/pull/10727)
+ * ApplySchema: do not ReloadSchema on ExecuteFetchAsDba [#10739](https://github.com/vitessio/vitess/pull/10739)
+ * Online DDL: issue a ReloadSchema at the completion of any migration [#10766](https://github.com/vitessio/vitess/pull/10766)
+ * refactor: make resource pool as interface and pool refresh as common [#10784](https://github.com/vitessio/vitess/pull/10784)
+ * Online DDL: migration state transitions to 'cancelled' after CANCEL command [#10900](https://github.com/vitessio/vitess/pull/10900)
+ * add vttablet cli flags for stream consolidator [#10907](https://github.com/vitessio/vitess/pull/10907)
+ * Online DDL: --pospone-launch, ALTER VITESS_MIGRATION ... LAUNCH [#10915](https://github.com/vitessio/vitess/pull/10915)
+ * Implement date, time and timestamp literals [#10921](https://github.com/vitessio/vitess/pull/10921)
+ * add the selected keyspace to LogStats [#10924](https://github.com/vitessio/vitess/pull/10924)
+ * schemadiff: rich error for unmet view dependencies [#10940](https://github.com/vitessio/vitess/pull/10940)
+ * Improve route merging for queries that have conditions on different vindexes, but can be merged via join predicates. [#10942](https://github.com/vitessio/vitess/pull/10942)
+ * decouple olap tx timeout from oltp tx timeout [#10946](https://github.com/vitessio/vitess/pull/10946)
+ * Merge subqueries that "join" on lookup index columns. [#10966](https://github.com/vitessio/vitess/pull/10966)
+ * Remove prefill logic from resource pool [#11002](https://github.com/vitessio/vitess/pull/11002)
+ * schemadiff: FullTextKeyStrategy, handling multiple 'ADD FULLTEXT key' alter options [#11012](https://github.com/vitessio/vitess/pull/11012)
+ * Online DDL: support multiple 'ADD FULLTEXT KEY' in single ALTER [#11013](https://github.com/vitessio/vitess/pull/11013)
+ * refactor: group all system setting query into single set statement [#11021](https://github.com/vitessio/vitess/pull/11021)
+ * System Settings connections pool implementation [#11037](https://github.com/vitessio/vitess/pull/11037)
+ * Improve schema reload performance by pre-filtering joined rows. [#11043](https://github.com/vitessio/vitess/pull/11043)
+ * Improve merging for `None` route opcodes. [#11045](https://github.com/vitessio/vitess/pull/11045)
+ * Add possibility of viewing plans with graphviz [#11050](https://github.com/vitessio/vitess/pull/11050)
+ * Use available method to compare tables [#11056](https://github.com/vitessio/vitess/pull/11056)
+ * schemadiff: Fix handling of primary key [#11059](https://github.com/vitessio/vitess/pull/11059)
+ * No reserved connection on modifying system settings [#11088](https://github.com/vitessio/vitess/pull/11088)
+ * tabletserver stream replace schema name bindvar [#11090](https://github.com/vitessio/vitess/pull/11090)
+ * Online DDL: introduce '--max_concurrent_online_ddl' [#11091](https://github.com/vitessio/vitess/pull/11091)
+ * return resource back to pool on apply settings failure [#11096](https://github.com/vitessio/vitess/pull/11096)
+ * [Gen4] Merge `SeenPredicates` when creating route operator for join [#11104](https://github.com/vitessio/vitess/pull/11104)
+ * Two changes to the error sanitizer [#11114](https://github.com/vitessio/vitess/pull/11114)
+ * Online DDL: more error logging [#11117](https://github.com/vitessio/vitess/pull/11117)
+ * Add parsing for Offsets similar to bind-variables [#11120](https://github.com/vitessio/vitess/pull/11120)
+ * Fix typing error in constant for wait_until_sql_thread_after_gtids [#11121](https://github.com/vitessio/vitess/pull/11121)
+ * Treat `IN` operations on single value tuples as `Equal` operations. [#11123](https://github.com/vitessio/vitess/pull/11123)
+ * adding setting pool metrics [#11175](https://github.com/vitessio/vitess/pull/11175)
+ * Adds delete planning to Gen4 [#11177](https://github.com/vitessio/vitess/pull/11177)
+ * generate settings plan in tablet with query and reset setting query [#11181](https://github.com/vitessio/vitess/pull/11181)
+ * Add Metric For Time Elapsed In Getting Connection In Pools [#11213](https://github.com/vitessio/vitess/pull/11213)
+ * Online DDL: more info in a conflicting migration message [#11217](https://github.com/vitessio/vitess/pull/11217)
+ * addressing review comments from #11088 [#11221](https://github.com/vitessio/vitess/pull/11221)
+ * Reapply system settings on connection reconnect [#11256](https://github.com/vitessio/vitess/pull/11256)
+ * Allow non-SSL callers of VTGate RPC APIs to specify group information for the CallerID [#11260](https://github.com/vitessio/vitess/pull/11260)
+ * Move go/mysql flags to pflags [#11272](https://github.com/vitessio/vitess/pull/11272)
+ * feat: rewrite column names in HAVING [#11306](https://github.com/vitessio/vitess/pull/11306)
+ * advisory lock to acquire reserve connection only for get_lock [#11359](https://github.com/vitessio/vitess/pull/11359)
+ * fix: store the output of the rewrite [#11362](https://github.com/vitessio/vitess/pull/11362)
+ * gen4 planner: small cleanup [#11403](https://github.com/vitessio/vitess/pull/11403)
+#### TabletManager
+ * Improve topo handling and add additional functionality [#10906](https://github.com/vitessio/vitess/pull/10906)
+ * Replication Manager Improvements [#11194](https://github.com/vitessio/vitess/pull/11194)
+#### VReplication
+ * Partial Movetables: allow moving a keyspace one shard at a time [#9987](https://github.com/vitessio/vitess/pull/9987)
+ * Fail VReplication workflows on errors that persist and unrecoverable errors [#10429](https://github.com/vitessio/vitess/pull/10429)
+ * VDiff2: Support Resuming VDiffs [#10497](https://github.com/vitessio/vitess/pull/10497)
+ * Implement VDiff2 Delete Action [#10608](https://github.com/vitessio/vitess/pull/10608)
+ * VDiff2: Auto retry to continue on error [#10639](https://github.com/vitessio/vitess/pull/10639)
+ * VDiff2: Add --wait flag to Create/Resume actions [#10799](https://github.com/vitessio/vitess/pull/10799)
+ * VDiff2: Add Stop Action [#10830](https://github.com/vitessio/vitess/pull/10830)
+ * Add tracking session state changes for transaction start [#11061](https://github.com/vitessio/vitess/pull/11061)
+ * Port time zone handling from vdiff1 to vdiff2 [#11128](https://github.com/vitessio/vitess/pull/11128)
+ * VDiff2: Add support for Mount+Migrate [#11204](https://github.com/vitessio/vitess/pull/11204)
+ * VStreams: Rotate Binary Log For Snapshot Connections [#11344](https://github.com/vitessio/vitess/pull/11344)
+ * For partial MoveTables, setup reverse shard routing rules on workflow creation [#11415](https://github.com/vitessio/vitess/pull/11415)
+#### VTAdmin
+ * using nginx for vtadmin web [#10770](https://github.com/vitessio/vitess/pull/10770)
+ * [VTAdmin] `Validate`, `ValidateShard`, `ValidateVersionShard`, `GetFullStatus` [#11438](https://github.com/vitessio/vitess/pull/11438)
+ * Full Status tab improvements for VTAdmin [#11470](https://github.com/vitessio/vitess/pull/11470)
+ * [15.0] Add VTGate debug/status page link to VTAdmin [#11541](https://github.com/vitessio/vitess/pull/11541)
+#### VTorc
+ * Replicas should be able to heal if replication is not initialised properly [#10943](https://github.com/vitessio/vitess/pull/10943)
+ * Getting rid of external logging [#11085](https://github.com/vitessio/vitess/pull/11085)
+ * Moving math package from external libarary [#11147](https://github.com/vitessio/vitess/pull/11147)
+ * Prevent martini from logging in VTOrc [#11173](https://github.com/vitessio/vitess/pull/11173)
+ * Only refresh required tablet's information in VTOrc [#11220](https://github.com/vitessio/vitess/pull/11220)
+ * Parameterize VTOrc constants [#11254](https://github.com/vitessio/vitess/pull/11254)
+ * Introduce `servenv` status pages in VTOrc [#11263](https://github.com/vitessio/vitess/pull/11263)
+ * Addition of Metrics to VTOrc to track the number of recoveries ran and their success count. [#11338](https://github.com/vitessio/vitess/pull/11338)
+ * VTOrc cleanup: Remove unused CLI code and move relevant APIs to the new VTOrc UI [#11370](https://github.com/vitessio/vitess/pull/11370)
+#### vtctl
+ * Add order, limit, skip options to onlineddl show command [#10651](https://github.com/vitessio/vitess/pull/10651)
+#### vtexplain
+ * `vtexplain` fails for vindex lookup queries with duplicate / equivalent values. [#10996](https://github.com/vitessio/vitess/pull/10996)
+### Feature Request
+#### Backup and Restore
+ * Backup/Restore: add support for external compressors and decompressors [#10558](https://github.com/vitessio/vitess/pull/10558)
+#### Evalengine
+ * evalengine: Support built-in MySQL function CEIL() [#11027](https://github.com/vitessio/vitess/pull/11027)
+#### VTAdmin
+ * add vtadmin docker image [#10543](https://github.com/vitessio/vitess/pull/10543)
+#### web UI
+ * [VTAdmin] RebuildKeyspaceGraph, RemoveKeyspaceCell, NewShard [#11249](https://github.com/vitessio/vitess/pull/11249)
+ * VTAdmin: shard actions [#11328](https://github.com/vitessio/vitess/pull/11328)
+ * [VTAdmin] Cherry Pick Topology Browser [#11518](https://github.com/vitessio/vitess/pull/11518)
+### Internal Cleanup
+#### Build/CI
+ * upgrade versions of security vulnerable packages crypto/net/serf [#10272](https://github.com/vitessio/vitess/pull/10272)
+ * update golangci-lint to 1.46.2 [#10568](https://github.com/vitessio/vitess/pull/10568)
+ * Update to latest Protobuf 21.3 release [#10803](https://github.com/vitessio/vitess/pull/10803)
+ * Always close body for HTTP requests in tests [#10835](https://github.com/vitessio/vitess/pull/10835)
+ * Always setup an underlying topo for a sandbox [#10882](https://github.com/vitessio/vitess/pull/10882)
+ * Cleanup the go-sqlite3 workaround [#10884](https://github.com/vitessio/vitess/pull/10884)
+ * Cleanup usage of go.rice in favor of go:embed [#10956](https://github.com/vitessio/vitess/pull/10956)
+#### CLI
+ * [cli][discovery]: migrate discovery flags to pflag [#10863](https://github.com/vitessio/vitess/pull/10863)
+ * [vtcombo] Delete `flag.Set` call on non-existent flag [#10889](https://github.com/vitessio/vitess/pull/10889)
+ * Move goyacc to use pflags package [#11092](https://github.com/vitessio/vitess/pull/11092)
+ * Move sqlparser flags to use pflags [#11094](https://github.com/vitessio/vitess/pull/11094)
+ * vtgate pflags migration [#11318](https://github.com/vitessio/vitess/pull/11318)
+ * [cli] `vttestserver` flag parsing to use pflags [#11321](https://github.com/vitessio/vitess/pull/11321)
+ * customrule pflags migration [#11340](https://github.com/vitessio/vitess/pull/11340)
+ * srvtopo pflags migration [#11341](https://github.com/vitessio/vitess/pull/11341)
+ * [cli] Begrudgingly shim `flag.Parse` call to trick glog [#11382](https://github.com/vitessio/vitess/pull/11382)
+ * [cli] Use pflag/flag interop function in vtctldclient legacy shim [#11399](https://github.com/vitessio/vitess/pull/11399)
+ * Fix vtbackup binary by adding the flags it needs that we missed before [#11417](https://github.com/vitessio/vitess/pull/11417)
+#### Cluster management
+ * Remove legacy healthcheck files and structures [#10542](https://github.com/vitessio/vitess/pull/10542)
+ * Proto file lint fix and vtadmin generated file [#10563](https://github.com/vitessio/vitess/pull/10563)
+ * Cleanup: un-explode GetSchema and reuse GetSchemaRequest struct [#10578](https://github.com/vitessio/vitess/pull/10578)
+ * [vtctl] Delete query commands [#10646](https://github.com/vitessio/vitess/pull/10646)
+ * Cleanup: ERS and PRS tests by removing setupShardLegacy [#10728](https://github.com/vitessio/vitess/pull/10728)
+ * Refactor: Unexplode Backup() function, pass BackupRequest as argument [#10904](https://github.com/vitessio/vitess/pull/10904)
+ * Deprecate orchestrator integration [#11409](https://github.com/vitessio/vitess/pull/11409)
+ * Adding deprecate message to backup hooks [#11491](https://github.com/vitessio/vitess/pull/11491)
+ * [15.0] Deprecate InitShardPrimary command [#11557](https://github.com/vitessio/vitess/pull/11557)
+#### Evalengine
+ * evalengine: expose Filter operations [#10903](https://github.com/vitessio/vitess/pull/10903)
+ * Move evalengine integration tests to use pflags [#11378](https://github.com/vitessio/vitess/pull/11378)
+#### General
+ * Remove v2 resharding fields [#10409](https://github.com/vitessio/vitess/pull/10409)
+ * Remove @doeg from a subset of CODEOWNERS [#10557](https://github.com/vitessio/vitess/pull/10557)
+ * Remove @doeg from maintainers [#10625](https://github.com/vitessio/vitess/pull/10625)
+ * Remove the release notes document from the main branch [#10672](https://github.com/vitessio/vitess/pull/10672)
+ * Delete `go/vt/vttime` [#10995](https://github.com/vitessio/vitess/pull/10995)
+#### Observability
+ * flags etc: delete old flags and stats, add deprecation notice to release notes [#11402](https://github.com/vitessio/vitess/pull/11402)
+#### Query Serving
+ * Extract vindex lookup queries into their own primitive [#10490](https://github.com/vitessio/vitess/pull/10490)
+ * Reduce shift-reduce conflicts [#10500](https://github.com/vitessio/vitess/pull/10500)
+ * feat: don't stop if compilation errors are happening on the generated files [#10506](https://github.com/vitessio/vitess/pull/10506)
+ * User defined and sys variables [#10547](https://github.com/vitessio/vitess/pull/10547)
+ * refactor: removed context from part of vcursor struct [#10632](https://github.com/vitessio/vitess/pull/10632)
+ * Unexplode return values for queryservice [#10802](https://github.com/vitessio/vitess/pull/10802)
+ * Mark aggregate functions callable [#10805](https://github.com/vitessio/vitess/pull/10805)
+ * Separate function for creating bind variables [#10883](https://github.com/vitessio/vitess/pull/10883)
+ * check for nil earlier [#10887](https://github.com/vitessio/vitess/pull/10887)
+ * refactor: minor refactor in partial shard routing and change in flag to dashes [#11357](https://github.com/vitessio/vitess/pull/11357)
+ * Delete deprecated flags [#11360](https://github.com/vitessio/vitess/pull/11360)
+ * Remove deprecated IsSkipTopo() function [#11377](https://github.com/vitessio/vitess/pull/11377)
+#### TabletManager
+ * refactor: unexplode VStreamRows() and reuse VStreamRowsRequest, unexplode VStream() and reuse VStreamRequest [#10671](https://github.com/vitessio/vitess/pull/10671)
+ * [tmclient] [tmserver] Unexplode fetchers [#10998](https://github.com/vitessio/vitess/pull/10998)
+#### VReplication
+ * Delete all legacy sharding related code [#10278](https://github.com/vitessio/vitess/pull/10278)
+#### VTAdmin
+ * [vtadmin] Rename ERS/PRS pools+flags properly [#10460](https://github.com/vitessio/vitess/pull/10460)
+#### VTorc
+ * Use introduced tablet manager RPCs in VTOrc [#10467](https://github.com/vitessio/vitess/pull/10467)
+ * Remove logging in GetDurabilityPolicy [#10516](https://github.com/vitessio/vitess/pull/10516)
+ * VTOrc Cleanup: Remove KV stores [#10645](https://github.com/vitessio/vitess/pull/10645)
+ * Use TMC RPCs in VTOrc [#10664](https://github.com/vitessio/vitess/pull/10664)
+ * Nil-check errors before printing them in VTOrc [#11156](https://github.com/vitessio/vitess/pull/11156)
+ * Cluster-Alias cleanup for VTOrc [#11193](https://github.com/vitessio/vitess/pull/11193)
+ * Refactor: Rename Orchestrator to VTOrc in the codebase [#11231](https://github.com/vitessio/vitess/pull/11231)
+ * VTOrc Cleanup - Configs, APIs and old UI [#11356](https://github.com/vitessio/vitess/pull/11356)
+ * VTOrc Standardisation and Cleanup [#11416](https://github.com/vitessio/vitess/pull/11416)
+ * [vtorc] Remove duplicated vt/log import [#11423](https://github.com/vitessio/vitess/pull/11423)
+#### vtctl
+ * [vtctl] delete all throttler commands and associated cmd imports [#10661](https://github.com/vitessio/vitess/pull/10661)
+#### web UI
+ * Remove sharding_column_name and sharding_column_type from vtctld2 [#10459](https://github.com/vitessio/vitess/pull/10459)
+### Other
+#### Other
+ * Build(deps): Bump mysql-connector-java from 8.0.25 to 8.0.28 in /java/example [#10551](https://github.com/vitessio/vitess/pull/10551)
+### Performance
+#### Query Serving
+ * schemadiff performance improvements [#11035](https://github.com/vitessio/vitess/pull/11035)
+ * schemadiff: Shallow copy of the schema [#11041](https://github.com/vitessio/vitess/pull/11041)
+ * [vtgate] Add flag to pool connection read buffers [#11167](https://github.com/vitessio/vitess/pull/11167)
+#### TabletManager
+ * Tablet Executor: consolidate ReloadSchema calls, and skip for Online DDL [#10719](https://github.com/vitessio/vitess/pull/10719)
+### Regression
+#### Backup and Restore
+ * revert default compression engine [#11029](https://github.com/vitessio/vitess/pull/11029)
+### Release
+#### Build/CI
+ * Rework how the `release notes` labels are handled by the CI [#10508](https://github.com/vitessio/vitess/pull/10508)
+ * Rework the generation of the release notes [#10510](https://github.com/vitessio/vitess/pull/10510)
+ * Addition of the v14 release notes documents [#10602](https://github.com/vitessio/vitess/pull/10602)
+#### CLI
+ * Migrates `release-notes` to pflag [#11365](https://github.com/vitessio/vitess/pull/11365)
+#### Deployments
+ * Code freeze of release-15.0 [#11565](https://github.com/vitessio/vitess/pull/11565)
+#### Documentation
+ * Update the release documentation [#11174](https://github.com/vitessio/vitess/pull/11174)
+ * Add hyperlink in the release changelog [#11241](https://github.com/vitessio/vitess/pull/11241)
+#### General
+ * Post release `v14.0.0-RC1` steps [#10458](https://github.com/vitessio/vitess/pull/10458)
+ * Documented the legacy healthcheck and tabletgateway and added summary to 14's summary [#10567](https://github.com/vitessio/vitess/pull/10567)
+ * Addition of the v14 release docs on main [#10606](https://github.com/vitessio/vitess/pull/10606)
+ * [main] Addition of the release notes summary for v14.0.1 (#10821) [#10837](https://github.com/vitessio/vitess/pull/10837)
+ * [main] Release summary 13.0.2 (#10820) [#10838](https://github.com/vitessio/vitess/pull/10838)
+ * Addition of the release notes for `v13.0.2` [#10849](https://github.com/vitessio/vitess/pull/10849)
+ * Addition of the release notes for v14.0.1 [#10851](https://github.com/vitessio/vitess/pull/10851)
+ * Addition of the release notes for v12.0.5 [#10873](https://github.com/vitessio/vitess/pull/10873)
+ * Include the compose examples in the `do_release` script [#11130](https://github.com/vitessio/vitess/pull/11130)
+ * do_release: fix updateVitessExamples function call [#11134](https://github.com/vitessio/vitess/pull/11134)
+ * Upgrade go version to `1.18.5` on `main` [#11136](https://github.com/vitessio/vitess/pull/11136)
+ * Addition of the release notes for `v14.0.2` [#11160](https://github.com/vitessio/vitess/pull/11160)
+ * Addition of the release notes for `v13.0.3` [#11162](https://github.com/vitessio/vitess/pull/11162)
+ * Addition of the release notes for `v12.0.6` [#11164](https://github.com/vitessio/vitess/pull/11164)
+ * Simple code freeze script and workflow [#11178](https://github.com/vitessio/vitess/pull/11178)
+ * Improve the `do_release` script to have two different Pull Requests instead of one during a release [#11197](https://github.com/vitessio/vitess/pull/11197)
+ * Release notes 14.0.3 on main [#11406](https://github.com/vitessio/vitess/pull/11406)
+ * Code freeze of release-15.0 [#11427](https://github.com/vitessio/vitess/pull/11427)
+ * Release of v15.0.0-rc1 [#11443](https://github.com/vitessio/vitess/pull/11443)
+ * Back to dev mode after v15.0.0-rc1 [#11444](https://github.com/vitessio/vitess/pull/11444)
+ * fixing urls [#11572](https://github.com/vitessio/vitess/pull/11572)
+### Testing
+#### Backup and Restore
+ * Enable VTOrc in backup tests [#11410](https://github.com/vitessio/vitess/pull/11410)
+#### Build/CI
+ * test: reduce number of vttablets to start in the tests [#10491](https://github.com/vitessio/vitess/pull/10491)
+ * test: for unit tests set TMPDIR=/tmp_XXXXXX on mac [#10655](https://github.com/vitessio/vitess/pull/10655)
+ * CI: mysql8 test for schemadiff_vrepl [#10679](https://github.com/vitessio/vitess/pull/10679)
+ * Fixes to config file and flakiness fix for TestFloatValueDefault [#10710](https://github.com/vitessio/vitess/pull/10710)
+ * Flakes: Expect SERVING status for tablets added to shard with a PRIMARY [#11007](https://github.com/vitessio/vitess/pull/11007)
+#### CLI
+ * [cli] [vttest] Extend vttest.TopoData to implement `pflag.Value`, and make function return types implicit [#10994](https://github.com/vitessio/vitess/pull/10994)
+ * [cli] [vtcombo|tests] Migrate `vtcombo` to `pflag` and rewrite tabletconn tests to not need TabletProtocol exported [#11010](https://github.com/vitessio/vitess/pull/11010)
+#### Cluster management
+ * Fix incorrect use of loop variable in parallel test [#11082](https://github.com/vitessio/vitess/pull/11082)
+#### General
+ * fix minor code unreachability error [#10771](https://github.com/vitessio/vitess/pull/10771)
+#### Query Serving
+ * unit test: fix mysql tests to run on MacOS [#10613](https://github.com/vitessio/vitess/pull/10613)
+ * Use many more valid test cases [#10640](https://github.com/vitessio/vitess/pull/10640)
+ * test: set parameter on vtgate than on vttablet [#10698](https://github.com/vitessio/vitess/pull/10698)
+ * Addition of a test in aggr_cases for grouping on data from derived table [#10868](https://github.com/vitessio/vitess/pull/10868)
+ * Format Gen4 end-to-end tests [#11089](https://github.com/vitessio/vitess/pull/11089)
+ * Fix `TestInvalidDateTimeTimestampVals` linter issues [#11098](https://github.com/vitessio/vitess/pull/11098)
+ * Use vtparams instead of clusterInstance in TestNormalizeAllFields [#11102](https://github.com/vitessio/vitess/pull/11102)
+ * test: deflake TestIdleTimeoutCreateFail [#11411](https://github.com/vitessio/vitess/pull/11411)
+ * Use JSON for plan tests [#11430](https://github.com/vitessio/vitess/pull/11430)
+#### VTAdmin
+ * [vtadmin] authz tests - tablet actions [#10457](https://github.com/vitessio/vitess/pull/10457)
+ * [vtadmin] Add authz tests for remaining non-schema related actions [#10481](https://github.com/vitessio/vitess/pull/10481)
+ * [vtadmin] Add schema-related authz tests [#10486](https://github.com/vitessio/vitess/pull/10486)
+ * [vtadmin/tests] Serialize Schema test cases to avoid cache backfill races [#10538](https://github.com/vitessio/vitess/pull/10538)
+ * [vtadmin] fix flaky GetSchemas test cases [#10555](https://github.com/vitessio/vitess/pull/10555)
+#### web UI
+ * Fixing flaky vtctld2 web test [#10541](https://github.com/vitessio/vitess/pull/10541)
+
diff --git a/changelog/15.0/15.0.0/release_notes.md b/changelog/15.0/15.0.0/release_notes.md
new file mode 100644
index 00000000000..f60fd0547f1
--- /dev/null
+++ b/changelog/15.0/15.0.0/release_notes.md
@@ -0,0 +1,463 @@
+# Release of Vitess v15.0.0
+## Summary
+
+- **[Known Issues](#known-issues)**
+- **[Breaking Changes](#breaking-changes)**
+ - [Flags](#flags)
+ - [VTTablet Flag Deletions](#vttablet-flag-deletions)
+ - [Vindex Interface](#vindex-interface)
+- **[Deprecations](#deprecations)**
+ - [LogStats Table and Keyspace Deprecated](#logstats-table-and-keyspace-deprecated)
+ - [Orchestrator Integration Deprecation](#orchestrator-integration-deprecation)
+ - [Connection Pool Prefill](#connection-pool-prefill)
+ - [InitShardPrimary Deprecation](#initshardprimary-deprecation)
+- **[Command-Line Syntax Deprecations](#command-line-syntax-deprecations)**
+ - [VTTablet Startup Flag Deletions](#vttablet-startup-flag-deletions)
+ - [VTTablet Startup Flag Deprecations](#vttablet-startup-flag-deprecations)
+ - [VTBackup Flag Deprecations](#vtbackup-flag-deprecations)
+- **[VTGate](#vtgate)**
+ - [vtgate --mysql-server-pool-conn-read-buffers](#vtgate--mysql-server-pool-conn-read-buffers)
+- **[VDiff2](#vdiff2)**
+ - [Resume Workflow](#resume-workflow)
+- **[New command line flags and behavior](#new-command-line)**
+ - [vtctl GetSchema --table-schema-only](#vtctl-getschema--table-schema-only)
+ - [Support for Additional Compressors and Decompressors During Backup & Restore](#support-for-additional-compressors-and-decompressors-during-backup-&-restore)
+ - [Independent OLAP and OLTP Transactional Timeouts](#independant-olap-and-oltp-transactional-timeouts)
+ - [Support for Specifying Group Information in Calls to VTGate](#support-for-specifying-group-information-in-calls-to-vtgate)
+- **[Online DDL Changes](#online-ddl-changes)**
+ - [Concurrent Vitess Migrations](#concurrent-vitess-migrations)
+ - [VTCtl Command Changes](#vtctl-command-changes)
+ - [New Syntax](#new-syntax)
+- **[Tablet Throttler](#tablet-throttler)**
+ - [API Changes](#api-changes)
+- **[Mysql Compatibility](#mysql-compatibility)**
+ - [System Settings](#system-settings)
+ - [Lookup Vindexes](#lookup-vindexes)
+- **[Durability Policy](#durability-policy)**
+ - [Cross Cell](#cross-cell)
+- **[New EXPLAIN Format](#new-explain-format)**
+ - [FORMAT=vtexplain](#formatvtexplain)
+- **[VTOrc](#vtorc)**
+ - [Old UI Removal and Replacement](#old-ui-removal-and-replacement)
+ - [Configuration Refactor and New Flags](#configuratoin-refactor-and-new-flags)
+ - [Example Upgrade](#example-upgrade)
+ - [Default Configuration Files](#default-configuration-files)
+- **[Flags Restructure](#flags-restructure)**
+ - [Flags Diff](#flags-diff)
+
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
+## Major Changes
+
+### Breaking Changes
+
+#### Flags
+
+- The deprecated `--cpu_profile` flag has been removed. Please use the `--pprof` flag instead.
+- The deprecated `--mem-profile-rate` flag has been removed. Please use `--pprof=mem` instead.
+- The deprecated `--mutex-profile-fraction` flag has been removed. Please use `--pprof=mutex` instead.
+- The deprecated vtgate/vtexplain/vtcombo flag `--planner_version` has been removed. Please use `--planner-version` instead.
+- The deprecated flag `--master_connect_retry` has been removed. Please use `--replication_connect_retry` instead.
+- `vtctl` commands that take shard names and ranges as positional arguments (e.g. `vtctl Reshard ks.workflow -80 -40,40-80`) need to have their positional arguments separated from their flag arguments by a double-dash separator to avoid the new parsing library from mistaking them as flags (e.g. `vtctl Reshard ks.workflow -- -80 -40,40-80`).
+- The `--cell` flag in the `vtgate` binary no longer has a default value. It is a required argument that has to be specified for the binary to run. Please explicitly specify the flag, if dependent on the flag's default value.
+- The `--db-config-*-*` VTTablet flags were deprecated in `v3.0.0`. They have now been deleted as part of this release. You must use `--db_dba_*` now.
+
+#### vttablet Flag Deletions
+The following VTTablet flags were deprecated in 7.0. They have now been deleted
+- `--queryserver-config-message-conn-pool-size`
+- `--queryserver-config-message-conn-pool-prefill-parallelism`
+- `--client-found-rows-pool-size` A different existing flag `--queryserver-config-transaction-cap` will be used instead
+- `--transaction_shutdown_grace_period` Use `--shutdown_grace_period` instead
+- `--queryserver-config-max-dml-rows`
+- `--queryserver-config-allowunsafe-dmls`
+- `--pool-name-prefix`
+- `--enable-autocommit` Autocommit is always allowed
+
+#### Vindex Interface
+
+All the vindex interface methods are changed by adding `context.Context` as an input parameter.
+
+E.g:
+```go
+Map(vcursor VCursor, .... ) ....
+ To
+Map(ctx context.Context, vcursor VCursor, .... ) ....
+```
+
+This only affects users who have added their own custom vindex implementation.
+They are required to change their implementation with these new interface method expectations.
+
+### Deprecations
+
+#### LogStats Table and Keyspace deprecated
+
+Information about which tables are used was being reported through the `Keyspace` and `Table` fields on LogStats.
+For multi-table queries, this output can be confusing, so we have added `TablesUsed`, that is a string array, listing all tables and which keyspace they are on.
+`Keyspace` and `Table` fields are deprecated and will be removed in the v16 release of Vitess.
+
+#### Orchestrator Integration Deprecation
+
+Orchestrator integration in `vttablet` has been deprecated. It will continue to work in this release but is liable to be removed in future releases.
+Consider using VTOrc instead of Orchestrator as VTOrc goes GA in this release.
+
+#### Connection Pool Prefill
+
+The connection pool with prefilled connections have been removed. The pool now does lazy connection creation.
+
+#### InitShardPrimary Deprecation
+
+The vtcltd command InitShardPrimary has been deprecated. Please use PlannedReparentShard instead.
+
+### Command-line syntax deprecations
+
+#### vttablet startup flag deletions
+The following VTTablet flags were deprecated in 7.0. They have now been deleted
+- --queryserver-config-message-conn-pool-size
+- --queryserver-config-message-conn-pool-prefill-parallelism
+- --client-found-rows-pool-size --queryserver-config-transaction-cap will be used instead
+- --transaction_shutdown_grace_period Use --shutdown_grace_period instead
+- --queryserver-config-max-dml-rows
+- --queryserver-config-allowunsafe-dmls
+- --pool-name-prefix
+- --enable-autocommit Autocommit is always allowed
+
+#### vttablet startup flag deprecations
+- `--enable-query-plan-field-caching` has been deprecated. It will be removed in v16.
+- `--enable_semi_sync` has been deprecated. It will be removed in v16. Instead, set the correct durability policy using `SetKeyspaceDurabilityPolicy`
+- `--queryserver-config-pool-prefill-parallelism`, `--queryserver-config-stream-pool-prefill-parallelism` and `--queryserver-config-transaction-prefill-parallelism` have all been deprecated. They will be removed in v16.
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+#### vtbackup flag deprecations
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+### VTGate
+
+#### vtgate --mysql-server-pool-conn-read-buffers
+
+`--mysql-server-pool-conn-read-buffers` enables pooling of buffers used to read from incoming
+connections, similar to the way pooling happens for write buffers. Defaults to off.
+
+### VDiff v2
+
+#### Resume Workflow
+
+We introduced the ability to resume a VDiff2 workflow:
+```
+$ vtctlclient --server=localhost:15999 VDiff --v2 customer.commerce2customer resume 4c664dc2-eba9-11ec-9ef7-920702940ee0
+VDiff 4c664dc2-eba9-11ec-9ef7-920702940ee0 resumed on target shards, use show to view progress
+
+$ vtctlclient --server=localhost:15999 VDiff --v2 customer.commerce2customer show last
+
+VDiff Summary for customer.commerce2customer (4c664dc2-eba9-11ec-9ef7-920702940ee0)
+State: completed
+RowsCompared: 196
+HasMismatch: false
+StartedAt: 2022-06-26 22:44:29
+CompletedAt: 2022-06-26 22:44:31
+
+Use "--format=json" for more detailed output.
+
+$ vtctlclient --server=localhost:15999 VDiff --v2 --format=json customer.commerce2customer show last
+{
+ "Workflow": "commerce2customer",
+ "Keyspace": "customer",
+ "State": "completed",
+ "UUID": "4c664dc2-eba9-11ec-9ef7-920702940ee0",
+ "RowsCompared": 196,
+ "HasMismatch": false,
+ "Shards": "0",
+ "StartedAt": "2022-06-26 22:44:29",
+ "CompletedAt": "2022-06-26 22:44:31"
+}
+```
+
+We leverage this resume capability to automatically restart a VDiff2 workflow that encountered a retryable error.
+
+We also made a number of other enhancements like progress reporting and features needed to make it a full replacement for VDiff v1. You can see more details in the tracking ticket for the VDiff2 feature complete target: https://github.com/vitessio/vitess/issues/10494
+
+Now that VDiff v2 is feature complete in 15.0, we hope to make it GA in 16.0.
+
+Please see the VDiff2 [documentation](https://vitess.io/docs/15.0/reference/vreplication/vdiff2/) for additional information.
+
+### New command line flags and behavior
+
+#### vtctl GetSchema --table-schema-only
+
+The new flag `--table-schema-only` skips column introspection. `GetSchema` only returns general schema analysis, and specifically it includes the `CREATE TABLE|VIEW` statement in the `schema` field.
+
+#### Support for additional compressors and decompressors during backup & restore
+Backup/Restore now allow you many more options for compression and decompression instead of relying on the default compressor(`pargzip`).
+There are some built-in compressors which you can use out-of-the-box. Users will need to evaluate which option works best for their
+use-case. Here are the flags that control this feature
+
+- `--compression-engine-name`
+- `--external-compressor`
+- `--external-decompressor`
+- `--external-compressor-extension`
+- `--compression-level`
+
+`--compression-engine-name` specifies the engine used for compression. It can have one of the following values
+
+- pargzip (Default)
+- pgzip
+- lz4
+- zstd
+- external
+
+If you want to use any of the built-in compressors, simply set one of the above values other than `external` for `--compression-engine-name`. The value
+specified in `--compression-engine-name` is saved in the backup MANIFEST, which is later read by the restore process to decide which
+engine to use for decompression. Default value for engine is 'pargzip'.
+
+If you would like to use a custom command or external tool for compression/decompression then you need to provide the full command with
+arguments to the `--external-compressor` and `--external-decompressor` flags. `--external-compressor-extension` flag also needs to be provided
+so that compressed files are created with the correct extension. If the external command is not using any of the built-in compression engines
+(i.e. pgzip, pargzip, lz4 or zstd) then you need to set `--compression-engine-name` to value 'external'.
+
+Please note that if you want to keep the current behavior then you don't need to provide any of these flags.
+You can read more about backup & restore [here] (https://vitess.io/docs/15.0/user-guides/operating-vitess/backup-and-restore/).
+
+If you decided to switch from an external compressor to one of the built-in supported compressors (i.e. pgzip, pargzip, lz4 or zstd) at any point
+in the future, you will need to do it in two steps.
+
+- step #1, set `--external-compressor` and `--external-compressor-extension` flag values to empty and change `--compression-engine-name` to desired value.
+- Step #2, after at least one cycle of backup with new configuration, you can set `--external-decompressor` flag value to empty.
+
+The reason you cannot change all the values together is because the restore process will then have no way to find out which external decompressor
+should be used to process the previous backup. Please make sure you have thought out all possible scenarios for restore before transitioning from one
+compression engine to another.
+
+#### Independent OLAP and OLTP transactional timeouts
+
+`--queryserver-config-olap-transaction-timeout` specifies the timeout applied
+to a transaction created within an OLAP workload. The default value is `30`
+seconds, but this can be raised, lowered, or set to zero to disable the timeout
+altogether.
+
+Until now, while OLAP queries would bypass the query timeout, transactions
+created within an OLAP session would be rolled back
+`--queryserver-config-transaction-timeout` seconds after the transaction was
+started.
+
+As of now, OLTP and OLAP transaction timeouts can be configured independently of each
+other.
+
+The main use case is to run queries spanning a long period of time which
+require transactional guarantees such as consistency or atomicity.
+
+#### Support for specifying group information in calls to VTGate
+
+`--grpc-use-effective-groups` allows non-SSL callers to specify groups information for a caller.
+Until now, you could only specify the caller-id for the security context used to authorize queries.
+As of now, you can specify the principal of the caller, and any groups they belong to.
+
+### Online DDL changes
+
+#### Concurrent vitess migrations
+
+All Online DDL migrations using the `vitess` strategy are now eligible to run concurrently, given `--allow-concurrent` DDL strategy flag. Until now, only `CREATE`, `DROP` and `REVERT` migrations were eligible, and now `ALTER` migrations are supported, as well. The terms for `ALTER` migrations concurrency:
+
+- DDL strategy must be `vitess --allow-concurent ...`
+- No two migrations can run concurrently on the same table
+- No two `ALTER`s will copy table data concurrently
+- A concurrent `ALTER` migration will not start if another `ALTER` is running and is not `ready_to_complete`
+
+The main use case is to run multiple concurrent migrations, all with `--postpone-completion`. All table-copy operations will run sequentially, but no migration will actually cut-over, and eventually all migrations will be `ready_to_complete`, continuously tailing the binary logs and keeping up-to-date. A quick and iterative `ALTER VITESS_MIGRATION '...' COMPLETE` sequence of commands will cut-over all migrations _closely together_ (though not atomically together).
+
+#### vtctl command changes.
+All `online DDL show` commands can now be run with a few additional parameters
+- `--order` , order migrations in the output by either ascending or descending order of their `id` fields.
+- `--skip` , skip specified number of migrations in the output.
+- `--limit` , limit results to a specified number of migrations in the output.
+
+#### New syntax
+
+The following is now supported:
+
+```sql
+ALTER VITESS_MIGRATION COMPLETE ALL
+```
+
+This works on all pending migrations (`queued`, `ready`, `running`) and internally issues a `ALTER VITESS_MIGRATION '' COMPLETE` for each one. The command is useful for completing multiple concurrent migrations (see above) that are open-ended (`--postpone-completion`).
+
+### Tablet Throttler
+
+#### API changes
+
+API endpoint `/debug/vars` now exposes throttler metrics, such as number of hits and errors per app per check type. Example:
+
+```shell
+$ curl -s http://127.0.0.1:15100/debug/vars | jq . | grep Throttler
+ "ThrottlerAggregatedMysqlSelf": 0.191718,
+ "ThrottlerAggregatedMysqlShard": 0.960054,
+ "ThrottlerCheckAnyError": 27,
+ "ThrottlerCheckAnyMysqlSelfError": 13,
+ "ThrottlerCheckAnyMysqlSelfTotal": 38,
+ "ThrottlerCheckAnyMysqlShardError": 14,
+ "ThrottlerCheckAnyMysqlShardTotal": 42,
+ "ThrottlerCheckAnyTotal": 80,
+ "ThrottlerCheckMysqlSelfSecondsSinceHealthy": 0,
+ "ThrottlerCheckMysqlShardSecondsSinceHealthy": 0,
+ "ThrottlerProbesLatency": 355523,
+ "ThrottlerProbesTotal": 74,
+```
+
+### Mysql Compatibility
+
+#### System Settings
+Vitess has had support for system settings from release 7.0 onwards, but this support came with some caveats.
+As soon as a client session changes a default system setting, a mysql connection gets reserved for it.
+This can sometimes lead to clients running out of mysql connections.
+Users were instructed to minimize the use of this feature and to try to set the desired system settings as defaults in the mysql config.
+
+With this release, Vitess can handle system settings changes in a much better way and clients can use them more freely.
+Vitess now has the ability to pool changed settings without reserving connections for any particular session.
+
+This feature can be enabled by setting `queryserver-enable-settings-pool` flag on the vttablet. It is disabled by default.
+In future releases, we will make this flag enabled by default.
+
+#### Lookup Vindexes
+
+Lookup vindexes now support a new parameter `multi_shard_autocommit`. If this is set to `true`, lookup vindex dml queries will be sent as autocommit to all shards instead of being wrapped in a transaction.
+This is different from the existing `autocommit` parameter where the query is sent in its own transaction separate from the ongoing transaction if any i.e. begin -> lookup query execs -> commit/rollback
+
+### Durability Policy
+
+#### Cross Cell
+
+A new durability policy `cross_cell` is now supported. `cross_cell` durability policy only allows replica tablets from a different cell than the current primary to
+send semi-sync ACKs. This ensures that any committed write exists in at least 2 tablets belonging to different cells.
+
+### New EXPLAIN format
+
+#### FORMAT=vtexplain
+
+With this new `explain` format, you can get an output that is very similar to the command line `vtexplain` app, but from a running `vtgate`, through a MySQL query.
+
+### VTOrc
+
+#### Old UI Removal and Replacement
+
+The old UI that VTOrc inherited from `Orchestrator` has been removed. A replacement UI, more consistent with the other Vitess binaries has been created.
+In order to use the new UI, `--port` flag has to be provided.
+
+Along with the UI, the old APIs have also been deprecated. However, some of them have been ported over to the new UI -
+
+| Old API | New API | Additional notes |
+|----------------------------------|----------------------------------|-----------------------------------------------------------------------|
+| `/api/problems` | `/api/problems` | The new API also supports filtering using the keyspace and shard name |
+| `/api/disable-global-recoveries` | `/api/disable-global-recoveries` | Functionally remains the same |
+| `/api/enable-global-recoveries` | `/api/enable-global-recoveries` | Functionally remains the same |
+| `/api/health` | `/debug/health` | Functionally remains the same |
+| `/api/replication-analysis` | `/api/replication-analysis` | Functionally remains the same. Output is now JSON format. |
+
+Apart from these APIs, we also now have `/debug/status`, `/debug/vars` and `/debug/liveness` available in the new UI.
+
+#### Configuration Refactor and New Flags
+
+Since VTOrc was forked from `Orchestrator`, it inherited a lot of configurations that don't make sense for the Vitess use-case.
+All of such configurations have been removed.
+
+VTOrc ignores the configurations that it doesn't understand. So old configurations can be kept around on upgrading and won't cause any issues.
+They will just be ignored.
+
+For all the configurations that are kept, flags have been added for them and the flags are the desired way to pass these configurations going forward.
+The config file will be deprecated and removed in upcoming releases. The following is a list of all the configurations that are kept and the associated flags added.
+
+| Configurations Kept | Flags Introduced |
+|:-------------------------------------:|:-------------------------------------:|
+| SQLite3DataFile | `--sqlite-data-file` |
+| InstancePollSeconds | `--instance-poll-time` |
+| SnapshotTopologiesIntervalHours | `--snapshot-topology-interval` |
+| ReasonableReplicationLagSeconds | `--reasonable-replication-lag` |
+| AuditLogFile | `--audit-file-location` |
+| AuditToSyslog | `--audit-to-backend` |
+| AuditToBackendDB | `--audit-to-syslog` |
+| AuditPurgeDays | `--audit-purge-duration` |
+| RecoveryPeriodBlockSeconds | `--recovery-period-block-duration` |
+| PreventCrossDataCenterPrimaryFailover | `--prevent-cross-cell-failover` |
+| LockShardTimeoutSeconds | `--lock-shard-timeout` |
+| WaitReplicasTimeoutSeconds | `--wait-replicas-timeout` |
+| TopoInformationRefreshSeconds | `--topo-information-refresh-duration` |
+| RecoveryPollSeconds | `--recovery-poll-duration` |
+
+Apart from configurations, some flags from VTOrc have also been removed -
+- `sibling`
+- `destination`
+- `discovery`
+- `skip-unresolve`
+- `skip-unresolve-check`
+- `noop`
+- `binlog`
+- `statement`
+- `grab-election`
+- `promotion-rule`
+- `skip-continuous-registration`
+- `enable-database-update`
+- `ignore-raft-setup`
+- `tag`
+
+The ideal way to ensure backward compatibility is to remove the flags listed above while on the previous release. Then upgrade VTOrc.
+After upgrading, remove the config file and instead pass the flags that are introduced.
+
+#### Example Upgrade
+
+If you are running VTOrc with the flags `--ignore-raft-setup --clusters_to_watch="ks/0" --config="path/to/config"` and the following configuration
+```json
+{
+ "Debug": true,
+ "ListenAddress": ":6922",
+ "MySQLTopologyUser": "orc_client_user",
+ "MySQLTopologyPassword": "orc_client_user_password",
+ "MySQLReplicaUser": "vt_repl",
+ "MySQLReplicaPassword": "",
+ "RecoveryPeriodBlockSeconds": 1,
+ "InstancePollSeconds": 1,
+ "PreventCrossDataCenterPrimaryFailover": true
+}
+```
+First drop the flag `--ignore-raft-setup` while on the previous release. So, you'll be running VTOrc with `--clusters_to_watch="ks/0" --config="path/to/config"` and the same configuration listed above.
+
+Now you can upgrade your VTOrc version continuing to use the same flags and configurations, and it will continue to work just the same. If you wish to use the new UI, then you can add the `--port` flag as well.
+
+After upgrading, you can drop the configuration entirely and use the new flags like `--clusters_to_watch="ks/0" --recovery-period-block-duration=1s --instance-poll-time=1s --prevent-cross-cell-failover`
+
+#### Default Configuration Files
+
+The default files that VTOrc searches for configurations in have also changed from `"/etc/orchestrator.conf.json", "conf/orchestrator.conf.json", "orchestrator.conf.json"` to
+`"/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json"`.
+
+### Flags Restructure
+
+#### Flags Diff
+
+In addition to these major streams of work in release-15.0, we have made tremendous progress on [VEP-4, aka The Flag Situation](https://github.com/vitessio/enhancements/blob/main/veps/vep-4.md), reorganizing our code so that Vitess binaries and their flags are
+clearly aligned in help text. An immediate win for usability, this positions us well to move on to a [viper](https://github.com/spf13/viper) implementation which will facilitate additional improvements including standardization of flag syntax and runtime configuration reloads.
+We are also aligning with industry standards regarding the use of flags, ensuring a seamless experience for users migrating from or integrating with other platforms.
+Below are the changes for each binary.
+- [mysqlctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctl.diff)
+- [mysqlctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctld.diff)
+- [vtaclcheck](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff)
+- [vtadmin](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtadmin.diff)
+- [vtctlclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctlclient.diff)
+- [vtctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctld.diff)
+- [vtctldclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctldclient.diff)
+- [vtexplain](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtexplain.diff)
+- [vtgate](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgate.diff)
+- [vtgtr](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgtr.diff)
+- [vtorc](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtorc.diff)
+- [vttablet](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttablet.diff)
+- [vttestserver](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttestserver.diff)
+- [vttlstest](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttlstest.diff)
+- [zk](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zk.diff)
+- [zkctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctl.diff)
+- [zkctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctld.diff)
+
+------------
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.0/changelog.md).
+
+The release includes 595 commits (excluding merges)
+
+Thanks to all our contributors: @Abirdcfly, @DeathBorn, @GuptaManan100, @K-Kumar-01, @L3o-pold, @Phanatic, @Weijun-H, @ajm188, @arthurschreiber, @arvind-murty, @brirams, @dbussink, @deepthi, @dependabot[bot], @doeg, @frouioui, @harshit-gangal, @mattlord, @maxenglander, @mgale, @notfelineit, @ofiriluz, @olyazavr, @quinox, @rafer, @renatolabs, @rohit-nayak-ps, @rsajwani, @rvrangel, @saunderst, @shlomi-noach, @systay, @vitess-bot[bot], @vmg, @yoheimuta
+
diff --git a/doc/releasenotes/15_0_0_summary.md b/changelog/15.0/15.0.0/summary.md
similarity index 58%
rename from doc/releasenotes/15_0_0_summary.md
rename to changelog/15.0/15.0.0/summary.md
index 0ca9ccee6c6..56b784b55dd 100644
--- a/doc/releasenotes/15_0_0_summary.md
+++ b/changelog/15.0/15.0.0/summary.md
@@ -1,23 +1,58 @@
## Summary
-- [Vindex Interface](#vindex-interface)
-- [LogStats Table and Keyspace deprecated](#logstats-table-and-keyspace-deprecated)
-- [Command-line syntax deprecations](#command-line-syntax-deprecations)
-- [New command line flags and behavior](#new-command-line-flags-and-behavior)
-- [Online DDL changes](#online-ddl-changes)
-- [Tablet throttler](#tablet-throttler)
-- [VDiff2](#vdiff2)
-- [Mysql Compatibility](#mysql-compatibility)
-- [Durability Policy](#durability-policy)
-- [New EXPLAIN format](#new-explain-format)
-
-## Known Issues
+- **[Known Issues](#known-issues)**
+- **[Breaking Changes](#breaking-changes)**
+ - [Flags](#flags)
+ - [VTTablet Flag Deletions](#vttablet-flag-deletions)
+ - [Vindex Interface](#vindex-interface)
+- **[Deprecations](#deprecations)**
+ - [LogStats Table and Keyspace Deprecated](#logstats-table-and-keyspace-deprecated)
+ - [Orchestrator Integration Deprecation](#orchestrator-integration-deprecation)
+ - [Connection Pool Prefill](#connection-pool-prefill)
+ - [InitShardPrimary Deprecation](#initshardprimary-deprecation)
+- **[Command-Line Syntax Deprecations](#command-line-syntax-deprecations)**
+ - [VTTablet Startup Flag Deletions](#vttablet-startup-flag-deletions)
+ - [VTTablet Startup Flag Deprecations](#vttablet-startup-flag-deprecations)
+ - [VTBackup Flag Deprecations](#vtbackup-flag-deprecations)
+- **[VTGate](#vtgate)**
+ - [vtgate --mysql-server-pool-conn-read-buffers](#vtgate--mysql-server-pool-conn-read-buffers)
+- **[VDiff2](#vdiff2)**
+ - [Resume Workflow](#resume-workflow)
+- **[New command line flags and behavior](#new-command-line)**
+ - [vtctl GetSchema --table-schema-only](#vtctl-getschema--table-schema-only)
+ - [Support for Additional Compressors and Decompressors During Backup & Restore](#support-for-additional-compressors-and-decompressors-during-backup-&-restore)
+ - [Independent OLAP and OLTP Transactional Timeouts](#independant-olap-and-oltp-transactional-timeouts)
+ - [Support for Specifying Group Information in Calls to VTGate](#support-for-specifying-group-information-in-calls-to-vtgate)
+- **[Online DDL Changes](#online-ddl-changes)**
+ - [Concurrent Vitess Migrations](#concurrent-vitess-migrations)
+ - [VTCtl Command Changes](#vtctl-command-changes)
+ - [New Syntax](#new-syntax)
+- **[Tablet Throttler](#tablet-throttler)**
+ - [API Changes](#api-changes)
+- **[Mysql Compatibility](#mysql-compatibility)**
+ - [System Settings](#system-settings)
+ - [Lookup Vindexes](#lookup-vindexes)
+- **[Durability Policy](#durability-policy)**
+ - [Cross Cell](#cross-cell)
+- **[New EXPLAIN Format](#new-explain-format)**
+ - [FORMAT=vtexplain](#formatvtexplain)
+- **[VTOrc](#vtorc)**
+ - [Old UI Removal and Replacement](#old-ui-removal-and-replacement)
+ - [Configuration Refactor and New Flags](#configuratoin-refactor-and-new-flags)
+ - [Example Upgrade](#example-upgrade)
+ - [Default Configuration Files](#default-configuration-files)
+- **[Flags Restructure](#flags-restructure)**
+ - [Flags Diff](#flags-diff)
+
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
## Major Changes
-### Breaking Changes
+### Breaking Changes
-#### Flags
+#### Flags
- The deprecated `--cpu_profile` flag has been removed. Please use the `--pprof` flag instead.
- The deprecated `--mem-profile-rate` flag has been removed. Please use `--pprof=mem` instead.
@@ -25,8 +60,21 @@
- The deprecated vtgate/vtexplain/vtcombo flag `--planner_version` has been removed. Please use `--planner-version` instead.
- The deprecated flag `--master_connect_retry` has been removed. Please use `--replication_connect_retry` instead.
- `vtctl` commands that take shard names and ranges as positional arguments (e.g. `vtctl Reshard ks.workflow -80 -40,40-80`) need to have their positional arguments separated from their flag arguments by a double-dash separator to avoid the new parsing library from mistaking them as flags (e.g. `vtctl Reshard ks.workflow -- -80 -40,40-80`).
+- The `--cell` flag in the `vtgate` binary no longer has a default value. It is a required argument that has to be specified for the binary to run. Please explicitly specify the flag, if dependent on the flag's default value.
+- The `--db-config-*-*` VTTablet flags were deprecated in `v3.0.0`. They have now been deleted as part of this release. You must use `--db_dba_*` now.
-#### Vindex Interface
+#### vttablet Flag Deletions
+The following VTTablet flags were deprecated in 7.0. They have now been deleted
+- `--queryserver-config-message-conn-pool-size`
+- `--queryserver-config-message-conn-pool-prefill-parallelism`
+- `--client-found-rows-pool-size` A different existing flag `--queryserver-config-transaction-cap` will be used instead
+- `--transaction_shutdown_grace_period` Use `--shutdown_grace_period` instead
+- `--queryserver-config-max-dml-rows`
+- `--queryserver-config-allowunsafe-dmls`
+- `--pool-name-prefix`
+- `--enable-autocommit` Autocommit is always allowed
+
+#### Vindex Interface
All the vindex interface methods are changed by adding `context.Context` as an input parameter.
@@ -40,26 +88,30 @@ Map(ctx context.Context, vcursor VCursor, .... ) ....
This only affects users who have added their own custom vindex implementation.
They are required to change their implementation with these new interface method expectations.
-#### LogStats Table and Keyspace deprecated
+### Deprecations
+
+#### LogStats Table and Keyspace deprecated
Information about which tables are used was being reported through the `Keyspace` and `Table` fields on LogStats.
For multi-table queries, this output can be confusing, so we have added `TablesUsed`, that is a string array, listing all tables and which keyspace they are on.
`Keyspace` and `Table` fields are deprecated and will be removed in the v16 release of Vitess.
-#### Orchestrator Integration Deprecation
+#### Orchestrator Integration Deprecation
Orchestrator integration in `vttablet` has been deprecated. It will continue to work in this release but is liable to be removed in future releases.
Consider using VTOrc instead of Orchestrator as VTOrc goes GA in this release.
-#### Connection Pool Prefill
+#### Connection Pool Prefill
The connection pool with prefilled connections have been removed. The pool now does lazy connection creation.
-Following flags are deprecated: `queryserver-config-pool-prefill-parallelism`, `queryserver-config-stream-pool-prefill-parallelism`, `queryserver-config-transaction-prefill-parallelism`
-and will be removed in future version.
-### Command-line syntax deprecations
+#### InitShardPrimary Deprecation
-#### vttablet startup flag deletions
+The vtcltd command InitShardPrimary has been deprecated. Please use PlannedReparentShard instead.
+
+### Command-line syntax deprecations
+
+#### vttablet startup flag deletions
The following VTTablet flags were deprecated in 7.0. They have now been deleted
- --queryserver-config-message-conn-pool-size
- --queryserver-config-message-conn-pool-prefill-parallelism
@@ -70,19 +122,25 @@ The following VTTablet flags were deprecated in 7.0. They have now been deleted
- --pool-name-prefix
- --enable-autocommit Autocommit is always allowed
-#### vttablet startup flag deprecations
-- --enable-query-plan-field-caching is now deprecated. It will be removed in v16.
-- --enable_semi_sync is now deprecated. It will be removed in v16. Instead, set the correct durability policy using `SetKeyspaceDurabilityPolicy`
-- --queryserver-config-pool-prefill-parallelism, --queryserver-config-stream-pool-prefill-parallelism and --queryserver-config-transaction-prefill-parallelism have all been deprecated. They will be removed in v16.
+#### vttablet startup flag deprecations
+- `--enable-query-plan-field-caching` has been deprecated. It will be removed in v16.
+- `--enable_semi_sync` has been deprecated. It will be removed in v16. Instead, set the correct durability policy using `SetKeyspaceDurabilityPolicy`
+- `--queryserver-config-pool-prefill-parallelism`, `--queryserver-config-stream-pool-prefill-parallelism` and `--queryserver-config-transaction-prefill-parallelism` have all been deprecated. They will be removed in v16.
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+#### vtbackup flag deprecations
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
-### New command line flags and behavior
+### VTGate
-#### vtgate --mysql-server-pool-conn-read-buffers
+#### vtgate --mysql-server-pool-conn-read-buffers
`--mysql-server-pool-conn-read-buffers` enables pooling of buffers used to read from incoming
connections, similar to the way pooling happens for write buffers. Defaults to off.
-### VDiff2
+### VDiff v2
+
+#### Resume Workflow
We introduced the ability to resume a VDiff2 workflow:
```
@@ -114,24 +172,30 @@ $ vtctlclient --server=localhost:15999 VDiff --v2 --format=json customer.commerc
}
```
+We leverage this resume capability to automatically restart a VDiff2 workflow that encountered a retryable error.
+
+We also made a number of other enhancements like progress reporting and features needed to make it a full replacement for VDiff v1. You can see more details in the tracking ticket for the VDiff2 feature complete target: https://github.com/vitessio/vitess/issues/10494
+
+Now that VDiff v2 is feature complete in 15.0, we hope to make it GA in 16.0.
+
Please see the VDiff2 [documentation](https://vitess.io/docs/15.0/reference/vreplication/vdiff2/) for additional information.
-### New command line flags and behavior
+### New command line flags and behavior
-#### vtctl GetSchema --table-schema-only
+#### vtctl GetSchema --table-schema-only
The new flag `--table-schema-only` skips column introspection. `GetSchema` only returns general schema analysis, and specifically it includes the `CREATE TABLE|VIEW` statement in the `schema` field.
-#### Support for additional compressors and decompressors during backup & restore
-Backup/Restore now allow you many more options for compression and decompression instead of relying on the default compressor(`pgzip`).
+#### Support for additional compressors and decompressors during backup & restore
+Backup/Restore now allow you many more options for compression and decompression instead of relying on the default compressor(`pargzip`).
There are some built-in compressors which you can use out-of-the-box. Users will need to evaluate which option works best for their
use-case. Here are the flags that control this feature
-- --compression-engine-name
-- --external-compressor
-- --external-decompressor
-- --external-compressor-extension
-- --compression-level
+- `--compression-engine-name`
+- `--external-compressor`
+- `--external-decompressor`
+- `--external-compressor-extension`
+- `--compression-level`
`--compression-engine-name` specifies the engine used for compression. It can have one of the following values
@@ -141,20 +205,19 @@ use-case. Here are the flags that control this feature
- zstd
- external
-where 'external' is set only when using a custom command or tool other than the ones that are already provided.
-If you want to use any of the built-in compressors, simply set one of the above values for `--compression-engine-name`. The value
+If you want to use any of the built-in compressors, simply set one of the above values other than `external` for `--compression-engine-name`. The value
specified in `--compression-engine-name` is saved in the backup MANIFEST, which is later read by the restore process to decide which
-engine to use for decompression. Default value for engine is 'pgzip'.
+engine to use for decompression. Default value for engine is 'pargzip'.
If you would like to use a custom command or external tool for compression/decompression then you need to provide the full command with
arguments to the `--external-compressor` and `--external-decompressor` flags. `--external-compressor-extension` flag also needs to be provided
so that compressed files are created with the correct extension. If the external command is not using any of the built-in compression engines
-(i-e pgzip, pargzip, lz4 or zstd) then you need to set `--compression-engine-name` to value 'external'.
+(i.e. pgzip, pargzip, lz4 or zstd) then you need to set `--compression-engine-name` to value 'external'.
-Please note that if you want the current production behavior then you don't need to change any of these flags.
+Please note that if you want to keep the current behavior then you don't need to provide any of these flags.
You can read more about backup & restore [here] (https://vitess.io/docs/15.0/user-guides/operating-vitess/backup-and-restore/).
-If you decided to switch from an external compressor to one of the built-in supported compressors (i-e pgzip, pargzip, lz4 or zstd) at any point
+If you decided to switch from an external compressor to one of the built-in supported compressors (i.e. pgzip, pargzip, lz4 or zstd) at any point
in the future, you will need to do it in two steps.
- step #1, set `--external-compressor` and `--external-compressor-extension` flag values to empty and change `--compression-engine-name` to desired value.
@@ -164,7 +227,7 @@ The reason you cannot change all the values together is because the restore proc
should be used to process the previous backup. Please make sure you have thought out all possible scenarios for restore before transitioning from one
compression engine to another.
-#### Independent OLAP and OLTP transactional timeouts
+#### Independent OLAP and OLTP transactional timeouts
`--queryserver-config-olap-transaction-timeout` specifies the timeout applied
to a transaction created within an OLAP workload. The default value is `30`
@@ -182,15 +245,15 @@ other.
The main use case is to run queries spanning a long period of time which
require transactional guarantees such as consistency or atomicity.
-#### Support for specifying group information in calls to VTGate
+#### Support for specifying group information in calls to VTGate
`--grpc-use-effective-groups` allows non-SSL callers to specify groups information for a caller.
Until now, you could only specify the caller-id for the security context used to authorize queries.
As of now, you can specify the principal of the caller, and any groups they belong to.
-### Online DDL changes
+### Online DDL changes
-#### Concurrent vitess migrations
+#### Concurrent vitess migrations
All Online DDL migrations using the `vitess` strategy are now eligible to run concurrently, given `--allow-concurrent` DDL strategy flag. Until now, only `CREATE`, `DROP` and `REVERT` migrations were eligible, and now `ALTER` migrations are supported, as well. The terms for `ALTER` migrations concurrency:
@@ -201,13 +264,13 @@ All Online DDL migrations using the `vitess` strategy are now eligible to run co
The main use case is to run multiple concurrent migrations, all with `--postpone-completion`. All table-copy operations will run sequentially, but no migration will actually cut-over, and eventually all migrations will be `ready_to_complete`, continuously tailing the binary logs and keeping up-to-date. A quick and iterative `ALTER VITESS_MIGRATION '...' COMPLETE` sequence of commands will cut-over all migrations _closely together_ (though not atomically together).
-#### vtctl command changes.
+#### vtctl command changes.
All `online DDL show` commands can now be run with a few additional parameters
- `--order` , order migrations in the output by either ascending or descending order of their `id` fields.
- `--skip` , skip specified number of migrations in the output.
- `--limit` , limit results to a specified number of migrations in the output.
-#### New syntax
+#### New syntax
The following is now supported:
@@ -217,9 +280,9 @@ ALTER VITESS_MIGRATION COMPLETE ALL
This works on all pending migrations (`queued`, `ready`, `running`) and internally issues a `ALTER VITESS_MIGRATION '' COMPLETE` for each one. The command is useful for completing multiple concurrent migrations (see above) that are open-ended (`--postpone-completion`).
-### Tablet throttler
+### Tablet Throttler
-#### API changes
+#### API changes
API endpoint `/debug/vars` now exposes throttler metrics, such as number of hits and errors per app per check type. Example:
@@ -239,41 +302,41 @@ $ curl -s http://127.0.0.1:15100/debug/vars | jq . | grep Throttler
"ThrottlerProbesTotal": 74,
```
-### Mysql Compatibility
+### Mysql Compatibility
-#### System Settings
-Vitess supported system settings from release 7.0 onwards, but it was always with a pinch of salt.
-As soon as a client session changes a default system setting, the mysql connection gets blocked for it.
-This leads to clients running out of mysql connections.
-The clients were instructed to use this to a minimum and try to set those changed system settings as default on the mysql.
+#### System Settings
+Vitess has had support for system settings from release 7.0 onwards, but this support came with some caveats.
+As soon as a client session changes a default system setting, a mysql connection gets reserved for it.
+This can sometimes lead to clients running out of mysql connections.
+Users were instructed to minimize the use of this feature and to try to set the desired system settings as defaults in the mysql config.
-With this release, Vitess can handle system settings changes in a much better way and the clients can use it more freely.
-Vitess now pools those changed settings and does not reserve it for any particular session.
+With this release, Vitess can handle system settings changes in a much better way and clients can use them more freely.
+Vitess now has the ability to pool changed settings without reserving connections for any particular session.
This feature can be enabled by setting `queryserver-enable-settings-pool` flag on the vttablet. It is disabled by default.
In future releases, we will make this flag enabled by default.
-#### Lookup Vindexes
+#### Lookup Vindexes
Lookup vindexes now support a new parameter `multi_shard_autocommit`. If this is set to `true`, lookup vindex dml queries will be sent as autocommit to all shards instead of being wrapped in a transaction.
This is different from the existing `autocommit` parameter where the query is sent in its own transaction separate from the ongoing transaction if any i.e. begin -> lookup query execs -> commit/rollback
-### Durability Policy
+### Durability Policy
-#### Cross Cell
+#### Cross Cell
A new durability policy `cross_cell` is now supported. `cross_cell` durability policy only allows replica tablets from a different cell than the current primary to
send semi-sync ACKs. This ensures that any committed write exists in at least 2 tablets belonging to different cells.
-### New EXPLAIN format
+### New EXPLAIN format
-#### FORMAT=vtexplain
+#### FORMAT=vtexplain
With this new `explain` format, you can get an output that is very similar to the command line `vtexplain` app, but from a running `vtgate`, through a MySQL query.
-### VTOrc
+### VTOrc
-#### Old UI Removal and Replacement
+#### Old UI Removal and Replacement
The old UI that VTOrc inherited from `Orchestrator` has been removed. A replacement UI, more consistent with the other Vitess binaries has been created.
In order to use the new UI, `--port` flag has to be provided.
@@ -290,7 +353,7 @@ Along with the UI, the old APIs have also been deprecated. However, some of them
Apart from these APIs, we also now have `/debug/status`, `/debug/vars` and `/debug/liveness` available in the new UI.
-#### Configuration Refactor and New Flags
+#### Configuration Refactor and New Flags
Since VTOrc was forked from `Orchestrator`, it inherited a lot of configurations that don't make sense for the Vitess use-case.
All of such configurations have been removed.
@@ -337,7 +400,7 @@ Apart from configurations, some flags from VTOrc have also been removed -
The ideal way to ensure backward compatibility is to remove the flags listed above while on the previous release. Then upgrade VTOrc.
After upgrading, remove the config file and instead pass the flags that are introduced.
-#### Example Upgrade
+#### Example Upgrade
If you are running VTOrc with the flags `--ignore-raft-setup --clusters_to_watch="ks/0" --config="path/to/config"` and the following configuration
```json
@@ -359,7 +422,33 @@ Now you can upgrade your VTOrc version continuing to use the same flags and conf
After upgrading, you can drop the configuration entirely and use the new flags like `--clusters_to_watch="ks/0" --recovery-period-block-duration=1s --instance-poll-time=1s --prevent-cross-cell-failover`
-#### Default Configuration Files
+#### Default Configuration Files
The default files that VTOrc searches for configurations in have also changed from `"/etc/orchestrator.conf.json", "conf/orchestrator.conf.json", "orchestrator.conf.json"` to
`"/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json"`.
+
+### Flags Restructure
+
+#### Flags Diff
+
+In addition to these major streams of work in release-15.0, we have made tremendous progress on [VEP-4, aka The Flag Situation](https://github.com/vitessio/enhancements/blob/main/veps/vep-4.md), reorganizing our code so that Vitess binaries and their flags are
+clearly aligned in help text. An immediate win for usability, this positions us well to move on to a [viper](https://github.com/spf13/viper) implementation which will facilitate additional improvements including standardization of flag syntax and runtime configuration reloads.
+We are also aligning with industry standards regarding the use of flags, ensuring a seamless experience for users migrating from or integrating with other platforms.
+Below are the changes for each binary.
+- [mysqlctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctl.diff)
+- [mysqlctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctld.diff)
+- [vtaclcheck](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff)
+- [vtadmin](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtadmin.diff)
+- [vtctlclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctlclient.diff)
+- [vtctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctld.diff)
+- [vtctldclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctldclient.diff)
+- [vtexplain](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtexplain.diff)
+- [vtgate](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgate.diff)
+- [vtgtr](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgtr.diff)
+- [vtorc](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtorc.diff)
+- [vttablet](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttablet.diff)
+- [vttestserver](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttestserver.diff)
+- [vttlstest](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttlstest.diff)
+- [zk](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zk.diff)
+- [zkctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctl.diff)
+- [zkctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctld.diff)
diff --git a/changelog/15.0/15.0.1/changelog.md b/changelog/15.0/15.0.1/changelog.md
new file mode 100644
index 00000000000..c44b69b5e8e
--- /dev/null
+++ b/changelog/15.0/15.0.1/changelog.md
@@ -0,0 +1,43 @@
+# Changelog of Vitess v15.0.1
+
+### Bug fixes
+#### Build/CI
+ * Docker Image Context Fix [#11628](https://github.com/vitessio/vitess/pull/11628)
+ * Addition of a CI tool to detect dead links in test/config.json [#11668](https://github.com/vitessio/vitess/pull/11668)
+ * Fix files changes filtering in CI [#11714](https://github.com/vitessio/vitess/pull/11714)
+#### General
+ * [release-15.0] Fix missing flag usage for vault credentials flags (#11582) [#11583](https://github.com/vitessio/vitess/pull/11583)
+ * fix vdiff release notes [#11595](https://github.com/vitessio/vitess/pull/11595)
+#### Query Serving
+ * collations: fix coercion semantics according to 8.0.31 changes [#11487](https://github.com/vitessio/vitess/pull/11487)
+ * [bugfix] Allow VTExplain to handle shards that are not active during resharding [#11640](https://github.com/vitessio/vitess/pull/11640)
+ * [release-15.0] Do not multiply `AggregateRandom` in JOINs [#11672](https://github.com/vitessio/vitess/pull/11672)
+ * [15.0] Send errors in stream instead of a grpc error from streaming rpcs when transaction or reserved connection is acquired [#11687](https://github.com/vitessio/vitess/pull/11687)
+ * improve handling of ORDER BY/HAVING rewriting [#11691](https://github.com/vitessio/vitess/pull/11691)
+ * [release-15.0] Accept no more data in session state change as ok (#11796) [#11800](https://github.com/vitessio/vitess/pull/11800)
+ * semantics: Use a BitSet [#11819](https://github.com/vitessio/vitess/pull/11819)
+#### VTAdmin
+ * Add VTAdmin folder to release package [#11683](https://github.com/vitessio/vitess/pull/11683)
+#### vtctl
+ * Switch ApplySchema `--sql` argument to be `StringArray` instead of `StringSlice` [#11790](https://github.com/vitessio/vitess/pull/11790)
+### CI/Build
+#### Build/CI
+ * [release-15.0] Remove Launchable in the workflows [#11669](https://github.com/vitessio/vitess/pull/11669)
+ * Update test runners to run all tests including outside package [#11787](https://github.com/vitessio/vitess/pull/11787)
+ * [release-15.0] Add automation to change vitess version in the docker-release script (#11682) [#11816](https://github.com/vitessio/vitess/pull/11816)
+#### Governance
+ * codeowners: have at least two for almost every package [#11639](https://github.com/vitessio/vitess/pull/11639)
+#### Query Serving
+ * [release-15.0] Consistent sorting in Online DDL Vrepl suite test (#11821) [#11828](https://github.com/vitessio/vitess/pull/11828)
+#### VReplication
+ * update jsonparser dependency [#11694](https://github.com/vitessio/vitess/pull/11694)
+### Release
+#### General
+ * Release of v15.0.0 [#11573](https://github.com/vitessio/vitess/pull/11573)
+ * Back to dev mode after v15.0.0 [#11574](https://github.com/vitessio/vitess/pull/11574)
+ * fix anchors for release notes and summary [#11578](https://github.com/vitessio/vitess/pull/11578)
+ * Mention the `--db-config-*-*` flag in the release notes [#11610](https://github.com/vitessio/vitess/pull/11610)
+### Testing
+#### Build/CI
+ * [release-15.0] Use `go1.19.3` in the upgrade/downgrade tests [#11676](https://github.com/vitessio/vitess/pull/11676)
+
diff --git a/changelog/15.0/15.0.1/release_notes.md b/changelog/15.0/15.0.1/release_notes.md
new file mode 100644
index 00000000000..1737f0fd2f8
--- /dev/null
+++ b/changelog/15.0/15.0.1/release_notes.md
@@ -0,0 +1,24 @@
+# Release of Vitess v15.0.1
+## Major Changes
+
+### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
+
+### VtAdmin web folder is missing while installing Vitess with local method
+
+When we try to install Vitess locally (https://vitess.io/docs/15.0/get-started/local/#install-vitess) on `v15.0`, we are getting the following error
+```
+npm ERR! enoent ENOENT: no such file or directory, open '/home/web/vtadmin/package.json'
+```
+This issue is fixed in 15.0.1. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11679), and its fix [here](https://github.com/vitessio/vitess/pull/11683).
+
+------------
+
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.1/changelog.md).
+
+The release includes 25 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @frouioui, @harshit-gangal, @rsajwani, @vitess-bot[bot]
+
diff --git a/changelog/15.0/15.0.1/summary.md b/changelog/15.0/15.0.1/summary.md
new file mode 100644
index 00000000000..66a5763180f
--- /dev/null
+++ b/changelog/15.0/15.0.1/summary.md
@@ -0,0 +1,14 @@
+## Major Changes
+
+### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
+
+### VtAdmin web folder is missing while installing Vitess with local method
+
+When we try to install Vitess locally (https://vitess.io/docs/15.0/get-started/local/#install-vitess) on `v15.0`, we are getting the following error
+```
+npm ERR! enoent ENOENT: no such file or directory, open '/home/web/vtadmin/package.json'
+```
+This issue is fixed in 15.0.1. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11679), and its fix [here](https://github.com/vitessio/vitess/pull/11683).
\ No newline at end of file
diff --git a/changelog/15.0/15.0.2/changelog.md b/changelog/15.0/15.0.2/changelog.md
new file mode 100644
index 00000000000..29aff8fa2f7
--- /dev/null
+++ b/changelog/15.0/15.0.2/changelog.md
@@ -0,0 +1,27 @@
+# Changelog of Vitess v15.0.2
+
+### Bug fixes
+#### Query Serving
+ * Online DDL: fix 'vtctlclient OnlineDDL' template queries [#11889](https://github.com/vitessio/vitess/pull/11889)
+ * Fix CheckMySQL by setting the correct wanted state [#11895](https://github.com/vitessio/vitess/pull/11895)
+ * bugfix: allow predicates without dependencies with derived tables to be handled correctly [#11911](https://github.com/vitessio/vitess/pull/11911)
+ * [release-15.0] Fix sending a ServerLost error when reading a packet fails (#11920) [#11930](https://github.com/vitessio/vitess/pull/11930)
+ * Skip `TestSubqueriesExists` during upgrade-downgrade tests [#11953](https://github.com/vitessio/vitess/pull/11953)
+#### VReplication
+ * VReplication: Prevent Orphaned VDiff2 Jobs [#11768](https://github.com/vitessio/vitess/pull/11768)
+### CI/Build
+#### Build/CI
+ * Fix deprecated usage of set-output [#11844](https://github.com/vitessio/vitess/pull/11844)
+ * Use `go1.19.4` in the next release upgrade downgrade E2E tests [#11924](https://github.com/vitessio/vitess/pull/11924)
+#### TabletManager
+ * Fix closing the body for HTTP requests [#11842](https://github.com/vitessio/vitess/pull/11842)
+### Enhancement
+#### General
+ * Upgrade to `go1.18.9` [#11897](https://github.com/vitessio/vitess/pull/11897)
+### Release
+#### General
+ * Release of v15.0.1 [#11847](https://github.com/vitessio/vitess/pull/11847)
+ * Back to dev mode after v15.0.1 [#11848](https://github.com/vitessio/vitess/pull/11848)
+ * updating summary and release notes for v15.0.1 [#11852](https://github.com/vitessio/vitess/pull/11852)
+ * Update the release `15.0.2` summary doc [#11954](https://github.com/vitessio/vitess/pull/11954)
+
diff --git a/changelog/15.0/15.0.2/release_notes.md b/changelog/15.0/15.0.2/release_notes.md
new file mode 100644
index 00000000000..33ece0e1c73
--- /dev/null
+++ b/changelog/15.0/15.0.2/release_notes.md
@@ -0,0 +1,20 @@
+# Release of Vitess v15.0.2
+## Major Changes
+
+### Upgrade to `go1.18.9`
+
+Vitess `v15.0.2` now runs on `go1.18.9`.
+The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fixe to `net/http` package, which is use extensively by Vitess.
+Below is a summary of this patch release. You can learn more [here](https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU).
+
+> go1.18.9 (released 2022-12-06) includes security fixes to the net/http and os packages, as well as bug fixes to cgo, the compiler, the runtime, and the crypto/x509 and os/exec packages.
+
+
+------------
+
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.2/changelog.md).
+
+The release includes 14 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @dbussink, @frouioui, @mattlord, @rsajwani, @shlomi-noach, @vitess-bot[bot]
+
diff --git a/changelog/15.0/15.0.2/summary.md b/changelog/15.0/15.0.2/summary.md
new file mode 100644
index 00000000000..6f3346efa47
--- /dev/null
+++ b/changelog/15.0/15.0.2/summary.md
@@ -0,0 +1,10 @@
+## Major Changes
+
+### Upgrade to `go1.18.9`
+
+Vitess `v15.0.2` now runs on `go1.18.9`.
+The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fixe to `net/http` package, which is use extensively by Vitess.
+Below is a summary of this patch release. You can learn more [here](https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU).
+
+> go1.18.9 (released 2022-12-06) includes security fixes to the net/http and os packages, as well as bug fixes to cgo, the compiler, the runtime, and the crypto/x509 and os/exec packages.
+
diff --git a/changelog/15.0/README.md b/changelog/15.0/README.md
new file mode 100644
index 00000000000..f6985e8252b
--- /dev/null
+++ b/changelog/15.0/README.md
@@ -0,0 +1,13 @@
+## v15.0
+The dedicated team for this release can be found [here](team.md).
+* **[15.0.2](15.0.2)**
+ * [Changelog](15.0.2/changelog.md)
+ * [Release Notes](15.0.2/release_notes.md)
+
+* **[15.0.1](15.0.1)**
+ * [Changelog](15.0.1/changelog.md)
+ * [Release Notes](15.0.1/release_notes.md)
+
+* **[15.0.0](15.0.0)**
+ * [Changelog](15.0.0/changelog.md)
+ * [Release Notes](15.0.0/release_notes.md)
diff --git a/changelog/15.0/team.md b/changelog/15.0/team.md
new file mode 100644
index 00000000000..b8cbdf809bf
--- /dev/null
+++ b/changelog/15.0/team.md
@@ -0,0 +1,5 @@
+## Release Team for v15
+
+- **Lead:** Rameez Sajwani ([rsajwani](https://github.com/rsajwani)) rameez@planetscale.com
+- **Shadow:** Manan Gupta ([GuptaManan100](https://github.com/GuptaManan100)) manan@planetscale.com
+- **Mentor:** Florent Poinsard ([frouioui](https://github.com/frouioui)) florent@planetscale.com
\ No newline at end of file
diff --git a/changelog/16.0/16.0.0/changelog.md b/changelog/16.0/16.0.0/changelog.md
new file mode 100644
index 00000000000..0849e143a05
--- /dev/null
+++ b/changelog/16.0/16.0.0/changelog.md
@@ -0,0 +1,449 @@
+# Changelog of Vitess v16.0.0
+
+### Announcement
+#### Governance
+ * Move inactive maintainers to "Past" section, change Areas to include more active maintainers [#11744](https://github.com/vitessio/vitess/pull/11744)
+ * add frances to maintainers [#11865](https://github.com/vitessio/vitess/pull/11865)
+ * add Arthur to the maintainers file [#11937](https://github.com/vitessio/vitess/pull/11937)
+#### Query Serving
+ * deprecate V3 planner [#11635](https://github.com/vitessio/vitess/pull/11635)
+#### VTAdmin
+ * [vtadmin] deprecated vtexplain [#12163](https://github.com/vitessio/vitess/pull/12163)
+### Bug fixes
+#### Backup and Restore
+ * Detect redo log location dynamically based on presence [#11555](https://github.com/vitessio/vitess/pull/11555)
+ * [main] mysqlctl: flags should be added to vtbackup (#12048) [#12060](https://github.com/vitessio/vitess/pull/12060)
+#### Build/CI
+ * Fix the script `check_make_sizegen` [#11465](https://github.com/vitessio/vitess/pull/11465)
+ * Skip `TestComparisonSemantics` test [#11474](https://github.com/vitessio/vitess/pull/11474)
+ * Docker Image Context Fix [#11628](https://github.com/vitessio/vitess/pull/11628)
+ * Addition of a CI tool to detect dead links in test/config.json [#11668](https://github.com/vitessio/vitess/pull/11668)
+ * Fix files changes filtering in CI [#11714](https://github.com/vitessio/vitess/pull/11714)
+ * Fix `codeql` workflow timeout issue [#11760](https://github.com/vitessio/vitess/pull/11760)
+ * Trigger OnlineDDL workflow when test data changes [#11827](https://github.com/vitessio/vitess/pull/11827)
+#### CLI
+ * [release-16.0] vtctldclient: Format GetKeyspace output using cli.MarshalJSON (#12495) [#12503](https://github.com/vitessio/vitess/pull/12503)
+#### Cluster management
+ * BugFix: Errant GTID detection for a single replica [#12024](https://github.com/vitessio/vitess/pull/12024)
+ * BugFix: Fix race in `IsFlagProvided` [#12042](https://github.com/vitessio/vitess/pull/12042)
+#### Examples
+ * Examples: Add lib functions to wait for shard states [#12239](https://github.com/vitessio/vitess/pull/12239)
+ * Examples: Correct VTAdmin Discovery File Path And Add Check [#12415](https://github.com/vitessio/vitess/pull/12415)
+ * examples: Fix Vtop example and release script [#12440](https://github.com/vitessio/vitess/pull/12440)
+ * Fix vtctldclient command in vtop test script and the fix operator example [#12458](https://github.com/vitessio/vitess/pull/12458)
+ * Open vtadmin-api port in docker-local example to enable vtadmin access [#12467](https://github.com/vitessio/vitess/pull/12467)
+#### General
+ * [main] Stats Flags: include stats flags in the correct binaries (#11450) [#11453](https://github.com/vitessio/vitess/pull/11453)
+ * Test flags: Update logic for parsing test flags to run unit tests within GoLand and to parse test flags in vtgate to allow running unit tests [#11551](https://github.com/vitessio/vitess/pull/11551)
+ * Fix missing flag usage for vault credentials flags [#11582](https://github.com/vitessio/vitess/pull/11582)
+ * fix vdiff release notes [#11595](https://github.com/vitessio/vitess/pull/11595)
+#### Observability
+ * stats/prometheus: normalize labels for single-label implementations [#12057](https://github.com/vitessio/vitess/pull/12057)
+#### Online DDL
+ * Parse binlog variable length encoded columns [#11871](https://github.com/vitessio/vitess/pull/11871)
+ * Allow zero (in) date when setting up internal _vt schema [#12262](https://github.com/vitessio/vitess/pull/12262)
+#### Operator
+ * [main] Increase the memory limit of the vitess-operator (#11548) [#11550](https://github.com/vitessio/vitess/pull/11550)
+ * Fix rbac config in the vtop example [#12034](https://github.com/vitessio/vitess/pull/12034)
+#### Query Serving
+ * Fix query list override issue on mysql restart [#11309](https://github.com/vitessio/vitess/pull/11309)
+ * make MySQL56-flavor schema queries forward-compatible [#11323](https://github.com/vitessio/vitess/pull/11323)
+ * Plan order by `COUNT(X)` [#11420](https://github.com/vitessio/vitess/pull/11420)
+ * Fix #11455 - skip vindex operations for `DELETE` statements against unsharded tables [#11461](https://github.com/vitessio/vitess/pull/11461)
+ * Fix aggregation end-to-end test PRIMARY KEY [#11473](https://github.com/vitessio/vitess/pull/11473)
+ * collations: fix coercion semantics according to 8.0.31 changes [#11487](https://github.com/vitessio/vitess/pull/11487)
+ * fix: reserve connection to follow query timeout when outside of transaction [#11490](https://github.com/vitessio/vitess/pull/11490)
+ * [main] bug fix: using self-referencing columns in HAVING should not overflow (#11499) [#11504](https://github.com/vitessio/vitess/pull/11504)
+ * Fix `HAVING` rewriting made in #11306 [#11515](https://github.com/vitessio/vitess/pull/11515)
+ * fix: fail over reconnect in stream execution for connection with transaction [#11517](https://github.com/vitessio/vitess/pull/11517)
+ * [15.0] Fix: concatenate engine in transaction [#11534](https://github.com/vitessio/vitess/pull/11534)
+ * [main] Redact bind variables in mysql errors (#11540) [#11545](https://github.com/vitessio/vitess/pull/11545)
+ * Fix ordering when error happens during stream setup [#11592](https://github.com/vitessio/vitess/pull/11592)
+ * Do not multiply `AggregateRandom` in `JOIN`s [#11633](https://github.com/vitessio/vitess/pull/11633)
+ * [bugfix] Allow VTExplain to handle shards that are not active during resharding [#11640](https://github.com/vitessio/vitess/pull/11640)
+ * Fix generating invalid alter table for comments [#11645](https://github.com/vitessio/vitess/pull/11645)
+ * sqltypes: handle leading zeroes [#11650](https://github.com/vitessio/vitess/pull/11650)
+ * Send errors in stream instead of a grpc error from streaming rpcs when transaction or reserved connection is acquired [#11656](https://github.com/vitessio/vitess/pull/11656)
+ * schemadiff: normalize index option value (string) [#11675](https://github.com/vitessio/vitess/pull/11675)
+ * improve handling of ORDER BY/HAVING rewriting [#11691](https://github.com/vitessio/vitess/pull/11691)
+ * query timeout hints in unsharded cases [#11709](https://github.com/vitessio/vitess/pull/11709)
+ * Online DDL: adding error check; more verbose error messages [#11789](https://github.com/vitessio/vitess/pull/11789)
+ * Accept no more data in session state change as ok [#11796](https://github.com/vitessio/vitess/pull/11796)
+ * Fix: return allowed transaction isolation level value on select query [#11804](https://github.com/vitessio/vitess/pull/11804)
+ * semantics: Use a BitSet [#11819](https://github.com/vitessio/vitess/pull/11819)
+ * BugFix: Escaping Percentage and Underscore require special handling [#11823](https://github.com/vitessio/vitess/pull/11823)
+ * Simplify recursive data structure in CFC Vindex [#11843](https://github.com/vitessio/vitess/pull/11843)
+ * Fix MySQL56 GTID parsing when SID/UUID repeats [#11888](https://github.com/vitessio/vitess/pull/11888)
+ * Online DDL: fix 'vtctlclient OnlineDDL' template queries [#11889](https://github.com/vitessio/vitess/pull/11889)
+ * Fix CheckMySQL by setting the correct wanted state [#11895](https://github.com/vitessio/vitess/pull/11895)
+ * Onlineddl: formalize "immediate operations", respect `--postpone-completion` strategy flag [#11910](https://github.com/vitessio/vitess/pull/11910)
+ * bugfix: allow predicates without dependencies with derived tables to be handled correctly [#11911](https://github.com/vitessio/vitess/pull/11911)
+ * Online DDL: ensure message is valid `utf8` in `updateMigrationMessage()` [#11914](https://github.com/vitessio/vitess/pull/11914)
+ * Fix sending a ServerLost error when reading a packet fails [#11920](https://github.com/vitessio/vitess/pull/11920)
+ * only expand when we have full information [#11998](https://github.com/vitessio/vitess/pull/11998)
+ * Remove unnecessary logging [#12000](https://github.com/vitessio/vitess/pull/12000)
+ * Fix parsing and normalization of floating point types [#12009](https://github.com/vitessio/vitess/pull/12009)
+ * OnlineDDL: scheduleNextMigration() to only read reviewed migrations [#12014](https://github.com/vitessio/vitess/pull/12014)
+ * Keywords must be unique and can't be reused [#12044](https://github.com/vitessio/vitess/pull/12044)
+ * Fix: Date math with Interval keyword [#12082](https://github.com/vitessio/vitess/pull/12082)
+ * OnlineDDL: support integer-to-enum conversion in `vitess` migrations [#12098](https://github.com/vitessio/vitess/pull/12098)
+ * Keep the correct case for the charset for canonical string [#12105](https://github.com/vitessio/vitess/pull/12105)
+ * BugFix: Cast expression translation by evaluation engine [#12111](https://github.com/vitessio/vitess/pull/12111)
+ * [Gen4] Fix lookup vindexes with `autocommit` enabled [#12172](https://github.com/vitessio/vitess/pull/12172)
+ * handle system databases other that information_schema correctly [#12175](https://github.com/vitessio/vitess/pull/12175)
+ * VTGate: Ensure HealthCheck Cache Secondary Maps Stay in Sync With Authoritative Map on Tablet Delete [#12178](https://github.com/vitessio/vitess/pull/12178)
+ * schemadiff: fix scenario where no tables exist in schema and with just views reading from DUAL [#12189](https://github.com/vitessio/vitess/pull/12189)
+ * Fix parsing of bitnum values larger than 64 bit [#12191](https://github.com/vitessio/vitess/pull/12191)
+ * Online DDL: fix deadlock by releasing mutex before calling callback [#12211](https://github.com/vitessio/vitess/pull/12211)
+ * fix: transaction_isolation to be applied at session level [#12281](https://github.com/vitessio/vitess/pull/12281)
+ * Fix aggregation on outer joins [#12298](https://github.com/vitessio/vitess/pull/12298)
+ * [release-16.0] fix: added null safe operator precendence rule (#12297) [#12307](https://github.com/vitessio/vitess/pull/12307)
+ * Fix for USING when column names not lower cased [#12379](https://github.com/vitessio/vitess/pull/12379)
+ * [release-16.0] Fix bug in vtexplain around JOINs (#12376) [#12384](https://github.com/vitessio/vitess/pull/12384)
+ * Fix scalar aggregation engine primitive for column truncation [#12468](https://github.com/vitessio/vitess/pull/12468)
+ * [release-16.0] BugFix: Unsharded query using a derived table and a dual table [#12484](https://github.com/vitessio/vitess/pull/12484)
+#### VReplication
+ * VReplication: escape identifiers when renaming source tables [#11670](https://github.com/vitessio/vitess/pull/11670)
+ * VReplication: Prevent Orphaned VDiff2 Jobs [#11768](https://github.com/vitessio/vitess/pull/11768)
+ * VDiff2: Properly Apply --only_pks Report Flag [#12025](https://github.com/vitessio/vitess/pull/12025)
+ * VReplication: Improve Error/Status Reporting [#12052](https://github.com/vitessio/vitess/pull/12052)
+ * VReplication: Propagate Binlog Stream Errors [#12095](https://github.com/vitessio/vitess/pull/12095)
+ * VReplication: ignore GC tables in schema analysis [#12320](https://github.com/vitessio/vitess/pull/12320)
+ * Sidecar schema init: use COPY algorithm while altering sidecardb tables [#12436](https://github.com/vitessio/vitess/pull/12436)
+#### VTorc
+ * Fix VTOrc holding locks after shutdown [#11442](https://github.com/vitessio/vitess/pull/11442)
+ * [15.0] Fix VTOrc to handle multiple failures [#11489](https://github.com/vitessio/vitess/pull/11489)
+ * VTOrc running PRS when database_instance empty bug fix. [#12019](https://github.com/vitessio/vitess/pull/12019)
+ * Fix: VTOrc forgetting old instances [#12089](https://github.com/vitessio/vitess/pull/12089)
+ * Fix insert query of blocked_recovery table in VTOrc [#12091](https://github.com/vitessio/vitess/pull/12091)
+#### vtctl
+ * Switch ApplySchema `--sql` argument to be `StringArray` instead of `StringSlice` [#11790](https://github.com/vitessio/vitess/pull/11790)
+#### vtexplain
+ * Use Gen4 as the default planner version for VTExplain [#12021](https://github.com/vitessio/vitess/pull/12021)
+#### vttestserver
+ * Fix vttestserver run script defaults [#12004](https://github.com/vitessio/vitess/pull/12004)
+ * Add missing backslash to run.sh script [#12033](https://github.com/vitessio/vitess/pull/12033)
+### CI/Build
+#### Backup and Restore
+ * docker/lite: +zstd dep [#11997](https://github.com/vitessio/vitess/pull/11997)
+#### Build/CI
+ * unit test: use require and assert [#11252](https://github.com/vitessio/vitess/pull/11252)
+ * Skip CI with the `Skip CI` label [#11514](https://github.com/vitessio/vitess/pull/11514)
+ * Update GitHub Actions workflows to latest versions [#11525](https://github.com/vitessio/vitess/pull/11525)
+ * Removing SharedPitr_tls and Backup_transfrom test from CI [#11611](https://github.com/vitessio/vitess/pull/11611)
+ * Add automation to change vitess version in the docker-release script [#11682](https://github.com/vitessio/vitess/pull/11682)
+ * Fix two additional flaky test sources in endtoend tests [#11743](https://github.com/vitessio/vitess/pull/11743)
+ * Update latest protobuf [#11782](https://github.com/vitessio/vitess/pull/11782)
+ * Update test runners to run all tests including outside package [#11787](https://github.com/vitessio/vitess/pull/11787)
+ * Update to latest etcd release [#11791](https://github.com/vitessio/vitess/pull/11791)
+ * Migrate to GitHub OIDC based auth for Launchable [#11808](https://github.com/vitessio/vitess/pull/11808)
+ * Fix the golangci-lint config [#11812](https://github.com/vitessio/vitess/pull/11812)
+ * Add instructions on how to fix a self-hosted runner running out of disk space [#11839](https://github.com/vitessio/vitess/pull/11839)
+ * Fix deprecated usage of set-output [#11844](https://github.com/vitessio/vitess/pull/11844)
+ * update golangci-lint to 1.50.1 [#11873](https://github.com/vitessio/vitess/pull/11873)
+ * CODEOWNERS: Add vrepl team members for vtgate vstream and tablet picker [#11950](https://github.com/vitessio/vitess/pull/11950)
+ * Upgrade all the CI runners to Ubuntu 22.04 [#11985](https://github.com/vitessio/vitess/pull/11985)
+ * Add lauchable to unit tests as well and remove OIDC [#12031](https://github.com/vitessio/vitess/pull/12031)
+ * consolidating OnlineDDL 'declarative' tests into 'scheduler' tests: part 1 [#12061](https://github.com/vitessio/vitess/pull/12061)
+ * OnlineDDL CI: remove 'revertible' tests (part 2) [#12192](https://github.com/vitessio/vitess/pull/12192)
+ * Update vtadmin dependencies [#12201](https://github.com/vitessio/vitess/pull/12201)
+ * Update Go dependencies [#12215](https://github.com/vitessio/vitess/pull/12215)
+ * Download mariadb from vitess-resources [#12271](https://github.com/vitessio/vitess/pull/12271)
+#### Cluster management
+ * Endtoend cluster improvements [#11859](https://github.com/vitessio/vitess/pull/11859)
+ * CI, tabletmanager throttler topo tests: polling until status received [#12107](https://github.com/vitessio/vitess/pull/12107)
+#### General
+ * [deps] go get golang.org/x/text && go mod tidy [#11466](https://github.com/vitessio/vitess/pull/11466)
+ * Upgrade to `go1.19.3` [#11655](https://github.com/vitessio/vitess/pull/11655)
+ * Code freeze of release-16.0 [#12232](https://github.com/vitessio/vitess/pull/12232)
+#### Governance
+ * codeowners: have at least two for almost every package [#11639](https://github.com/vitessio/vitess/pull/11639)
+ * added code owners for go.mod and go.sum files [#11711](https://github.com/vitessio/vitess/pull/11711)
+ * Add more codeowners to the `/test` directory [#11762](https://github.com/vitessio/vitess/pull/11762)
+#### Query Serving
+ * Consistent sorting in Online DDL Vrepl suite test [#11821](https://github.com/vitessio/vitess/pull/11821)
+ * Flakes: Properly Test HealthCheck Cache Response Handling [#12226](https://github.com/vitessio/vitess/pull/12226)
+ * Fix `SHOW VSCHEMA TABLES` tests using v17 vtgate that expected `dual` [#12381](https://github.com/vitessio/vitess/pull/12381)
+#### TabletManager
+ * Update throttler-topo workflow file [#11784](https://github.com/vitessio/vitess/pull/11784)
+ * Fix closing the body for HTTP requests [#11842](https://github.com/vitessio/vitess/pull/11842)
+#### VReplication
+ * update jsonparser dependency [#11694](https://github.com/vitessio/vitess/pull/11694)
+#### VTorc
+ * Move vtorc runners back to normal github runners [#11482](https://github.com/vitessio/vitess/pull/11482)
+### Dependabot
+#### Build/CI
+ * [release-16.0] Bump golang.org/x/net from 0.5.0 to 0.7.0 (#12390) [#12400](https://github.com/vitessio/vitess/pull/12400)
+ * [release-16.0] Update additional Go dependencies (#12401) [#12402](https://github.com/vitessio/vitess/pull/12402)
+#### Java
+ * build(deps): Bump protobuf-java from 3.19.4 to 3.19.6 in /java [#11439](https://github.com/vitessio/vitess/pull/11439)
+#### VTAdmin
+ * build(deps): Bump @xmldom/xmldom from 0.7.5 to 0.7.8 in /web/vtadmin [#11615](https://github.com/vitessio/vitess/pull/11615)
+ * build(deps): Bump loader-utils from 1.4.0 to 1.4.1 in /web/vtadmin [#11659](https://github.com/vitessio/vitess/pull/11659)
+ * build(deps): Bump loader-utils from 1.4.1 to 1.4.2 in /web/vtadmin [#11725](https://github.com/vitessio/vitess/pull/11725)
+### Documentation
+#### Build/CI
+ * Update release notes summary for the new default MySQL version [#12222](https://github.com/vitessio/vitess/pull/12222)
+#### CLI
+ * [vtadmin] Do not backtick binary name [#11464](https://github.com/vitessio/vitess/pull/11464)
+ * [vtctldclient|docs] apply doc feedback based on website PR feedback [#12030](https://github.com/vitessio/vitess/pull/12030)
+#### Documentation
+ * Upgrades the release notes for v15.0.0 [#11567](https://github.com/vitessio/vitess/pull/11567)
+ * Copy design docs over from website [#12071](https://github.com/vitessio/vitess/pull/12071)
+#### Query Serving
+ * Add release notes summary for views [#12422](https://github.com/vitessio/vitess/pull/12422)
+#### VReplication
+ * Mark VDiff V2 as GA in v16 [#12084](https://github.com/vitessio/vitess/pull/12084)
+### Enhancement
+#### Authn/z
+ * VTGate: Set immediate caller id from gRPC static auth username [#12050](https://github.com/vitessio/vitess/pull/12050)
+#### Backup and Restore
+ * Incremental logical backup and point in time recovery [#11097](https://github.com/vitessio/vitess/pull/11097)
+ * vtbackup: disable redo log before starting replication [#11330](https://github.com/vitessio/vitess/pull/11330)
+ * remove excessive backup decompression logging [#11479](https://github.com/vitessio/vitess/pull/11479)
+ * vtbackup: add --disable-redo-log flag (default false) [#11594](https://github.com/vitessio/vitess/pull/11594)
+ * remove backup_hook from flag help [#12029](https://github.com/vitessio/vitess/pull/12029)
+#### Build/CI
+ * Move CI workflow to use latest community version of mysql 8.0 [#11493](https://github.com/vitessio/vitess/pull/11493)
+ * Upgrade the `release_notes_label` workflow for `v16.0.0` [#11544](https://github.com/vitessio/vitess/pull/11544)
+ * CODEOWNERS: Add maintainers to `.github/workflows` and `.github` [#11781](https://github.com/vitessio/vitess/pull/11781)
+ * Allow override of build git env in docker/base builds [#11968](https://github.com/vitessio/vitess/pull/11968)
+ * Add vtorc port to vitess local docker run [#12001](https://github.com/vitessio/vitess/pull/12001)
+ * Update the MySQL version used by our Docker images [#12054](https://github.com/vitessio/vitess/pull/12054)
+ * Fail CI when a PR is labeled with `NeedsWebsiteDocsUpdate` or `NeedsDescriptionUpdate` [#12062](https://github.com/vitessio/vitess/pull/12062)
+ * Revert default MySQL 80 version to `8.0.30` [#12252](https://github.com/vitessio/vitess/pull/12252)
+#### CLI
+ * Add GenerateShardRanges to vtctldclient [#11492](https://github.com/vitessio/vitess/pull/11492)
+ * Properly deprecate flags and fix default for `--cell` [#11501](https://github.com/vitessio/vitess/pull/11501)
+ * Allow version to be accessible via the -v shorthand [#11512](https://github.com/vitessio/vitess/pull/11512)
+#### Cluster management
+ * Create new api for topo lock shard exists [#11269](https://github.com/vitessio/vitess/pull/11269)
+ * Deprecating VExec part1: removing client-side references [#11955](https://github.com/vitessio/vitess/pull/11955)
+#### Driver
+ * Implement the RowsColumnTypeScanType interface in the go sql driver [#12007](https://github.com/vitessio/vitess/pull/12007)
+#### Examples
+ * Give all permissions in rbac in examples [#11463](https://github.com/vitessio/vitess/pull/11463)
+ * Fix Vitess Operator example [#11546](https://github.com/vitessio/vitess/pull/11546)
+#### General
+ * removing unncessary flags across binaries [#11495](https://github.com/vitessio/vitess/pull/11495)
+ * [release-15.0] Upgrade to `go1.18.7` [#11507](https://github.com/vitessio/vitess/pull/11507)
+ * vttablet sidecar schema:use schemadiff to reach desired schema on tablet init replacing the withDDL-based approach [#11520](https://github.com/vitessio/vitess/pull/11520)
+ * Removing redundant flags across binaries [#11522](https://github.com/vitessio/vitess/pull/11522)
+ * Remove `EnableTracingOpt` and `--grpc_enable_tracing` [#11543](https://github.com/vitessio/vitess/pull/11543)
+ * Add default lower stack limit [#11569](https://github.com/vitessio/vitess/pull/11569)
+ * Upgrade to `go1.19.4` [#11905](https://github.com/vitessio/vitess/pull/11905)
+ * Add structure logging to Vitess [#11960](https://github.com/vitessio/vitess/pull/11960)
+ * Revert changes made in #11960 [#12219](https://github.com/vitessio/vitess/pull/12219)
+ * Upgrade `release-16.0` to `go1.20.1` [#12398](https://github.com/vitessio/vitess/pull/12398)
+#### Governance
+ * Add manan and florent to Docker files CODEOWNERS [#11981](https://github.com/vitessio/vitess/pull/11981)
+#### Query Serving
+ * ComBinlogDumpGTID and downstream replication protocol [#10066](https://github.com/vitessio/vitess/pull/10066)
+ * Document error code in `vtgate/planbuilder` [#10738](https://github.com/vitessio/vitess/pull/10738)
+ * opt in/out of query consolidation [#11080](https://github.com/vitessio/vitess/pull/11080)
+ * Online DDL: more (async) log visibility into cut-over phase [#11253](https://github.com/vitessio/vitess/pull/11253)
+ * optionally disable verify-after-insert behavior of lookup vindexes [#11313](https://github.com/vitessio/vitess/pull/11313)
+ * resource pool: resource max lifetime timeout [#11337](https://github.com/vitessio/vitess/pull/11337)
+ * feat: added query timeout to vtgate default and per session [#11429](https://github.com/vitessio/vitess/pull/11429)
+ * [evalengine] add rewrites for nullif and ifnull [#11431](https://github.com/vitessio/vitess/pull/11431)
+ * Handle aliasing of collation names [#11433](https://github.com/vitessio/vitess/pull/11433)
+ * vitess Online DDL atomic cut-over [#11460](https://github.com/vitessio/vitess/pull/11460)
+ * Keep track of expanded columns in the semantic analysis [#11462](https://github.com/vitessio/vitess/pull/11462)
+ * feat: deconstruct tuple comparisons so we can use them for routing decisions [#11500](https://github.com/vitessio/vitess/pull/11500)
+ * Add Gauge For CheckMySQL Running [#11524](https://github.com/vitessio/vitess/pull/11524)
+ * Optimize List Support In Vindex Functions [#11531](https://github.com/vitessio/vitess/pull/11531)
+ * add option to disable lookup read lock [#11538](https://github.com/vitessio/vitess/pull/11538)
+ * [refactor] Predicate push down [#11552](https://github.com/vitessio/vitess/pull/11552)
+ * planner: better bindvar names for auto-parameterized queries [#11571](https://github.com/vitessio/vitess/pull/11571)
+ * planner enhancement: nice bindvar names for update [#11581](https://github.com/vitessio/vitess/pull/11581)
+ * Online DDL: more support for INSTANT DDL [#11591](https://github.com/vitessio/vitess/pull/11591)
+ * vtgate: route create table statements to vschema keyspace [#11602](https://github.com/vitessio/vitess/pull/11602)
+ * Dynamic tablet throttler config: enable/disable, set metrics query/threshold [#11604](https://github.com/vitessio/vitess/pull/11604)
+ * Cleanup copying of proto results to sqltypes.Result [#11607](https://github.com/vitessio/vitess/pull/11607)
+ * Move horizon planning to operators [#11622](https://github.com/vitessio/vitess/pull/11622)
+ * normalize more expressions [#11631](https://github.com/vitessio/vitess/pull/11631)
+ * Fix `OR 1=0` causing queries to scatter [#11653](https://github.com/vitessio/vitess/pull/11653)
+ * Online DDL: normalize/idempotentize CHECK CONSTRAINTs in ALTER TABLE statement [#11663](https://github.com/vitessio/vitess/pull/11663)
+ * add support for transaction isolation level and make it vitess aware setting [#11673](https://github.com/vitessio/vitess/pull/11673)
+ * don't reuse bindvars for LIMIT and OFFSET [#11689](https://github.com/vitessio/vitess/pull/11689)
+ * Online DDL: more scheduler triggering following successful operations [#11701](https://github.com/vitessio/vitess/pull/11701)
+ * Add support for transaction access mode [#11704](https://github.com/vitessio/vitess/pull/11704)
+ * rewrite predicates to expose routing opportunities [#11765](https://github.com/vitessio/vitess/pull/11765)
+ * find IN route possibility in ORs [#11775](https://github.com/vitessio/vitess/pull/11775)
+ * [planner] Better AST equality [#11867](https://github.com/vitessio/vitess/pull/11867)
+ * optimize joins, redirect dml for reference tables [#11875](https://github.com/vitessio/vitess/pull/11875)
+ * VExplain statement [#11892](https://github.com/vitessio/vitess/pull/11892)
+ * Simplify `getPlan` and `gen4CompareV3` [#11903](https://github.com/vitessio/vitess/pull/11903)
+ * Better clone of the VCursor [#11926](https://github.com/vitessio/vitess/pull/11926)
+ * Better clone of the VCursor [#11926](https://github.com/vitessio/vitess/pull/11926)
+ * [planner] Schema information on the information_schema views [#11941](https://github.com/vitessio/vitess/pull/11941)
+ * schemadiff: foreign key validation (tables and columns) [#11944](https://github.com/vitessio/vitess/pull/11944)
+ * OnlineDDL: support --unsafe-allow-foreign-keys strategy flag [#11976](https://github.com/vitessio/vitess/pull/11976)
+ * support transaction isolation modification through reserved connection system settings [#11987](https://github.com/vitessio/vitess/pull/11987)
+ * **unsafe**: Online DDL support for `--unsafe-allow-foreign-keys` strategy flag [#11988](https://github.com/vitessio/vitess/pull/11988)
+ * vtgate advertised mysql server version to 8.0.31 [#11989](https://github.com/vitessio/vitess/pull/11989)
+ * schemadiff: normalize `PRIMARY KEY` definition [#12016](https://github.com/vitessio/vitess/pull/12016)
+ * schemadiff: validate and apply foreign key indexes [#12026](https://github.com/vitessio/vitess/pull/12026)
+ * OnlineDDL: 'mysql' strategy, managed by the scheduler, but executed via normal MySQL statements [#12027](https://github.com/vitessio/vitess/pull/12027)
+ * Refactor sqlparser.Rewrite uses [#12059](https://github.com/vitessio/vitess/pull/12059)
+ * Online DDL: --in-order-completion ddl strategy and logic [#12113](https://github.com/vitessio/vitess/pull/12113)
+ * schemadiff: TableCharsetCollateStrategy hint [#12137](https://github.com/vitessio/vitess/pull/12137)
+ * Support BETWEEN in the evalengine [#12150](https://github.com/vitessio/vitess/pull/12150)
+ * Use schema for the information_schema views [#12171](https://github.com/vitessio/vitess/pull/12171)
+ * vtgateconn: add DeregisterDialer hook [#12213](https://github.com/vitessio/vitess/pull/12213)
+ * add database name to _vt.views table [#12368](https://github.com/vitessio/vitess/pull/12368)
+ * Schema RPC to fetch table/view definition [#12375](https://github.com/vitessio/vitess/pull/12375)
+ * Change `GetSchema` RPC to return `CreateView` instead of `SelectStmt` [#12421](https://github.com/vitessio/vitess/pull/12421)
+ * GetSchema rpc to streaming api [#12447](https://github.com/vitessio/vitess/pull/12447)
+#### TabletManager
+ * SidecarDB Init: don't fail on schema init errors [#12328](https://github.com/vitessio/vitess/pull/12328)
+#### VReplication
+ * VReplication Copy Phase: Parallelize Bulk Inserts [#10828](https://github.com/vitessio/vitess/pull/10828)
+ * VSCopy: Resume the copy phase consistently from given GTID and lastpk [#11103](https://github.com/vitessio/vitess/pull/11103)
+ * For partial MoveTables, setup reverse shard routing rules on workflow creation [#11415](https://github.com/vitessio/vitess/pull/11415)
+ * Use unique rows in copy_state to support parallel replication [#11451](https://github.com/vitessio/vitess/pull/11451)
+ * Log which tablet copy_state optimization failed on [#11521](https://github.com/vitessio/vitess/pull/11521)
+ * Allow users to control VReplication DDL handling [#11532](https://github.com/vitessio/vitess/pull/11532)
+ * VReplication: Defer Secondary Index Creation [#11700](https://github.com/vitessio/vitess/pull/11700)
+ * VSCopy: Send COPY_COMPLETED events when the copy operation is done [#11740](https://github.com/vitessio/vitess/pull/11740)
+ * Add `VStreamerCount` stat to `vttablet` [#11978](https://github.com/vitessio/vitess/pull/11978)
+ * VReplication: Use MariaDB Compat JSON Functions [#12420](https://github.com/vitessio/vitess/pull/12420)
+#### VTAdmin
+ * [VTAdmin] `Validate`, `ValidateShard`, `ValidateVersionShard`, `GetFullStatus` [#11438](https://github.com/vitessio/vitess/pull/11438)
+ * Full Status tab improvements for VTAdmin [#11470](https://github.com/vitessio/vitess/pull/11470)
+ * [15.0] Add VTGate debug/status page link to VTAdmin [#11541](https://github.com/vitessio/vitess/pull/11541)
+ * VTAdmin: display workflow type in workflows list [#11685](https://github.com/vitessio/vitess/pull/11685)
+#### VTorc
+ * Timeout Fixes and VTOrc Improvement [#11881](https://github.com/vitessio/vitess/pull/11881)
+ * Also log error on a failure in DiscoverInstance [#11936](https://github.com/vitessio/vitess/pull/11936)
+ * VTOrc Code Cleanup - generate_base, replace cluster_name with keyspace and shard. [#12012](https://github.com/vitessio/vitess/pull/12012)
+ * Move vtorc from go-sqlite3 to modernc.org/sqlite [#12214](https://github.com/vitessio/vitess/pull/12214)
+### Feature Request
+#### Evalengine
+ * evalengine: Support built-in MySQL function for string functions and operations [#11185](https://github.com/vitessio/vitess/pull/11185)
+#### Query Serving
+ * Add support for views in vtgate [#11195](https://github.com/vitessio/vitess/pull/11195)
+ * Add support for Views DDL [#11896](https://github.com/vitessio/vitess/pull/11896)
+ * notify view change to vtgate [#12115](https://github.com/vitessio/vitess/pull/12115)
+ * Views Support: Updating Views in VSchema for query serving [#12124](https://github.com/vitessio/vitess/pull/12124)
+ * Create Views allowed for same keyspace [#12409](https://github.com/vitessio/vitess/pull/12409)
+#### web UI
+ * [VTAdmin] Topology Browser [#11496](https://github.com/vitessio/vitess/pull/11496)
+### Internal Cleanup
+#### Backup and Restore
+ * backup: remove deprecated hook support [#12066](https://github.com/vitessio/vitess/pull/12066)
+#### Build/CI
+ * Update all the Go dependencies [#11741](https://github.com/vitessio/vitess/pull/11741)
+ * Remove building Docker containers with MariaDB [#12040](https://github.com/vitessio/vitess/pull/12040)
+ * Add TOC to the summary docs [#12225](https://github.com/vitessio/vitess/pull/12225)
+#### CLI
+ * moved missed flags to pflags in vtgate [#11966](https://github.com/vitessio/vitess/pull/11966)
+ * Migrate missed vtctld flags to pflag and immediately deprecate them [#11974](https://github.com/vitessio/vitess/pull/11974)
+ * Remove Dead Legacy Workflow Manager Code [#12085](https://github.com/vitessio/vitess/pull/12085)
+#### Cluster management
+ * Adding deprecate message to backup hooks [#11491](https://github.com/vitessio/vitess/pull/11491)
+ * Orchestrator Integration Removal and `orc_client_user` removal [#11503](https://github.com/vitessio/vitess/pull/11503)
+ * [15.0] Deprecate InitShardPrimary command [#11557](https://github.com/vitessio/vitess/pull/11557)
+ * mysqlctl is a command-line client so remove server flags [#12022](https://github.com/vitessio/vitess/pull/12022)
+ * Remove replication manager and run VTOrc in all e2e tests [#12149](https://github.com/vitessio/vitess/pull/12149)
+#### General
+ * Improve Codeowners File [#11428](https://github.com/vitessio/vitess/pull/11428)
+ * Remove example script that caused some confusion [#11529](https://github.com/vitessio/vitess/pull/11529)
+ * Remove unused ioutil2 code [#11661](https://github.com/vitessio/vitess/pull/11661)
+ * Fix some linter errors [#11773](https://github.com/vitessio/vitess/pull/11773)
+ * Remove Deprecated flags, code and stats. [#12083](https://github.com/vitessio/vitess/pull/12083)
+ * Fix release notes on release-16.0 [#12276](https://github.com/vitessio/vitess/pull/12276)
+ * Fix summary and release notes [#12283](https://github.com/vitessio/vitess/pull/12283)
+ * Reorder summary in order of importance [#12433](https://github.com/vitessio/vitess/pull/12433)
+ * release notes: mark dynamic throttler configuration as experimental [#12475](https://github.com/vitessio/vitess/pull/12475)
+#### Governance
+ * Correct minor inaccuracies in governing docs [#11933](https://github.com/vitessio/vitess/pull/11933)
+#### Online DDL
+ * [cleanup] Explicitly include DDLStrategySetting in the sizegen target [#11857](https://github.com/vitessio/vitess/pull/11857)
+ * OnlineDDL: avoid schema_migrations AUTO_INCREMENT gaps by pre-checking for existing migration [#12169](https://github.com/vitessio/vitess/pull/12169)
+#### Query Serving
+ * [gen4 planner] Operator refactoring [#11498](https://github.com/vitessio/vitess/pull/11498)
+ * [gen4]: small refactoring around Compact [#11537](https://github.com/vitessio/vitess/pull/11537)
+ * change CreatePhysicalOperator to use the rewriteBottomUp() functionality [#11542](https://github.com/vitessio/vitess/pull/11542)
+ * [refactor planner] Columns and predicates on operators [#11606](https://github.com/vitessio/vitess/pull/11606)
+ * Move initialization of metrics to be static [#11608](https://github.com/vitessio/vitess/pull/11608)
+ * planner operators refactoring [#11680](https://github.com/vitessio/vitess/pull/11680)
+ * sqlparser: new Equality API [#11906](https://github.com/vitessio/vitess/pull/11906)
+ * sqlparser: `QueryMatchesTemplates` uses canonical string [#11990](https://github.com/vitessio/vitess/pull/11990)
+ * Move more rewriting to SafeRewrite [#12063](https://github.com/vitessio/vitess/pull/12063)
+ * store transaction isolation level in upper case [#12099](https://github.com/vitessio/vitess/pull/12099)
+ * Generating copy-on-rewrite logic [#12135](https://github.com/vitessio/vitess/pull/12135)
+ * Clean up ColumnType uses [#12139](https://github.com/vitessio/vitess/pull/12139)
+#### TabletManager
+ * Table GC: rely on tm state to determine operation mode [#11972](https://github.com/vitessio/vitess/pull/11972)
+ * Mark VReplicationExec Client Command as Deprecated [#12070](https://github.com/vitessio/vitess/pull/12070)
+#### VReplication
+ * Leverage pFlag's Changed function to detect user specified flag [#11677](https://github.com/vitessio/vitess/pull/11677)
+ * VReplication: Remove Deprecated V1 Client Commands [#11705](https://github.com/vitessio/vitess/pull/11705)
+#### VTAdmin
+ * move react-scripts to dev dependencies [#11767](https://github.com/vitessio/vitess/pull/11767)
+#### web UI
+ * [vtctld2] Remove vtctld2 UI and vtctld server components that serve the app UI [#11851](https://github.com/vitessio/vitess/pull/11851)
+### Performance
+#### Cluster management
+ * Bug fix: Cache filtered out tablets in topology watcher to avoid unnecessary GetTablet calls to topo [#12194](https://github.com/vitessio/vitess/pull/12194)
+#### Online DDL
+ * Speedup DDLs by not reloading table size stats [#11601](https://github.com/vitessio/vitess/pull/11601)
+#### Query Serving
+ * DDL: do not Reload() for 'CREATE TEMPORARY' and 'DROP TEMPORARY' statements [#12144](https://github.com/vitessio/vitess/pull/12144)
+#### VReplication
+ * mysql: Improve MySQL 5.6 GTID parsing performance [#11570](https://github.com/vitessio/vitess/pull/11570)
+#### vttestserver
+ * vttestserver: make tablet_refresh_interval configurable and reduce default value [#11918](https://github.com/vitessio/vitess/pull/11918)
+### Release
+#### Build/CI
+ * Improve the release process [#12056](https://github.com/vitessio/vitess/pull/12056)
+ * Use Ubuntu 20.04 for Release Builds [#12202](https://github.com/vitessio/vitess/pull/12202)
+ * Use Ubuntu 20.04 for Release Builds [#12202](https://github.com/vitessio/vitess/pull/12202)
+#### Documentation
+ * Fix release notes summary links [#11508](https://github.com/vitessio/vitess/pull/11508)
+ * Release notes summary of `14.0.4` [#11849](https://github.com/vitessio/vitess/pull/11849)
+ * Release notes for `v15.0.2` [#11963](https://github.com/vitessio/vitess/pull/11963)
+#### General
+ * Release notes for 15.0.0-rc1 and update SNAPSHOT version to 16.0.0 [#11445](https://github.com/vitessio/vitess/pull/11445)
+ * fix anchors for release notes and summary [#11578](https://github.com/vitessio/vitess/pull/11578)
+ * update release notes after 15.0 [#11584](https://github.com/vitessio/vitess/pull/11584)
+ * Mention the `--db-config-*-*` flag in the release notes [#11610](https://github.com/vitessio/vitess/pull/11610)
+ * Release notes for 15.0.1 [#11850](https://github.com/vitessio/vitess/pull/11850)
+ * updating summary and release notes for v15.0.1 [#11852](https://github.com/vitessio/vitess/pull/11852)
+ * [main] Update the release `15.0.2` summary doc (#11954) [#11956](https://github.com/vitessio/vitess/pull/11956)
+ * Release of v16.0.0-rc1 [#12235](https://github.com/vitessio/vitess/pull/12235)
+ * Back to dev mode after v16.0.0-rc1 [#12277](https://github.com/vitessio/vitess/pull/12277)
+ * [release-16.0] Fix release script for the version in the docker script (#12284) [#12289](https://github.com/vitessio/vitess/pull/12289)
+ * Code freeze of release-16.0 for v16.0.0 [#12469](https://github.com/vitessio/vitess/pull/12469)
+### Testing
+#### Backup and Restore
+ * go/vt/mysqlctl: add compression benchmarks [#11994](https://github.com/vitessio/vitess/pull/11994)
+#### Build/CI
+ * endtoend: fix race when closing vtgate [#11707](https://github.com/vitessio/vitess/pull/11707)
+ * [ci issue] Tests are running on older versions that do not support the query [#11923](https://github.com/vitessio/vitess/pull/11923)
+ * consolidating OnlineDDL 'singleton' tests into 'scheduler' tests: part 1 [#12055](https://github.com/vitessio/vitess/pull/12055)
+ * Internal: Fix Bad Merge [#12087](https://github.com/vitessio/vitess/pull/12087)
+ * add debug tooling [#12126](https://github.com/vitessio/vitess/pull/12126)
+ * Remove the semgrep action [#12148](https://github.com/vitessio/vitess/pull/12148)
+ * CI cleanup: remove onlineddl_declarative, onlineddl_singleton (cleanup part 2) [#12182](https://github.com/vitessio/vitess/pull/12182)
+ * Online DDL CI: consolidated revertible and revert CI tests (part 1) [#12183](https://github.com/vitessio/vitess/pull/12183)
+ * Allow manually kicking off CodeQL [#12200](https://github.com/vitessio/vitess/pull/12200)
+ * Don't keep data in upgrade-downgrade tests [#12462](https://github.com/vitessio/vitess/pull/12462)
+#### General
+ * endtoend: fix dbconfig initialization for endtoend tests [#11609](https://github.com/vitessio/vitess/pull/11609)
+#### Online DDL
+ * Backport to v16: onlineddl_vrepl flakiness and subsequent fixes [#12426](https://github.com/vitessio/vitess/pull/12426)
+#### Query Serving
+ * Add additional unit test with state changes swapped [#11192](https://github.com/vitessio/vitess/pull/11192)
+ * Use JSON for plan tests [#11430](https://github.com/vitessio/vitess/pull/11430)
+ * Add a PRIMARY KEY to the aggregation E2E tests [#11459](https://github.com/vitessio/vitess/pull/11459)
+ * Change the indexes in `TestEmptyTableAggr` to be unique [#11485](https://github.com/vitessio/vitess/pull/11485)
+ * Readable plan tests [#11708](https://github.com/vitessio/vitess/pull/11708)
+ * test: deflake TestQueryTimeoutWithTables [#11772](https://github.com/vitessio/vitess/pull/11772)
+ * more unit tests for QueryMatchesTemplates() [#11894](https://github.com/vitessio/vitess/pull/11894)
+ * remove e2e test from partial_keyspace config [#12005](https://github.com/vitessio/vitess/pull/12005)
+#### VReplication
+ * VDiff2: Migrate VDiff1 Unit Tests [#11916](https://github.com/vitessio/vitess/pull/11916)
+ * VReplication: Test Migrations From MariaDB to MySQL [#12036](https://github.com/vitessio/vitess/pull/12036)
+
diff --git a/changelog/16.0/16.0.0/release_notes.md b/changelog/16.0/16.0.0/release_notes.md
new file mode 100644
index 00000000000..e17a74a8d0f
--- /dev/null
+++ b/changelog/16.0/16.0.0/release_notes.md
@@ -0,0 +1,561 @@
+# Release of Vitess v16.0.0
+## Summary
+
+### Table of Contents
+
+- **[Known Issues](#known-issues)**
+ - [MySQL & Xtrabackup known issue](#mysql-xtrabackup-ddl)
+ - [VTTablet Restore Metrics](#vttablet-restore-metrics)
+ - [Schema-initialization stuck on semi-sync ACKs while upgrading to v16.0.0](#schema-init-upgrade)
+ - [Broken downgrade from v17.x.x when super_read_only turned on by default](#init-db-sql-turned-on)
+- **[Major Changes](#major-changes)**
+ - **[Breaking Changes](#breaking-changes)**
+ - [VTGate Advertised MySQL Version](#advertised-mysql-version)
+ - [Default MySQL version on Docker](#default-mysql-version)
+ - [⚠️ Upgrading to this release with vitess-operator](#upgrading-to-this-release-with-vitess-operator)
+ - [Flag Deletions and Deprecations](#flag-deletions-and-deprecations)
+ - [VTCtld](#vtctld-flag-deletions-deprecations)
+ - [MySQLCtl](#mysqlctl-flag-deletions-deprecations)
+ - [VTTablet](#vttablet-flag-deletions-deprecations)
+ - [VTBackup](#vtbackup-flag-deletions-deprecations)
+ - [VTOrc](#vtorc-flag-deletions-deprecations)
+ - [`lock-timeout` and `remote_operation_timeout` Changes](#lock-timeout-introduction)
+ - [Orchestrator Integration Deletion](#orc-integration-removal)
+ - [vtctld UI Removal](#vtcltd-ui-removal)
+ - [Query Serving Errors](#qs-errors)
+ - [Logstats Table and Keyspace removed](#logstats-table-keyspace)
+ - [Removed Stats](#removed-stats)
+ - [Deprecated Stats](#deprecated-stats)
+ - [Normalized labels in the Prometheus Exporter](#normalized-lables)
+ - **[Replication manager removal and VTOrc becomes mandatory](#repl-manager-removal)**
+ - **[VReplication](#vreplication)**
+ - [VStream Copy Resume](#vstream-copy-resume)
+ - [VDiff2 GA](#vdiff2-ga)
+ - **[Tablet throttler](#tablet-throttler)**
+ - **[Incremental backup and point in time recovery](#inc-backup)**
+ - **[New command line flags and behavior](#new-flag)**
+ - [VTGate: Support query timeout --query-timeout](#vtgate-query-timeout)
+ - [VTTablet: VReplication parallel insert workers --vreplication-parallel-insert-workers](#vrepl-parallel-workers)
+ - [VTTablet: --queryserver-config-pool-conn-max-lifetime](#queryserver-lifetime)
+ - [vttablet --throttler-config-via-topo](#vttablet-throttler-config)
+ - [vtctldclient UpdateThrottlerConfig](#vtctldclient-update-throttler)
+ - [vtctldclient Backup --incremental_from_pos](#vtctldclient-backup)
+ - [vtctldclient RestoreFromBackup --restore_to_pos](#vtctldclient-restore-from-backup)
+ - [New `vexplain` command](#new-vexplain-command)
+ - **[Important bug fixes](#important-bug-fixes)**
+ - [Corrupted results for non-full-group-by queries with JOINs](#corrupted-results)
+ - **[Deprecations and Removals](#deprecations-removals)**
+ - **[MySQL Compatibility](#mysql-compatibility)**
+ - [Transaction Isolation Level](#transaction-isolation-level)
+ - [Transaction Access Mode](#transaction-access-mode)
+ - [Support for views](#support-views)
+ - **[VTTestServer](#vttestserver)**
+ - [Performance Improvement](#perf-improvement)
+- **[Minor Changes](#minor-changes)**
+ - **[Backup compression benchmarks](#backup-comp-benchmarks)**
+- **[Refactor](#refactor)**
+ - **[VTTablet sidecar schema maintenance refactor](#vttablet-sidecar-schema)**
+
+## Known Issues
+
+### MySQL & Xtrabackup known issue
+
+There is a known issue with MySQL's INSTANT DDL combined with Percona XtraBackup, that affects users of Vitess 16.0.
+The problem is described in https://docs.percona.com/percona-xtrabackup/8.0/em/instant.html, and the immediate impact is you may not be able to backup your database using XtraBackup under certain conditions.
+
+As of MySQL 8.0.12, the default `ALGORITHM` for InnoDB's `ALTER TABLE` is `INSTANT`. In `8.0.12` only a small number of operations were eligible for `INSTANT`, but MySQL `8.0.29` added support for more common cases.
+Unfortunately, the changes in `8.0.29` affect XtraBackup as follows: if you `ALTER TABLE` in MySQL `8.0.29`, and that `ALTER` is eligible for `INSTANT` DDL (e.g. add new table column), then as of that moment, XtraBackup is unable to backup that table, hence your entire database.
+
+It is important to note that even if you then upgrade your MySQL server to, e.g. `8.0.32`, the table still cannot be backed up.
+
+Versions where XtraBackup is unable to backup such tables: MySQL `8.0.29` - `8.0.31`. This does not apply to Percona Server flavor.
+
+The issue is resolved with Percona XtraBackup `8.0.32` combined with MySQL `8.0.32`.
+
+You might be affected if:
+
+- You're using MySQL `8.0.29` - `8.0.31` and are using XtraBackup to backup your database
+- and, you have issued an `ALTER TABLE`, either directly, or using Online DDL in vitess `v16.0` and below
+
+A futures Vitess patch release `v16.0.1` will address the issue via Online DDL migrations.
+
+#### Mitigations
+
+- Use Percona XtraBackup `8.0.32` combined with MySQL `8.0.32`. To go with this option, you can use the docker image `vitess/lite:v16.0.0-mysql-8.0.32`.
+- or, Use a Percona Server flavor
+- or, always ensure to add `ALGORITHM=INPLACE` or `ALGORITHM=COPY` to your `ALTER TABLE` statements
+
+#### Workarounds
+
+If you have already been affected, these are the options to be able to backup your database:
+
+- Use `builtin` backups, see https://vitess.io/docs/15.0/user-guides/operating-vitess/backup-and-restore/creating-a-backup/. `builting` backups are not based on XtraBackup.
+- Upgrade to MySQL `8.0.32` or above and to Xtrabackup `8.0.32`, or switch to Percona Server. To go with this option, you can use the docker image `vitess/lite:v16.0.0-mysql-8.0.32`. Then rebuild the table directly via:
+ - `OPTIMIZE TABLE your_table`
+ - or, `ALTER TABLE your_table ENGINE=INNOB`
+- Upgrade to Vitess patch release `v16.0.1`, upgrade to MySQL `8.0.32` or above and to Xtrabackup `8.0.32`, or switch to Percona Server, and rebuild the table via Online DDL:
+```shell
+$ vtctldclient ApplySchema --skip_preflight --ddl_strategy "vitess" --sql "ALTER TABLE your_table ENGINE=InnoDB" your_keyspace
+```
+or
+```sql
+> SET @@ddl_strategy='vitess';
+> ALTER TABLE your_table ENGINE=InnoDB;
+```
+
+### VTTablet Restore Metrics
+
+As part of the VTTablet Sidecar Schema Maintenance Refactor in v16.0.0, we dropped the `local_metadata` table from the sidecar database schema. This table was storing a couple of metrics related to restores from backup, which have now been lost.
+They have been re-introduced in v17.0.0 as metrics that can be accessed from `/debug/vars`.
+
+The original issue can be found [here](https://github.com/vitessio/vitess/issues/13336).
+
+### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.0`
+
+During upgrades from `<= v15.x.x` to `v16.0.0`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database.
+The issue is that if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely.
+Eventually, `PromoteReplica` fails, and this fails the entire PRS call.
+
+A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426).
+
+This issue is fixed in `v16.0.3` and later patch releases.
+
+### Broken downgrade from v17.x.x when super_read_only turned on by default
+
+In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.0` breaks due to `init_db.sql` needing write access.
+
+This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525)
+
+## Major Changes
+
+### Breaking Changes
+
+#### VTGate Advertised MySQL Version
+
+Since [Pull Request #11989](https://github.com/vitessio/vitess/pull/11989), VTGate advertises MySQL version 8.0.30. This is a breaking change for clients that rely on the VTGate advertised MySQL version and still use MySQL 5.7.
+The users can set the `mysql_server_version` flag to advertise the correct version.
+
+It is worth noting that [the feature to avoid using reserved connections](https://vitess.io/docs/16.0/reference/query-serving/reserved-conn/#avoiding-the-use-of-reserved-connections) depends on the `mysql_server_version` CLI flag, which default value has been changed from `5.7.9-vitess` to `8.0.30-vitess`. We recommend that users running MySQL 5.7 set vtgate's `mysql_server_version` CLI flag to `5.7.9-vitess` to prevent the queries from being unexpectedly rewritten.
+
+#### Default MySQL version on Docker
+
+The default major MySQL version used by our `vitess/lite:latest` image is going from `5.7` to `8.0`. Additionally, the patch version of MySQL80 has been upgraded from `8.0.23` to `8.0.30`.
+This change was brought by [Pull Request #12252](https://github.com/vitessio/vitess/pull/12252).
+
+#### ⚠️Upgrading to this release with vitess-operator
+
+If you are using the vitess-operator and want to remain on MySQL 5.7, **you are required** to use the `vitess/lite:v16.0.0-mysql57` Docker Image, otherwise the `vitess/lite:v16.0.0` image will be on MySQL 80.
+
+However, if you are running MySQL 8.0 on the vitess-operator, with for instance `vitess/lite:v15.0.2-mysql80`, considering that we are bumping the patch version of MySQL 80 from `8.0.23` to `8.0.30`, you will have to manually upgrade:
+
+1. Add `innodb_fast_shutdown=0` to your extra cnf in your YAML file.
+2. Apply this file.
+3. Wait for all the pods to be healthy.
+4. Then change your YAML file to use the new Docker Images (`vitess/lite:v16.0.0`, defaults to mysql80).
+5. Remove `innodb_fast_shutdown=0` from your extra cnf in your YAML file.
+6. Apply this file.
+
+#### Flag Deletions and Deprecations
+
+##### VTCtld
+With the removal of the vtctld UI, the following vtctld flags have been deprecated:
+- `--vtctld_show_topology_crud`: This was a flag that controlled the display of CRUD topology actions in the vtctld UI. The UI is removed, so this flag is no longer necessary.
+
+The following deprecated flags have also been removed:
+- `--enable_realtime_stats`
+- `--enable_vtctld_ui`
+- `--web_dir`
+- `--web_dir2`
+- `--workflow_manager_init`
+- `--workflow_manager_use_election`
+- `--workflow_manager_disable`
+
+##### MySQLCtld
+
+The [`mysqlctl` command-line client](https://vitess.io/docs/16.0/reference/programs/mysqlctl/) had some leftover (ignored) server flags after the [v15 pflag work](https://github.com/vitessio/enhancements/blob/main/veps/vep-4.md). Those unused flags have now been removed. If you are using any of the following flags with `mysqlctl` in your scripts or other tooling, they will need to be removed prior to upgrading to v16:
+`--port --grpc_auth_static_client_creds --grpc_compression --grpc_initial_conn_window_size --grpc_initial_window_size --grpc_keepalive_time --grpc_keepalive_timeout`
+
+##### VTTablet
+
+The following flags were removed in v16:
+- `--enable_semi_sync`
+- `--backup_storage_hook`, use one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+- `--init_populate_metadata`, since we have deleted the `local_metadata` and `shard_metadata` sidecar database tables.
+
+The flag `--disable-replication-manager` is deprecated and will be removed in a future release.
+
+##### VTBackup
+
+The VTBackup flag `--backup_storage_hook` has been removed, use one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+
+##### VTOrc
+
+The flag `--lock-shard-timeout` has been deprecated. Please use the newly introduced `--lock-timeout` flag instead. More detail [here](#lock-timeout-introduction).
+
+#### `lock-timeout` and `remote_operation_timeout` Changes
+
+Before the changes made in [Pull Request #11881](https://github.com/vitessio/vitess/pull/11881), the shard and keyspace locks used to be capped by the `remote_operation_timeout`. This is no longer the case and instead a new flag called `lock-timeout` is introduced.
+For backward compatibility, if `lock-timeout` is unspecified and `remote_operation_timeout` flag is provided, then its value will also be used for `lock-timeout`.
+The default value for `remote_operation_timeout` has also changed from 30 seconds to 15 seconds. The default for the new flag `lock-timeout` is 45 seconds.
+
+During upgrades, if the users want to preserve the same behaviour as previous releases, then they should provide the `remote_operation_timeout` flag explicitly before upgrading.
+After the upgrade, they should then alter their configuration to also specify `lock-timeout` explicitly.
+
+#### Orchestrator Integration Deletion
+
+Orchestrator integration in `vttablet` was deprecated in the previous release and is deleted in this release.
+`VTOrc` should be deployed instead. You can read more on [how VTOrc is designed](https://vitess.io/docs/16.0/reference/vtorc/) and on [how to run VTOrc in production](https://vitess.io/docs/16.0/user-guides/configuration-basic/vtorc/).
+
+#### vtctld web UI Removal
+In v13, the vtctld UI was deprecated. As of this release, the `web/vtctld2` directory is deleted and the UI will no longer be included in any Vitess images going forward. All build scripts and the Makefile have been updated to reflect this change, which was done in [Pull Request #11851](https://github.com/vitessio/vitess/pull/11851)
+
+However, the vtctld HTTP API will remain at `{$vtctld_web_port}/api`.
+
+#### Query Serving Errors
+
+In [Pull Request #10738](https://github.com/vitessio/vitess/pull/10738) we are introducing a new way to report errors from Vitess through the query interface.
+Errors will now have an error code for each error, which will make it easy to search for more information on the issue.
+For instance, the following error:
+
+```
+aggregate functions take a single argument 'count(user_id, name)'
+```
+
+Will be transformed into:
+
+```
+VT03001: aggregate functions take a single argument 'count(user_id, name)'
+```
+
+The error code `VT03001` can then be used to search or ask for help and report problems.
+
+If you have code searching for error strings from Vitess, this is a breaking change.
+Many error strings have been tweaked.
+If your application is searching for specific errors, you might need to update your code.
+
+#### Logstats Table and Keyspace removed
+
+Information about which tables are used is now reported by the field TablesUsed added in v15, that is a string array, listing all tables and which keyspace they are in.
+The Table/Keyspace fields were deprecated in v15 and are now removed in the v16 release, more information can be found on [Pull Request #12083](https://github.com/vitessio/vitess/pull/12083).
+
+#### Removed Stats
+
+The stat `QueryRowCounts` is removed in v16 as part of [Pull Request #12083](https://github.com/vitessio/vitess/pull/12083). `QueryRowsAffected` and `QueryRowsReturned` can be used instead to gather the same information.
+
+#### Deprecated Stats
+
+The stats `QueriesProcessed` and `QueriesRouted` are deprecated in v16 as part of [Pull Request #12083](https://github.com/vitessio/vitess/pull/12083). The same information can be inferred from the stats `QueriesProcessedByTable` and `QueriesRoutedByTable` respectively. These stats will be removed in the next release.
+
+#### Normalized labels in the Prometheus Exporter
+
+The Prometheus metrics exporter now properly normalizes _all_ label names into their `snake_case` form, as it is idiomatic for Prometheus metrics. Previously, Vitess instances were emitting inconsistent labels for their metrics, with some of them being `CamelCase` and others being `snake_case`.
+More information about this change can be found on [Pull Request #12057](https://github.com/vitessio/vitess/pull/12057).
+
+For example, `vtgate_topology_watcher_errors{Operation="GetTablet"} 0` will become `vtgate_topology_watcher_errors{operation="GetTablet"} 0`
+
+Some more of these changes are listed here -
+
+| Previous metric | New Metric |
+|-------------------------------------------------------------|-------------------------------------------------------------|
+| vtgate_topology_watcher_operations{Operation="AddTablet"} | vtgate_topology_watcher_operations{operation="AddTablet"} |
+| vtgate_queries_processed{Plan="Reference"} | vtgate_queries_processed{plan="Reference"} |
+| vtgate_queries_routed{Plan="Reference"} | vtgate_queries_routed{plan="Reference"} |
+| vttablet_table_allocated_size{Table="corder"} | vttablet_table_allocated_size{table="corder"} |
+| vttablet_table_file_size{Table="corder"} | vttablet_table_file_size{table="corder"} |
+| vttablet_topology_watcher_errors{Operation="GetTablet"} | vttablet_topology_watcher_errors{operation="GetTablet"} |
+| vttablet_topology_watcher_operations{Operation="AddTablet"} | vttablet_topology_watcher_operations{operation="AddTablet"} |
+
+### Replication manager removal and VTOrc becomes mandatory
+VTOrc is now a **required** component of Vitess starting from v16. If the users want Vitess to manage replication, then they must run VTOrc.
+Replication manager is removed from vttablets since the responsibility of fixing replication lies entirely with VTOrc now.
+The flag `disable-replication-manager` is deprecated and will be removed in a future release.
+
+### VReplication
+
+#### VStream Copy Resume
+
+In [Pull Request #11103](https://github.com/vitessio/vitess/pull/11103) we introduced the ability to resume a `VTGate` [`VStream` copy operation](https://vitess.io/docs/16.0/reference/vreplication/vstream/). This is useful when a [`VStream` copy operation](https://vitess.io/docs/16.0/reference/vreplication/vstream/) is interrupted due to e.g. a network failure or a server restart. The `VStream` copy operation can be resumed by specifying each table's last seen primary key value in the `VStream` request. Please see the [`VStream` docs](https://vitess.io/docs/16.0/reference/vreplication/vstream/) for more details.
+
+#### VDiff2 GA
+
+We are marking [VDiff v2](https://vitess.io/docs/16.0/reference/vreplication/vdiff2/) as production-ready in v16. We now recommend that you use v2 rather than v1 going forward. V1 will be deprecated and eventually removed in future releases.
+If you wish to use v1 for any reason, you will now need to specify the `--v1` flag.
+
+### Tablet throttler
+
+The tablet throttler can now be configured dynamically. Configuration is now found in the topo service, and applies to all tablets in all shards and cells of a given keyspace.
+It is possible to enable or disable throttling, and to change the throttling threshold as well as the throttler query.
+
+Please note that this feature is considered experimental in this release. For backwards compatibility `v16` still supports `vttablet`-based command line flags for throttler configuration.
+
+More information can be found on [Pull Request #11604](https://github.com/vitessio/vitess/pull/11604).
+
+### Incremental backup and point in time recovery
+
+In [Pull Request #11097](https://github.com/vitessio/vitess/pull/11097) we introduced native incremental backup and point in time recovery:
+
+- It is possible to take an incremental backup, starting with last known (full or incremental) backup, and up to either a specified (GTID) position, or current ("auto") position.
+- The backup is done by copying binary logs. The binary logs are rotated as needed.
+- It is then possible to restore a backup up to a given point in time (GTID position). This involves finding a restore path consisting of a full backup and zero or more incremental backups, applied up to the given point in time.
+- A server restored to a point in time remains in `DRAINED` tablet type, and does not join the replication stream (thus, "frozen" in time).
+- It is possible to take incremental backups from different tablets. It is OK to have overlaps in incremental backup contents. The restore process chooses a valid path, and is valid as long as there are no gaps in the backed up binary log content.
+
+### New command line flags and behavior
+
+#### VTGate: Support query timeout --query-timeout
+
+`--query-timeout` allows you to specify a timeout for queries. This timeout is applied to all queries.
+It can be overridden by setting the `query_timeout` session variable.
+Setting it as query comment directive with `QUERY_TIMEOUT_MS` will override other values.
+
+#### VTTablet: VReplication parallel insert workers --vreplication-parallel-insert-workers
+
+`--vreplication-parallel-insert-workers=[integer]` enables parallel bulk inserts during the copy phase
+of VReplication (disabled by default). When set to a value greater than 1 the bulk inserts — each
+executed as a single transaction from the vstream packet contents — may happen in-parallel and
+out-of-order, but the commit of those transactions are still serialized in order.
+
+Other aspects of the VReplication copy-phase logic are preserved:
+
+ 1. All statements executed when processing a vstream packet occur within a single MySQL transaction.
+ 2. Writes to `_vt.copy_state` always follow their corresponding inserts from within the vstream packet.
+ 3. The final `commit` for the vstream packet always follows the corresponding write to `_vt.copy_state`.
+ 4. The vstream packets are committed in the order seen in the stream. So for any PK1 and PK2, the write to `_vt.copy_state` and `commit` steps (steps 2 and 3 above) for PK1 will both precede the `_vt.copy_state` write and commit steps of PK2.
+
+ Other phases, catchup, fast-forward, and replicating/"running", are unchanged.
+
+#### VTTablet: --queryserver-config-pool-conn-max-lifetime
+
+`--queryserver-config-pool-conn-max-lifetime=[integer]` allows you to set a timeout on each connection in the query server connection pool. It chooses a random value between its value and twice its value, and when a connection has lived longer than the chosen value, it'll be removed from the pool the next time it's returned to the pool.
+
+#### vttablet --throttler-config-via-topo
+
+The flag `--throttler-config-via-topo` switches throttler configuration from `vttablet`-flags to the topo service. This flag is `false` by default, for backwards compatibility. It will default to `true` in future versions.
+
+#### vtctldclient UpdateThrottlerConfig
+
+Tablet throttler configuration is now supported in `topo`. Updating the throttler configuration is done via `vtctldclient UpdateThrottlerConfig` and applies to all tablet in all cells for a given keyspace.
+
+Examples:
+
+```shell
+# disable throttler; all throttler checks will return with "200 OK"
+$ vtctldclient UpdateThrottlerConfig --disable commerce
+
+# enable throttler; checks are responded with appropriate status per current metrics
+$ vtctldclient UpdateThrottlerConfig --enable commerce
+
+# Both enable and set threshold in same command. Since no query is indicated, we assume the default check for replication lag
+$ vtctldclient UpdateThrottlerConfig --enable --threshold 5.0 commerce
+
+# Change threshold. Does not affect enabled/disabled state of the throttler
+$ vtctldclient UpdateThrottlerConfig --threshold 1.5 commerce
+
+# Use a custom query
+$ vtctldclient UpdateThrottlerConfig --custom_query "show global status like 'threads_running'" --check_as_check_self --threshold 50 commerce
+
+# Restore default query and threshold
+$ vtctldclient UpdateThrottlerConfig --custom_query "" --check_as_check_shard --threshold 1.5 commerce
+```
+
+See https://github.com/vitessio/vitess/pull/11604
+
+#### vtctldclient Backup --incremental_from_pos
+
+The `Backup` command now supports `--incremental_from_pos` flag, which can receive a valid position or the value `auto`. For example:
+
+```shell
+$ vtctlclient -- Backup --incremental_from_pos "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" zone1-0000000102
+$ vtctlclient -- Backup --incremental_from_pos "auto" zone1-0000000102
+```
+
+When the value is `auto`, the position is evaluated as the last successful backup's `Position`. The idea with incremental backups is to create a contiguous (overlaps allowed) sequence of backups that store all changes from last full backup.
+
+The incremental backup copies binary log files. It does not take MySQL down nor places any locks. It does not interrupt traffic on the MySQL server. The incremental backup copies complete binlog files. It initially rotates binary logs, then copies anything from the requested position and up to the last completed binary log.
+
+The backup thus does not necessarily start _exactly_ at the requested position. It starts with the first binary log that has newer entries than requested position. It is OK if the binary logs include transactions prior to the requested position. The restore process will discard any duplicates.
+
+Normally, you can expect the backups to be precisely contiguous. Consider an `auto` value: due to the nature of log rotation and the fact we copy complete binlog files, the next incremental backup will start with the first binay log not covered by the previous backup, which in itself copied the one previous binlog file in full. Again, it is completely valid to enter any good position.
+
+The incremental backup fails if it is unable to attain binary logs from given position (ie binary logs have been purged).
+
+The manifest of an incremental backup has a non-empty `FromPosition` value, and a `Incremental = true` value.
+
+#### vtctldclient RestoreFromBackup --restore_to_pos
+
+- `--restore_to_pos`: request to restore the server up to the given position (inclusive) and not one step further.
+- `--dry_run`: when `true`, calculate the restore process, if possible, evaluate a path, but exit without actually making any changes to the server.
+
+Examples:
+
+```shell
+$ vtctlclient -- RestoreFromBackup --restore_to_pos "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-220" zone1-0000000102
+```
+
+The restore process seeks a restore _path_: a sequence of backups (handles/manifests) consisting of one full backup followed by zero or more incremental backups, that can bring the server up to the requested position, inclusive.
+
+The command fails if it cannot evaluate a restore path. Possible reasons:
+
+- there's gaps in the incremental backups
+- existing backups don't reach as far as requested position
+- all full backups exceed requested position (so there's no way to get into an ealier position)
+
+The command outputs the restore path.
+
+There may be multiple restore paths, the command prefers a path with the least number of backups. This has nothing to say about the amount and size of binary logs involved.
+
+The `RestoreFromBackup --restore_to_pos` ends with:
+
+- the restored server in intentionally broken replication setup
+- tablet type is `DRAINED`
+
+#### New `vexplain` command
+A new `vexplain` command has been introduced with the following syntax:
+```
+VEXPLAIN [ALL|QUERIES|PLAN] explainable_stmt
+```
+
+This command will help users look at the plan that vtgate comes up with for the given query (`PLAN` type), see all the queries that are executed on all the MySQL instances (`QUERIES` type), and see the vtgate plan along with the MySQL explain output for the executed queries (`ALL` type).
+
+The formats `VTEXPLAIN` and `VITESS` for `EXPLAIN` queries are deprecated, and these newly introduced commands should be used instead.
+
+### Important bug fixes
+
+#### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
+
+### Deprecations and Removals
+
+- The V3 planner is deprecated as of the v16 release, and will be removed in the v17 release of Vitess.
+
+- The [VReplication v1 commands](https://vitess.io/docs/15.0/reference/vreplication/v1/) — which were deprecated in Vitess 11.0 — have been removed. You will need to use the [VReplication v2 commands](https://vitess.io/docs/16.0/reference/vreplication/v2/) instead.
+
+- The `vtctlclient VExec` command was removed, having been deprecated since v12.
+
+- The `vtctlclient VReplicationExec` command has now been deprecated and will be removed in a future release. Please see [#12070](https://github.com/vitessio/vitess/pull/12070) for additional details.
+
+- `vtctlclient OnlineDDL ... [complete|retry|cancel|cancel-all]` returns empty result on success instead of number of shard affected.
+
+- The dead legacy Workflow Manager related code was removed in [#12085](https://github.com/vitessio/vitess/pull/12085). This included the following `vtctl` client commands: `WorkflowAction`, `WorkflowCreate`, `WorkflowWait`, `WorkflowStart`, `WorkflowStop`, `WorkflowTree`, `WorkflowDelete`.
+
+- VTAdmin's `VTExplain` endpoint has been deprecated. Users can use the new `vexplain` query format instead. The endpoint will be deleted in a future release.
+
+### MySQL Compatibility
+
+#### Transaction Isolation Level
+
+In [Pull Request #11704](https://github.com/vitessio/vitess/pull/11704) we are adding support for `set [session] transaction isolation level `
+
+```sql
+transaction_characteristic: {
+ ISOLATION LEVEL level
+ | access_mode
+}
+
+level: {
+ REPEATABLE READ
+ | READ COMMITTED
+ | READ UNCOMMITTED
+ | SERIALIZABLE
+}
+```
+
+This will set the transaction isolation level for the current session.
+This will be applied to any shard where the session will open a transaction.
+
+#### Transaction Access Mode
+
+In [Pull Request #11704](https://github.com/vitessio/vitess/pull/11704) we are adding support for `start transaction` with transaction characteristic.
+
+```sql
+START TRANSACTION
+ [transaction_characteristic [, transaction_characteristic] ...]
+
+transaction_characteristic: {
+ WITH CONSISTENT SNAPSHOT
+ | READ WRITE
+ | READ ONLY
+}
+```
+
+This will allow users to start a transaction with these characteristics.
+
+#### Support For Views
+
+Views sharded support is released as an experimental feature in `v16.0.0`.
+Views are not enabled by default in your Vitess cluster, but they can be turned on using the `--enable-views` flag on VTGate, and `--queryserver-enable-views` flag on VTTablet.
+
+To read more on how views are implemented you can read the [Views Support RFC](https://github.com/vitessio/vitess/issues/11559).
+And if you want to learn more on how to use views and its current limitations, you can read the [Views Documentation](https://vitess.io/docs/16.0/reference/compatibility/mysql-compatibility/#views).
+
+### VTTestServer
+
+#### Performance Improvement
+
+Creating a database with vttestserver was taking ~45 seconds. This can be problematic in test environments where testcases do a lot of `create` and `drop` database.
+In an effort to minimize the database creation time, in [Pull Request #11918](https://github.com/vitessio/vitess/pull/11918) we have changed the value of `tablet_refresh_interval` to 10s while instantiating vtcombo during vttestserver initialization. We have also made this configurable so that it can be reduced further if desired.
+For any production cluster the default value of this flag is still [1 minute](https://vitess.io/docs/16.0/reference/programs/vtgate/). Reducing this value might put more stress on Topo Server (since we now read from Topo server more often) but for testing purposes
+this shouldn't be a concern.
+
+## Minor changes
+
+### Backup Compression Benchmarks
+
+Compression benchmarks have been added to the `mysqlctl` package.
+
+The benchmarks fetch and compress a ~6 GiB tar file containing 3 InnoDB files using different built-in and external compressors.
+
+Here are sample results from a 2020-era Mac M1 with 16 GiB of memory:
+
+```sh
+$ go test -bench=BenchmarkCompress ./go/vt/mysqlctl -run=NONE -timeout=12h -benchtime=1x -v
+goos: darwin
+goarch: arm64
+pkg: vitess.io/vitess/go/vt/mysqlctl
+BenchmarkCompressLz4Builtin
+ compression_benchmark_test.go:310: downloading data from https://www.dropbox.com/s/raw/smmgifsooy5qytd/enwiki-20080103-pages-articles.ibd.tar.zst
+ BenchmarkCompressLz4Builtin-8 1 11737493087 ns/op 577.98 MB/s 2.554 compression-ratio
+ BenchmarkCompressPargzipBuiltin
+ BenchmarkCompressPargzipBuiltin-8 1 31083784040 ns/op 218.25 MB/s 2.943 compression-ratio
+ BenchmarkCompressPgzipBuiltin
+ BenchmarkCompressPgzipBuiltin-8 1 13325299680 ns/op 509.11 MB/s 2.910 compression-ratio
+ BenchmarkCompressZstdBuiltin
+ BenchmarkCompressZstdBuiltin-8 1 18683863911 ns/op 363.09 MB/s 3.150 compression-ratio
+ BenchmarkCompressZstdExternal
+ BenchmarkCompressZstdExternal-8 1 10795487675 ns/op 628.41 MB/s 3.093 compression-ratio
+ BenchmarkCompressZstdExternalFast4
+ BenchmarkCompressZstdExternalFast4-8 1 7139319009 ns/op 950.23 MB/s 2.323 compression-ratio
+ BenchmarkCompressZstdExternalT0
+ BenchmarkCompressZstdExternalT0-8 1 4393860434 ns/op 1543.97 MB/s 3.093 compression-ratio
+ BenchmarkCompressZstdExternalT4
+ BenchmarkCompressZstdExternalT4-8 1 4389559744 ns/op 1545.49 MB/s 3.093 compression-ratio
+ PASS
+ cleaning up "/var/folders/96/k7gzd7q10zdb749vr02q7sjh0000gn/T/ee7d47b45ef09786c54fa2d7354d2a68.dat"
+```
+
+## Refactor
+
+### VTTablet Sidecar Schema Maintenance Refactor
+
+This is an internal refactor and should not change the behavior of Vitess as seen by users.
+
+Developers will see a difference though: v16 changes the way we maintain vttablet's sidecar database schema (also referred to as the `_vt`
+database). Instead of using the `WithDDL` package, introduced in [PR #6348](https://github.com/vitessio/vitess/pull/6348), we use a
+declarative approach. Developers will now have to update the desired schema in the `go/vt/sidecardb/schema` directory.
+
+The desired schema is specified, one per table. A new module `sidecardb`, compares this to the existing schema and
+performs the required `create` or `alter` to reach it. This is done whenever a primary vttablet starts up.
+
+The sidecar tables `local_metadata` and `shard_metadata` are no longer in use and all references to them are removed as
+part of this refactor. They were used previously for Orchestrator support, which has been superseded by `vtorc`.
+
+------------
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.0/changelog.md).
+
+The release includes 378 commits (excluding merges)
+
+Thanks to all our contributors: @EmadMokhtar, @GuptaManan100, @Weijun-H, @WilliamLu99, @ajm188, @arthurschreiber, @arvind-murty, @brendar, @brirams, @dbussink, @deepthi, @dependabot[bot], @draftcode, @ejortegau, @frouioui, @harshit-gangal, @jjh-kim, @johanoskarsson, @kbslvsk, @mattlord, @maxenglander, @mdlayher, @notfelineit, @pbibra, @pudiva, @rohit-nayak-ps, @rsajwani, @shlomi-noach, @systay, @timvaillancourt, @vitess-bot[bot], @vmg, @yoheimuta
+
diff --git a/changelog/16.0/16.0.0/summary.md b/changelog/16.0/16.0.0/summary.md
new file mode 100644
index 00000000000..c895a33130b
--- /dev/null
+++ b/changelog/16.0/16.0.0/summary.md
@@ -0,0 +1,551 @@
+## Summary
+
+### Table of Contents
+
+- **[Known Issues](#known-issues)**
+ - [MySQL & Xtrabackup known issue](#mysql-xtrabackup-ddl)
+ - [VTTablet Restore Metrics](#vttablet-restore-metrics)
+ - [Schema-initialization stuck on semi-sync ACKs while upgrading to v16.0.0](#schema-init-upgrade)
+ - [Broken downgrade from v17.x.x when super_read_only turned on by default](#init-db-sql-turned-on)
+- **[Major Changes](#major-changes)**
+ - **[Breaking Changes](#breaking-changes)**
+ - [VTGate Advertised MySQL Version](#advertised-mysql-version)
+ - [Default MySQL version on Docker](#default-mysql-version)
+ - [⚠️ Upgrading to this release with vitess-operator](#upgrading-to-this-release-with-vitess-operator)
+ - [Flag Deletions and Deprecations](#flag-deletions-and-deprecations)
+ - [VTCtld](#vtctld-flag-deletions-deprecations)
+ - [MySQLCtl](#mysqlctl-flag-deletions-deprecations)
+ - [VTTablet](#vttablet-flag-deletions-deprecations)
+ - [VTBackup](#vtbackup-flag-deletions-deprecations)
+ - [VTOrc](#vtorc-flag-deletions-deprecations)
+ - [`lock-timeout` and `remote_operation_timeout` Changes](#lock-timeout-introduction)
+ - [Orchestrator Integration Deletion](#orc-integration-removal)
+ - [vtctld UI Removal](#vtcltd-ui-removal)
+ - [Query Serving Errors](#qs-errors)
+ - [Logstats Table and Keyspace removed](#logstats-table-keyspace)
+ - [Removed Stats](#removed-stats)
+ - [Deprecated Stats](#deprecated-stats)
+ - [Normalized labels in the Prometheus Exporter](#normalized-lables)
+ - **[Replication manager removal and VTOrc becomes mandatory](#repl-manager-removal)**
+ - **[VReplication](#vreplication)**
+ - [VStream Copy Resume](#vstream-copy-resume)
+ - [VDiff2 GA](#vdiff2-ga)
+ - **[Tablet throttler](#tablet-throttler)**
+ - **[Incremental backup and point in time recovery](#inc-backup)**
+ - **[New command line flags and behavior](#new-flag)**
+ - [VTGate: Support query timeout --query-timeout](#vtgate-query-timeout)
+ - [VTTablet: VReplication parallel insert workers --vreplication-parallel-insert-workers](#vrepl-parallel-workers)
+ - [VTTablet: --queryserver-config-pool-conn-max-lifetime](#queryserver-lifetime)
+ - [vttablet --throttler-config-via-topo](#vttablet-throttler-config)
+ - [vtctldclient UpdateThrottlerConfig](#vtctldclient-update-throttler)
+ - [vtctldclient Backup --incremental_from_pos](#vtctldclient-backup)
+ - [vtctldclient RestoreFromBackup --restore_to_pos](#vtctldclient-restore-from-backup)
+ - [New `vexplain` command](#new-vexplain-command)
+ - **[Important bug fixes](#important-bug-fixes)**
+ - [Corrupted results for non-full-group-by queries with JOINs](#corrupted-results)
+ - **[Deprecations and Removals](#deprecations-removals)**
+ - **[MySQL Compatibility](#mysql-compatibility)**
+ - [Transaction Isolation Level](#transaction-isolation-level)
+ - [Transaction Access Mode](#transaction-access-mode)
+ - [Support for views](#support-views)
+ - **[VTTestServer](#vttestserver)**
+ - [Performance Improvement](#perf-improvement)
+- **[Minor Changes](#minor-changes)**
+ - **[Backup compression benchmarks](#backup-comp-benchmarks)**
+- **[Refactor](#refactor)**
+ - **[VTTablet sidecar schema maintenance refactor](#vttablet-sidecar-schema)**
+
+## Known Issues
+
+### MySQL & Xtrabackup known issue
+
+There is a known issue with MySQL's INSTANT DDL combined with Percona XtraBackup, that affects users of Vitess 16.0.
+The problem is described in https://docs.percona.com/percona-xtrabackup/8.0/em/instant.html, and the immediate impact is you may not be able to backup your database using XtraBackup under certain conditions.
+
+As of MySQL 8.0.12, the default `ALGORITHM` for InnoDB's `ALTER TABLE` is `INSTANT`. In `8.0.12` only a small number of operations were eligible for `INSTANT`, but MySQL `8.0.29` added support for more common cases.
+Unfortunately, the changes in `8.0.29` affect XtraBackup as follows: if you `ALTER TABLE` in MySQL `8.0.29`, and that `ALTER` is eligible for `INSTANT` DDL (e.g. add new table column), then as of that moment, XtraBackup is unable to backup that table, hence your entire database.
+
+It is important to note that even if you then upgrade your MySQL server to, e.g. `8.0.32`, the table still cannot be backed up.
+
+Versions where XtraBackup is unable to backup such tables: MySQL `8.0.29` - `8.0.31`. This does not apply to Percona Server flavor.
+
+The issue is resolved with Percona XtraBackup `8.0.32` combined with MySQL `8.0.32`.
+
+You might be affected if:
+
+- You're using MySQL `8.0.29` - `8.0.31` and are using XtraBackup to backup your database
+- and, you have issued an `ALTER TABLE`, either directly, or using Online DDL in vitess `v16.0` and below
+
+A futures Vitess patch release `v16.0.1` will address the issue via Online DDL migrations.
+
+#### Mitigations
+
+- Use Percona XtraBackup `8.0.32` combined with MySQL `8.0.32`. To go with this option, you can use the docker image `vitess/lite:v16.0.0-mysql-8.0.32`.
+- or, Use a Percona Server flavor
+- or, always ensure to add `ALGORITHM=INPLACE` or `ALGORITHM=COPY` to your `ALTER TABLE` statements
+
+#### Workarounds
+
+If you have already been affected, these are the options to be able to backup your database:
+
+- Use `builtin` backups, see https://vitess.io/docs/15.0/user-guides/operating-vitess/backup-and-restore/creating-a-backup/. `builting` backups are not based on XtraBackup.
+- Upgrade to MySQL `8.0.32` or above and to Xtrabackup `8.0.32`, or switch to Percona Server. To go with this option, you can use the docker image `vitess/lite:v16.0.0-mysql-8.0.32`. Then rebuild the table directly via:
+ - `OPTIMIZE TABLE your_table`
+ - or, `ALTER TABLE your_table ENGINE=INNOB`
+- Upgrade to Vitess patch release `v16.0.1`, upgrade to MySQL `8.0.32` or above and to Xtrabackup `8.0.32`, or switch to Percona Server, and rebuild the table via Online DDL:
+```shell
+$ vtctldclient ApplySchema --skip_preflight --ddl_strategy "vitess" --sql "ALTER TABLE your_table ENGINE=InnoDB" your_keyspace
+```
+or
+```sql
+> SET @@ddl_strategy='vitess';
+> ALTER TABLE your_table ENGINE=InnoDB;
+```
+
+### VTTablet Restore Metrics
+
+As part of the VTTablet Sidecar Schema Maintenance Refactor in v16.0.0, we dropped the `local_metadata` table from the sidecar database schema. This table was storing a couple of metrics related to restores from backup, which have now been lost.
+They have been re-introduced in v17.0.0 as metrics that can be accessed from `/debug/vars`.
+
+The original issue can be found [here](https://github.com/vitessio/vitess/issues/13336).
+
+### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.0`
+
+During upgrades from `<= v15.x.x` to `v16.0.0`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely.
+Eventually, `PromoteReplica` fails, and this fails the entire PRS call.
+
+A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426).
+
+This issue is fixed in `v16.0.3` and later patch releases.
+
+### Broken downgrade from v17.x.x when super_read_only turned on by default
+
+In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.0` breaks due to `init_db.sql` needing write access.
+
+This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525).
+
+## Major Changes
+
+### Breaking Changes
+
+#### VTGate Advertised MySQL Version
+
+Since [Pull Request #11989](https://github.com/vitessio/vitess/pull/11989), VTGate advertises MySQL version 8.0.30. This is a breaking change for clients that rely on the VTGate advertised MySQL version and still use MySQL 5.7.
+The users can set the `mysql_server_version` flag to advertise the correct version.
+
+It is worth noting that [the feature to avoid using reserved connections](https://vitess.io/docs/16.0/reference/query-serving/reserved-conn/#avoiding-the-use-of-reserved-connections) depends on the `mysql_server_version` CLI flag, which default value has been changed from `5.7.9-vitess` to `8.0.30-vitess`. We recommend that users running MySQL 5.7 set vtgate's `mysql_server_version` CLI flag to `5.7.9-vitess` to prevent the queries from being unexpectedly rewritten.
+
+#### Default MySQL version on Docker
+
+The default major MySQL version used by our `vitess/lite:latest` image is going from `5.7` to `8.0`. Additionally, the patch version of MySQL80 has been upgraded from `8.0.23` to `8.0.30`.
+This change was brought by [Pull Request #12252](https://github.com/vitessio/vitess/pull/12252).
+
+#### ⚠️Upgrading to this release with vitess-operator
+
+If you are using the vitess-operator and want to remain on MySQL 5.7, **you are required** to use the `vitess/lite:v16.0.0-mysql57` Docker Image, otherwise the `vitess/lite:v16.0.0` image will be on MySQL 80.
+
+However, if you are running MySQL 8.0 on the vitess-operator, with for instance `vitess/lite:v15.0.2-mysql80`, considering that we are bumping the patch version of MySQL 80 from `8.0.23` to `8.0.30`, you will have to manually upgrade:
+
+1. Add `innodb_fast_shutdown=0` to your extra cnf in your YAML file.
+2. Apply this file.
+3. Wait for all the pods to be healthy.
+4. Then change your YAML file to use the new Docker Images (`vitess/lite:v16.0.0`, defaults to mysql80).
+5. Remove `innodb_fast_shutdown=0` from your extra cnf in your YAML file.
+6. Apply this file.
+
+#### Flag Deletions and Deprecations
+
+##### VTCtld
+With the removal of the vtctld UI, the following vtctld flags have been deprecated:
+- `--vtctld_show_topology_crud`: This was a flag that controlled the display of CRUD topology actions in the vtctld UI. The UI is removed, so this flag is no longer necessary.
+
+The following deprecated flags have also been removed:
+- `--enable_realtime_stats`
+- `--enable_vtctld_ui`
+- `--web_dir`
+- `--web_dir2`
+- `--workflow_manager_init`
+- `--workflow_manager_use_election`
+- `--workflow_manager_disable`
+
+##### MySQLCtld
+
+The [`mysqlctl` command-line client](https://vitess.io/docs/16.0/reference/programs/mysqlctl/) had some leftover (ignored) server flags after the [v15 pflag work](https://github.com/vitessio/enhancements/blob/main/veps/vep-4.md). Those unused flags have now been removed. If you are using any of the following flags with `mysqlctl` in your scripts or other tooling, they will need to be removed prior to upgrading to v16:
+`--port --grpc_auth_static_client_creds --grpc_compression --grpc_initial_conn_window_size --grpc_initial_window_size --grpc_keepalive_time --grpc_keepalive_timeout`
+
+##### VTTablet
+
+The following flags were removed in v16:
+- `--enable_semi_sync`
+- `--backup_storage_hook`, use one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+- `--init_populate_metadata`, since we have deleted the `local_metadata` and `shard_metadata` sidecar database tables.
+
+The flag `--disable-replication-manager` is deprecated and will be removed in a future release.
+
+##### VTBackup
+
+The VTBackup flag `--backup_storage_hook` has been removed, use one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+
+##### VTOrc
+
+The flag `--lock-shard-timeout` has been deprecated. Please use the newly introduced `--lock-timeout` flag instead. More detail [here](#lock-timeout-introduction).
+
+#### `lock-timeout` and `remote_operation_timeout` Changes
+
+Before the changes made in [Pull Request #11881](https://github.com/vitessio/vitess/pull/11881), the shard and keyspace locks used to be capped by the `remote_operation_timeout`. This is no longer the case and instead a new flag called `lock-timeout` is introduced.
+For backward compatibility, if `lock-timeout` is unspecified and `remote_operation_timeout` flag is provided, then its value will also be used for `lock-timeout`.
+The default value for `remote_operation_timeout` has also changed from 30 seconds to 15 seconds. The default for the new flag `lock-timeout` is 45 seconds.
+
+During upgrades, if the users want to preserve the same behaviour as previous releases, then they should provide the `remote_operation_timeout` flag explicitly before upgrading.
+After the upgrade, they should then alter their configuration to also specify `lock-timeout` explicitly.
+
+#### Orchestrator Integration Deletion
+
+Orchestrator integration in `vttablet` was deprecated in the previous release and is deleted in this release.
+`VTOrc` should be deployed instead. You can read more on [how VTOrc is designed](https://vitess.io/docs/16.0/reference/vtorc/) and on [how to run VTOrc in production](https://vitess.io/docs/16.0/user-guides/configuration-basic/vtorc/).
+
+#### vtctld web UI Removal
+In v13, the vtctld UI was deprecated. As of this release, the `web/vtctld2` directory is deleted and the UI will no longer be included in any Vitess images going forward. All build scripts and the Makefile have been updated to reflect this change, which was done in [Pull Request #11851](https://github.com/vitessio/vitess/pull/11851)
+
+However, the vtctld HTTP API will remain at `{$vtctld_web_port}/api`.
+
+#### Query Serving Errors
+
+In [Pull Request #10738](https://github.com/vitessio/vitess/pull/10738) we are introducing a new way to report errors from Vitess through the query interface.
+Errors will now have an error code for each error, which will make it easy to search for more information on the issue.
+For instance, the following error:
+
+```
+aggregate functions take a single argument 'count(user_id, name)'
+```
+
+Will be transformed into:
+
+```
+VT03001: aggregate functions take a single argument 'count(user_id, name)'
+```
+
+The error code `VT03001` can then be used to search or ask for help and report problems.
+
+If you have code searching for error strings from Vitess, this is a breaking change.
+Many error strings have been tweaked.
+If your application is searching for specific errors, you might need to update your code.
+
+#### Logstats Table and Keyspace removed
+
+Information about which tables are used is now reported by the field TablesUsed added in v15, that is a string array, listing all tables and which keyspace they are in.
+The Table/Keyspace fields were deprecated in v15 and are now removed in the v16 release, more information can be found on [Pull Request #12083](https://github.com/vitessio/vitess/pull/12083).
+
+#### Removed Stats
+
+The stat `QueryRowCounts` is removed in v16 as part of [Pull Request #12083](https://github.com/vitessio/vitess/pull/12083). `QueryRowsAffected` and `QueryRowsReturned` can be used instead to gather the same information.
+
+#### Deprecated Stats
+
+The stats `QueriesProcessed` and `QueriesRouted` are deprecated in v16 as part of [Pull Request #12083](https://github.com/vitessio/vitess/pull/12083). The same information can be inferred from the stats `QueriesProcessedByTable` and `QueriesRoutedByTable` respectively. These stats will be removed in the next release.
+
+#### Normalized labels in the Prometheus Exporter
+
+The Prometheus metrics exporter now properly normalizes _all_ label names into their `snake_case` form, as it is idiomatic for Prometheus metrics. Previously, Vitess instances were emitting inconsistent labels for their metrics, with some of them being `CamelCase` and others being `snake_case`.
+More information about this change can be found on [Pull Request #12057](https://github.com/vitessio/vitess/pull/12057).
+
+For example, `vtgate_topology_watcher_errors{Operation="GetTablet"} 0` will become `vtgate_topology_watcher_errors{operation="GetTablet"} 0`
+
+Some more of these changes are listed here -
+
+| Previous metric | New Metric |
+|-------------------------------------------------------------|-------------------------------------------------------------|
+| vtgate_topology_watcher_operations{Operation="AddTablet"} | vtgate_topology_watcher_operations{operation="AddTablet"} |
+| vtgate_queries_processed{Plan="Reference"} | vtgate_queries_processed{plan="Reference"} |
+| vtgate_queries_routed{Plan="Reference"} | vtgate_queries_routed{plan="Reference"} |
+| vttablet_table_allocated_size{Table="corder"} | vttablet_table_allocated_size{table="corder"} |
+| vttablet_table_file_size{Table="corder"} | vttablet_table_file_size{table="corder"} |
+| vttablet_topology_watcher_errors{Operation="GetTablet"} | vttablet_topology_watcher_errors{operation="GetTablet"} |
+| vttablet_topology_watcher_operations{Operation="AddTablet"} | vttablet_topology_watcher_operations{operation="AddTablet"} |
+
+### Replication manager removal and VTOrc becomes mandatory
+VTOrc is now a **required** component of Vitess starting from v16. If the users want Vitess to manage replication, then they must run VTOrc.
+Replication manager is removed from vttablets since the responsibility of fixing replication lies entirely with VTOrc now.
+The flag `disable-replication-manager` is deprecated and will be removed in a future release.
+
+### VReplication
+
+#### VStream Copy Resume
+
+In [Pull Request #11103](https://github.com/vitessio/vitess/pull/11103) we introduced the ability to resume a `VTGate` [`VStream` copy operation](https://vitess.io/docs/16.0/reference/vreplication/vstream/). This is useful when a [`VStream` copy operation](https://vitess.io/docs/16.0/reference/vreplication/vstream/) is interrupted due to e.g. a network failure or a server restart. The `VStream` copy operation can be resumed by specifying each table's last seen primary key value in the `VStream` request. Please see the [`VStream` docs](https://vitess.io/docs/16.0/reference/vreplication/vstream/) for more details.
+
+#### VDiff2 GA
+
+We are marking [VDiff v2](https://vitess.io/docs/16.0/reference/vreplication/vdiff2/) as production-ready in v16. We now recommend that you use v2 rather than v1 going forward. V1 will be deprecated and eventually removed in future releases.
+If you wish to use v1 for any reason, you will now need to specify the `--v1` flag.
+
+### Tablet throttler
+
+The tablet throttler can now be configured dynamically. Configuration is now found in the topo service, and applies to all tablets in all shards and cells of a given keyspace.
+It is possible to enable or disable throttling, and to change the throttling threshold as well as the throttler query.
+
+Please note that this feature is considered experimental in this release. For backwards compatibility `v16` still supports `vttablet`-based command line flags for throttler configuration.
+
+More information can be found on [Pull Request #11604](https://github.com/vitessio/vitess/pull/11604).
+
+### Incremental backup and point in time recovery
+
+In [Pull Request #11097](https://github.com/vitessio/vitess/pull/11097) we introduced native incremental backup and point in time recovery:
+
+- It is possible to take an incremental backup, starting with last known (full or incremental) backup, and up to either a specified (GTID) position, or current ("auto") position.
+- The backup is done by copying binary logs. The binary logs are rotated as needed.
+- It is then possible to restore a backup up to a given point in time (GTID position). This involves finding a restore path consisting of a full backup and zero or more incremental backups, applied up to the given point in time.
+- A server restored to a point in time remains in `DRAINED` tablet type, and does not join the replication stream (thus, "frozen" in time).
+- It is possible to take incremental backups from different tablets. It is OK to have overlaps in incremental backup contents. The restore process chooses a valid path, and is valid as long as there are no gaps in the backed up binary log content.
+
+### New command line flags and behavior
+
+#### VTGate: Support query timeout --query-timeout
+
+`--query-timeout` allows you to specify a timeout for queries. This timeout is applied to all queries.
+It can be overridden by setting the `query_timeout` session variable.
+Setting it as query comment directive with `QUERY_TIMEOUT_MS` will override other values.
+
+#### VTTablet: VReplication parallel insert workers --vreplication-parallel-insert-workers
+
+`--vreplication-parallel-insert-workers=[integer]` enables parallel bulk inserts during the copy phase
+of VReplication (disabled by default). When set to a value greater than 1 the bulk inserts — each
+executed as a single transaction from the vstream packet contents — may happen in-parallel and
+out-of-order, but the commit of those transactions are still serialized in order.
+
+Other aspects of the VReplication copy-phase logic are preserved:
+
+ 1. All statements executed when processing a vstream packet occur within a single MySQL transaction.
+ 2. Writes to `_vt.copy_state` always follow their corresponding inserts from within the vstream packet.
+ 3. The final `commit` for the vstream packet always follows the corresponding write to `_vt.copy_state`.
+ 4. The vstream packets are committed in the order seen in the stream. So for any PK1 and PK2, the write to `_vt.copy_state` and `commit` steps (steps 2 and 3 above) for PK1 will both precede the `_vt.copy_state` write and commit steps of PK2.
+
+ Other phases, catchup, fast-forward, and replicating/"running", are unchanged.
+
+#### VTTablet: --queryserver-config-pool-conn-max-lifetime
+
+`--queryserver-config-pool-conn-max-lifetime=[integer]` allows you to set a timeout on each connection in the query server connection pool. It chooses a random value between its value and twice its value, and when a connection has lived longer than the chosen value, it'll be removed from the pool the next time it's returned to the pool.
+
+#### vttablet --throttler-config-via-topo
+
+The flag `--throttler-config-via-topo` switches throttler configuration from `vttablet`-flags to the topo service. This flag is `false` by default, for backwards compatibility. It will default to `true` in future versions.
+
+#### vtctldclient UpdateThrottlerConfig
+
+Tablet throttler configuration is now supported in `topo`. Updating the throttler configuration is done via `vtctldclient UpdateThrottlerConfig` and applies to all tablet in all cells for a given keyspace.
+
+Examples:
+
+```shell
+# disable throttler; all throttler checks will return with "200 OK"
+$ vtctldclient UpdateThrottlerConfig --disable commerce
+
+# enable throttler; checks are responded with appropriate status per current metrics
+$ vtctldclient UpdateThrottlerConfig --enable commerce
+
+# Both enable and set threshold in same command. Since no query is indicated, we assume the default check for replication lag
+$ vtctldclient UpdateThrottlerConfig --enable --threshold 5.0 commerce
+
+# Change threshold. Does not affect enabled/disabled state of the throttler
+$ vtctldclient UpdateThrottlerConfig --threshold 1.5 commerce
+
+# Use a custom query
+$ vtctldclient UpdateThrottlerConfig --custom_query "show global status like 'threads_running'" --check_as_check_self --threshold 50 commerce
+
+# Restore default query and threshold
+$ vtctldclient UpdateThrottlerConfig --custom_query "" --check_as_check_shard --threshold 1.5 commerce
+```
+
+See https://github.com/vitessio/vitess/pull/11604
+
+#### vtctldclient Backup --incremental_from_pos
+
+The `Backup` command now supports `--incremental_from_pos` flag, which can receive a valid position or the value `auto`. For example:
+
+```shell
+$ vtctlclient -- Backup --incremental_from_pos "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" zone1-0000000102
+$ vtctlclient -- Backup --incremental_from_pos "auto" zone1-0000000102
+```
+
+When the value is `auto`, the position is evaluated as the last successful backup's `Position`. The idea with incremental backups is to create a contiguous (overlaps allowed) sequence of backups that store all changes from last full backup.
+
+The incremental backup copies binary log files. It does not take MySQL down nor places any locks. It does not interrupt traffic on the MySQL server. The incremental backup copies complete binlog files. It initially rotates binary logs, then copies anything from the requested position and up to the last completed binary log.
+
+The backup thus does not necessarily start _exactly_ at the requested position. It starts with the first binary log that has newer entries than requested position. It is OK if the binary logs include transactions prior to the requested position. The restore process will discard any duplicates.
+
+Normally, you can expect the backups to be precisely contiguous. Consider an `auto` value: due to the nature of log rotation and the fact we copy complete binlog files, the next incremental backup will start with the first binay log not covered by the previous backup, which in itself copied the one previous binlog file in full. Again, it is completely valid to enter any good position.
+
+The incremental backup fails if it is unable to attain binary logs from given position (ie binary logs have been purged).
+
+The manifest of an incremental backup has a non-empty `FromPosition` value, and a `Incremental = true` value.
+
+#### vtctldclient RestoreFromBackup --restore_to_pos
+
+- `--restore_to_pos`: request to restore the server up to the given position (inclusive) and not one step further.
+- `--dry_run`: when `true`, calculate the restore process, if possible, evaluate a path, but exit without actually making any changes to the server.
+
+Examples:
+
+```shell
+$ vtctlclient -- RestoreFromBackup --restore_to_pos "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-220" zone1-0000000102
+```
+
+The restore process seeks a restore _path_: a sequence of backups (handles/manifests) consisting of one full backup followed by zero or more incremental backups, that can bring the server up to the requested position, inclusive.
+
+The command fails if it cannot evaluate a restore path. Possible reasons:
+
+- there's gaps in the incremental backups
+- existing backups don't reach as far as requested position
+- all full backups exceed requested position (so there's no way to get into an ealier position)
+
+The command outputs the restore path.
+
+There may be multiple restore paths, the command prefers a path with the least number of backups. This has nothing to say about the amount and size of binary logs involved.
+
+The `RestoreFromBackup --restore_to_pos` ends with:
+
+- the restored server in intentionally broken replication setup
+- tablet type is `DRAINED`
+
+#### New `vexplain` command
+A new `vexplain` command has been introduced with the following syntax:
+```
+VEXPLAIN [ALL|QUERIES|PLAN] explainable_stmt
+```
+
+This command will help users look at the plan that vtgate comes up with for the given query (`PLAN` type), see all the queries that are executed on all the MySQL instances (`QUERIES` type), and see the vtgate plan along with the MySQL explain output for the executed queries (`ALL` type).
+
+The formats `VTEXPLAIN` and `VITESS` for `EXPLAIN` queries are deprecated, and these newly introduced commands should be used instead.
+
+### Important bug fixes
+
+#### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
+
+### Deprecations and Removals
+
+- The V3 planner is deprecated as of the v16 release, and will be removed in the v17 release of Vitess.
+
+- The [VReplication v1 commands](https://vitess.io/docs/15.0/reference/vreplication/v1/) — which were deprecated in Vitess 11.0 — have been removed. You will need to use the [VReplication v2 commands](https://vitess.io/docs/16.0/reference/vreplication/v2/) instead.
+
+- The `vtctlclient VExec` command was removed, having been deprecated since v12.
+
+- The `vtctlclient VReplicationExec` command has now been deprecated and will be removed in a future release. Please see [#12070](https://github.com/vitessio/vitess/pull/12070) for additional details.
+
+- `vtctlclient OnlineDDL ... [complete|retry|cancel|cancel-all]` returns empty result on success instead of number of shard affected.
+
+- The dead legacy Workflow Manager related code was removed in [#12085](https://github.com/vitessio/vitess/pull/12085). This included the following `vtctl` client commands: `WorkflowAction`, `WorkflowCreate`, `WorkflowWait`, `WorkflowStart`, `WorkflowStop`, `WorkflowTree`, `WorkflowDelete`.
+
+- VTAdmin's `VTExplain` endpoint has been deprecated. Users can use the new `vexplain` query format instead. The endpoint will be deleted in a future release.
+
+### MySQL Compatibility
+
+#### Transaction Isolation Level
+
+In [Pull Request #11704](https://github.com/vitessio/vitess/pull/11704) we are adding support for `set [session] transaction isolation level `
+
+```sql
+transaction_characteristic: {
+ ISOLATION LEVEL level
+ | access_mode
+}
+
+level: {
+ REPEATABLE READ
+ | READ COMMITTED
+ | READ UNCOMMITTED
+ | SERIALIZABLE
+}
+```
+
+This will set the transaction isolation level for the current session.
+This will be applied to any shard where the session will open a transaction.
+
+#### Transaction Access Mode
+
+In [Pull Request #11704](https://github.com/vitessio/vitess/pull/11704) we are adding support for `start transaction` with transaction characteristic.
+
+```sql
+START TRANSACTION
+ [transaction_characteristic [, transaction_characteristic] ...]
+
+transaction_characteristic: {
+ WITH CONSISTENT SNAPSHOT
+ | READ WRITE
+ | READ ONLY
+}
+```
+
+This will allow users to start a transaction with these characteristics.
+
+#### Support For Views
+
+Views sharded support is released as an experimental feature in `v16.0.0`.
+Views are not enabled by default in your Vitess cluster, but they can be turned on using the `--enable-views` flag on VTGate, and `--queryserver-enable-views` flag on VTTablet.
+
+To read more on how views are implemented you can read the [Views Support RFC](https://github.com/vitessio/vitess/issues/11559).
+And if you want to learn more on how to use views and its current limitations, you can read the [Views Documentation](https://vitess.io/docs/16.0/reference/compatibility/mysql-compatibility/#views).
+
+### VTTestServer
+
+#### Performance Improvement
+
+Creating a database with vttestserver was taking ~45 seconds. This can be problematic in test environments where testcases do a lot of `create` and `drop` database.
+In an effort to minimize the database creation time, in [Pull Request #11918](https://github.com/vitessio/vitess/pull/11918) we have changed the value of `tablet_refresh_interval` to 10s while instantiating vtcombo during vttestserver initialization. We have also made this configurable so that it can be reduced further if desired.
+For any production cluster the default value of this flag is still [1 minute](https://vitess.io/docs/16.0/reference/programs/vtgate/). Reducing this value might put more stress on Topo Server (since we now read from Topo server more often) but for testing purposes
+this shouldn't be a concern.
+
+## Minor changes
+
+### Backup Compression Benchmarks
+
+Compression benchmarks have been added to the `mysqlctl` package.
+
+The benchmarks fetch and compress a ~6 GiB tar file containing 3 InnoDB files using different built-in and external compressors.
+
+Here are sample results from a 2020-era Mac M1 with 16 GiB of memory:
+
+```sh
+$ go test -bench=BenchmarkCompress ./go/vt/mysqlctl -run=NONE -timeout=12h -benchtime=1x -v
+goos: darwin
+goarch: arm64
+pkg: vitess.io/vitess/go/vt/mysqlctl
+BenchmarkCompressLz4Builtin
+ compression_benchmark_test.go:310: downloading data from https://www.dropbox.com/s/raw/smmgifsooy5qytd/enwiki-20080103-pages-articles.ibd.tar.zst
+ BenchmarkCompressLz4Builtin-8 1 11737493087 ns/op 577.98 MB/s 2.554 compression-ratio
+ BenchmarkCompressPargzipBuiltin
+ BenchmarkCompressPargzipBuiltin-8 1 31083784040 ns/op 218.25 MB/s 2.943 compression-ratio
+ BenchmarkCompressPgzipBuiltin
+ BenchmarkCompressPgzipBuiltin-8 1 13325299680 ns/op 509.11 MB/s 2.910 compression-ratio
+ BenchmarkCompressZstdBuiltin
+ BenchmarkCompressZstdBuiltin-8 1 18683863911 ns/op 363.09 MB/s 3.150 compression-ratio
+ BenchmarkCompressZstdExternal
+ BenchmarkCompressZstdExternal-8 1 10795487675 ns/op 628.41 MB/s 3.093 compression-ratio
+ BenchmarkCompressZstdExternalFast4
+ BenchmarkCompressZstdExternalFast4-8 1 7139319009 ns/op 950.23 MB/s 2.323 compression-ratio
+ BenchmarkCompressZstdExternalT0
+ BenchmarkCompressZstdExternalT0-8 1 4393860434 ns/op 1543.97 MB/s 3.093 compression-ratio
+ BenchmarkCompressZstdExternalT4
+ BenchmarkCompressZstdExternalT4-8 1 4389559744 ns/op 1545.49 MB/s 3.093 compression-ratio
+ PASS
+ cleaning up "/var/folders/96/k7gzd7q10zdb749vr02q7sjh0000gn/T/ee7d47b45ef09786c54fa2d7354d2a68.dat"
+```
+
+## Refactor
+
+### VTTablet Sidecar Schema Maintenance Refactor
+
+This is an internal refactor and should not change the behavior of Vitess as seen by users.
+
+Developers will see a difference though: v16 changes the way we maintain vttablet's sidecar database schema (also referred to as the `_vt`
+database). Instead of using the `WithDDL` package, introduced in [PR #6348](https://github.com/vitessio/vitess/pull/6348), we use a
+declarative approach. Developers will now have to update the desired schema in the `go/vt/sidecardb/schema` directory.
+
+The desired schema is specified, one per table. A new module `sidecardb`, compares this to the existing schema and
+performs the required `create` or `alter` to reach it. This is done whenever a primary vttablet starts up.
+
+The sidecar tables `local_metadata` and `shard_metadata` are no longer in use and all references to them are removed as
+part of this refactor. They were used previously for Orchestrator support, which has been superseded by `vtorc`.
diff --git a/changelog/16.0/16.0.1/changelog.md b/changelog/16.0/16.0.1/changelog.md
new file mode 100644
index 00000000000..47ea58c2469
--- /dev/null
+++ b/changelog/16.0/16.0.1/changelog.md
@@ -0,0 +1,71 @@
+# Changelog of Vitess v16.0.1
+
+### Bug fixes
+#### Build/CI
+ * Fix `TestFuzz` that hangs on `go1.20.1` [#12514](https://github.com/vitessio/vitess/pull/12514)
+ * Fix dubious ownership of git directory in `vitess/base` Docker build [#12530](https://github.com/vitessio/vitess/pull/12530)
+#### CLI
+ * Purge logs without panicking [#12187](https://github.com/vitessio/vitess/pull/12187)
+ * Fix `vtctldclient`'s Root command to return an error on unknown command [#12481](https://github.com/vitessio/vitess/pull/12481)
+#### Cluster management
+ * Fix initialization code to also stop replication to prevent crash [#12534](https://github.com/vitessio/vitess/pull/12534)
+ * [Backport] Update topo {Get,Create}Keyspace to prevent invalid keyspace names [#12732](https://github.com/vitessio/vitess/pull/12732)
+#### General
+ * Fixing backup tests flakiness [#12655](https://github.com/vitessio/vitess/pull/12655)
+ * [release-16.0] Port two flaky test fixes #12603 and #12546 [#12745](https://github.com/vitessio/vitess/pull/12745)
+#### Observability
+ * Reset the current lag when closing the replication lag reader. [#12683](https://github.com/vitessio/vitess/pull/12683)
+#### Online DDL
+ * Throttler: Store Config in Global Keyspace Topo Record [#12520](https://github.com/vitessio/vitess/pull/12520)
+ * v16: Online DDL: enforce ALGORITHM=COPY on shadow table [#12522](https://github.com/vitessio/vitess/pull/12522)
+ * Mysqld.GetSchema: tolerate tables being dropped while inspecting schema [#12641](https://github.com/vitessio/vitess/pull/12641)
+#### Query Serving
+ * collations: fix sorting in UCA900 collations [#12555](https://github.com/vitessio/vitess/pull/12555)
+ * VSchema DDL: Add grammar to accept qualified table names in Vindex option values [#12577](https://github.com/vitessio/vitess/pull/12577)
+ * [release-16.0] `ApplyVSchemaDDL`: escape Sequence names when writing the VSchema (#12519) [#12599](https://github.com/vitessio/vitess/pull/12599)
+ * [gen4 planner] Make sure to not push down expressions when not possible [#12607](https://github.com/vitessio/vitess/pull/12607)
+ * Fix `panic` when executing a prepare statement with over `65,528` parameters [#12614](https://github.com/vitessio/vitess/pull/12614)
+ * Always add columns in the `Derived` operator [#12634](https://github.com/vitessio/vitess/pull/12634)
+ * planner: fix predicate simplifier [#12650](https://github.com/vitessio/vitess/pull/12650)
+ * [planner bugfix] add expressions to HAVING [#12668](https://github.com/vitessio/vitess/pull/12668)
+ * Use a left join to make sure that tables with tablespace=innodb_system are included in the schema [#12672](https://github.com/vitessio/vitess/pull/12672)
+ * [planner fix] make unknown column an error only for sharded queries [#12704](https://github.com/vitessio/vitess/pull/12704)
+#### VReplication
+ * VStreamer: improve representation of integers in json data types [#12630](https://github.com/vitessio/vitess/pull/12630)
+#### VTorc
+ * Fix unhandled error in VTOrc `recoverDeadPrimary` [#12510](https://github.com/vitessio/vitess/pull/12510)
+### CI/Build
+#### Build/CI
+ * [release-16.0] Make upgrade downgrade job names unique [#12499](https://github.com/vitessio/vitess/pull/12499)
+#### Examples
+ * Examples, Flakes: Wait for Shard's VReplication Engine to Open [#12560](https://github.com/vitessio/vitess/pull/12560)
+#### General
+ * [release-16.0] Upgrade the Golang version to `go1.20.2` [#12723](https://github.com/vitessio/vitess/pull/12723)
+#### Online DDL
+ * CI: extend timeouts in onlineddl_vrepl due to slow CI runners [#12583](https://github.com/vitessio/vitess/pull/12583)
+ * [release-16.0] CI: increase overall test timeouts for all OnlineDDL tests (#12584) [#12589](https://github.com/vitessio/vitess/pull/12589)
+### Enhancement
+#### Build/CI
+ * Auto upgrade the Golang version [#12585](https://github.com/vitessio/vitess/pull/12585)
+### Internal Cleanup
+#### Build/CI
+ * Run launchable only on PRs against `main` [#12694](https://github.com/vitessio/vitess/pull/12694)
+#### General
+ * Add a known issue into the release notes for xtrabackup and DDLs [#12536](https://github.com/vitessio/vitess/pull/12536)
+### Release
+#### Build/CI
+ * [release-16.0] Tooling improvements backports [#12528](https://github.com/vitessio/vitess/pull/12528)
+#### Documentation
+ * Re-organize the `releasenotes` directory into `changelog` [#12566](https://github.com/vitessio/vitess/pull/12566)
+ * Addition of the `v16.0.1` release summary [#12751](https://github.com/vitessio/vitess/pull/12751)
+#### General
+ * Back to dev mode after v16.0.0 [#12515](https://github.com/vitessio/vitess/pull/12515)
+ * Release 16.0 code freeze for 16.0.1 patch release [#12762](https://github.com/vitessio/vitess/pull/12762)
+#### VTAdmin
+ * Add the vtadmin `web` directory to the release packages [#12639](https://github.com/vitessio/vitess/pull/12639)
+### Testing
+#### General
+ * Fix fullstatus test for backward compat [#12685](https://github.com/vitessio/vitess/pull/12685)
+#### VReplication
+ * Flakes: Use new healthy shard check in vreplication e2e tests [#12502](https://github.com/vitessio/vitess/pull/12502)
+
diff --git a/changelog/16.0/16.0.1/release_notes.md b/changelog/16.0/16.0.1/release_notes.md
new file mode 100644
index 00000000000..c1354eac4ee
--- /dev/null
+++ b/changelog/16.0/16.0.1/release_notes.md
@@ -0,0 +1,41 @@
+# Release of Vitess v16.0.1
+
+## Known Issues
+
+### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.1`
+
+During upgrades from `<= v15.x.x` to `v16.0.1`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely.
+Eventually, `PromoteReplica` fails, and this fails the entire PRS call.
+
+A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426).
+
+This issue is fixed in `v16.0.3` and later patch releases.
+
+### Broken downgrade from v17.x.x when super_read_only turned on by default
+
+In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.1` breaks due to `init_db.sql` needing write access.
+
+This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525)
+
+## Major Changes
+
+### Upgrade to `go1.20.2`
+
+Vitess `v16.0.1` now runs on `go1.20.2`.
+Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20).
+
+> go1.20.2 (released 2023-03-07) includes a security fix to the crypto/elliptic package, as well as bug fixes to the compiler, the covdata command, the linker, the runtime, and the crypto/ecdh, crypto/rsa, crypto/x509, os, and syscall packages.
+
+### Keyspace name validation in TopoServer
+
+Prior to v16.0.1, it was possible to create a keyspace with invalid characters, which would then be inaccessible to various cluster management operations.
+
+Keyspace names may no longer contain the forward slash ("/") character, and TopoServer's `GetKeyspace` and `CreateKeyspace` methods return an error if given such a name.
+
+------------
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.1/changelog.md).
+
+The release includes 39 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @ajm188, @frouioui, @github-actions[bot], @mattlord, @rohit-nayak-ps, @rsajwani, @shlomi-noach, @systay, @vitess-bot[bot]
+
diff --git a/changelog/16.0/16.0.1/summary.md b/changelog/16.0/16.0.1/summary.md
new file mode 100644
index 00000000000..f9af9672ea1
--- /dev/null
+++ b/changelog/16.0/16.0.1/summary.md
@@ -0,0 +1,32 @@
+## Known Issues
+
+### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.1`
+
+During upgrades from `<= v15.x.x` to `v16.0.1`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely.
+Eventually, `PromoteReplica` fails, and this fails the entire PRS call.
+
+A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426).
+
+This issue is fixed in `v16.0.3` and later patch releases.
+
+### Broken downgrade from v17.x.x when super_read_only turned on by default
+
+In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.1` breaks due to `init_db.sql` needing write access.
+
+This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525)
+
+## Major Changes
+
+### Upgrade to `go1.20.2`
+
+Vitess `v16.0.1` now runs on `go1.20.2`.
+Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20).
+
+> go1.20.2 (released 2023-03-07) includes a security fix to the crypto/elliptic package, as well as bug fixes to the compiler, the covdata command, the linker, the runtime, and the crypto/ecdh, crypto/rsa, crypto/x509, os, and syscall packages.
+
+### Keyspace name validation in TopoServer
+
+Prior to v16.0.1, it was possible to create a keyspace with invalid characters, which would then be inaccessible to various cluster management operations.
+
+Keyspace names may no longer contain the forward slash ("/") character, and TopoServer's `GetKeyspace` and `CreateKeyspace` methods return an error if given such a name.
+
diff --git a/changelog/16.0/16.0.2/changelog.md b/changelog/16.0/16.0.2/changelog.md
new file mode 100644
index 00000000000..978cf9441e6
--- /dev/null
+++ b/changelog/16.0/16.0.2/changelog.md
@@ -0,0 +1,50 @@
+# Changelog of Vitess v16.0.2
+
+### Bug fixes
+#### Build/CI
+ * Small fixes to the auto-upgrade golang tool [#12838](https://github.com/vitessio/vitess/pull/12838)
+ * Add timeout to `golangci-lint` and bump its version [#12852](https://github.com/vitessio/vitess/pull/12852)
+ * [release-16.0] Remove recent golangci-lint version bump [#12909](https://github.com/vitessio/vitess/pull/12909)
+#### Cluster management
+ * Backport: [topo] Disallow the slash character in shard names #12843 [#12858](https://github.com/vitessio/vitess/pull/12858)
+#### Query Serving
+ * Fix `vtgate_schema_tracker` flaky tests [#12780](https://github.com/vitessio/vitess/pull/12780)
+ * [planbuilder bugfix] do not push aggregations into derived tables [#12810](https://github.com/vitessio/vitess/pull/12810)
+ * [16.0] Fix: reset transaction session when no reserved connection [#12877](https://github.com/vitessio/vitess/pull/12877)
+ * [release-16.0] fix: union distinct between unsharded route and sharded join (#12968) [#12974](https://github.com/vitessio/vitess/pull/12974)
+### CI/Build
+#### General
+ * Do not fail build on incorrect Go version [#12809](https://github.com/vitessio/vitess/pull/12809)
+ * [release-16.0] Upgrade the Golang version to `go1.20.3` [#12832](https://github.com/vitessio/vitess/pull/12832)
+### Documentation
+#### Query Serving
+ * update v16 release notes about VTGate Advertised MySQL Version [#12957](https://github.com/vitessio/vitess/pull/12957)
+### Enhancement
+#### Build/CI
+ * Remove unnecessary code bits in workflows [#12756](https://github.com/vitessio/vitess/pull/12756)
+#### General
+ * Automatically add milestone to new Pull Request [#12759](https://github.com/vitessio/vitess/pull/12759)
+#### Query Serving
+ * [release-16.0] planner fix: scoping rules for JOIN ON expression inside a subquery [#12891](https://github.com/vitessio/vitess/pull/12891)
+### Internal Cleanup
+#### CLI
+ * Cleanup TODOs in vtorc flag parsing code from v15 [#12787](https://github.com/vitessio/vitess/pull/12787)
+#### TabletManager
+ * Table GC: remove spammy log entry [#12625](https://github.com/vitessio/vitess/pull/12625)
+### Regression
+#### ACL
+ * vtgate : Disable Automatically setting immediateCallerID to user from static authentication context [#12961](https://github.com/vitessio/vitess/pull/12961)
+#### Query Serving
+ * gen4 planner: allow last_insert_id with arguments [#13026](https://github.com/vitessio/vitess/pull/13026)
+### Release
+#### Documentation
+ * Fix incorrect path during release notes generation [#12769](https://github.com/vitessio/vitess/pull/12769)
+#### General
+ * Back to dev mode after v16.0.1 [#12783](https://github.com/vitessio/vitess/pull/12783)
+ * Summary changes and code freeze for release of v16.0.2 [#13049](https://github.com/vitessio/vitess/pull/13049)
+### Testing
+#### Build/CI
+ * [release-16.0] Throttler: Expose Tablet's Config & Leverage to Deflake Tests [#12791](https://github.com/vitessio/vitess/pull/12791)
+ * fakedbclient: Add locking to avoid races [#12814](https://github.com/vitessio/vitess/pull/12814)
+ * [release-16.0] test: fix cfc flaky test (#12941) [#12960](https://github.com/vitessio/vitess/pull/12960)
+
diff --git a/changelog/16.0/16.0.2/release_notes.md b/changelog/16.0/16.0.2/release_notes.md
new file mode 100644
index 00000000000..806cf87d208
--- /dev/null
+++ b/changelog/16.0/16.0.2/release_notes.md
@@ -0,0 +1,48 @@
+# Release of Vitess v16.0.2
+
+## Known Issues
+
+### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.2`
+
+During upgrades from `<= v15.x.x` to `v16.0.2`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely.
+Eventually, `PromoteReplica` fails, and this fails the entire PRS call.
+
+A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426).
+
+This issue is fixed in `v16.0.3` and later patch releases.
+
+### Broken downgrade from v17.x.x when super_read_only turned on by default
+
+In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.2` breaks due to `init_db.sql` needing write access.
+
+This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525)
+
+## Major Changes
+
+### Upgrade to `go1.20.3`
+
+Vitess `v16.0.2` now runs on `go1.20.3`.
+Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20).
+
+> go1.20.3 (released 2023-04-04) includes security fixes to the go/parser, html/template, mime/multipart, net/http, and net/textproto packages, as well as bug fixes to the compiler, the linker, the runtime, and the time package. See the Go 1.20.3 milestone on our issue tracker for details.
+
+### EffectiveCallerId in Vtgate gRPC calls
+
+A new flag `grpc-use-static-authentication-callerid` is added to gate the behavior introduced in https://github.com/vitessio/vitess/pull/12050.
+Earlier, we used to automatically set immediateCallerID to user from static authentication context that overrode the EffectiveCallerId.
+
+
+### Shard name validation in TopoServer
+
+Prior to v16.0.2, it was possible to create a shard name with invalid characters, which would then be inaccessible to various cluster management operations.
+
+Shard names may no longer contain the forward slash ("/") character, and TopoServer's `CreateShard` method returns an error if given such a name.
+
+
+------------
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.2/changelog.md).
+
+The release includes 24 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @ajm188, @frouioui, @github-actions[bot], @harshit-gangal, @mattlord, @systay, @vitess-bot[bot]
+
diff --git a/changelog/16.0/16.0.2/summary.md b/changelog/16.0/16.0.2/summary.md
new file mode 100644
index 00000000000..faef0207921
--- /dev/null
+++ b/changelog/16.0/16.0.2/summary.md
@@ -0,0 +1,39 @@
+## Known Issues
+
+### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.2`
+
+During upgrades from `<= v15.x.x` to `v16.0.2`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely.
+Eventually, `PromoteReplica` fails, and this fails the entire PRS call.
+
+A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426).
+
+This issue is fixed in `v16.0.3` and later patch releases.
+
+### Broken downgrade from v17.x.x when super_read_only turned on by default
+
+In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.2` breaks due to `init_db.sql` needing write access.
+
+This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525)
+
+
+## Major Changes
+
+### Upgrade to `go1.20.3`
+
+Vitess `v16.0.2` now runs on `go1.20.3`.
+Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20).
+
+> go1.20.3 (released 2023-04-04) includes security fixes to the go/parser, html/template, mime/multipart, net/http, and net/textproto packages, as well as bug fixes to the compiler, the linker, the runtime, and the time package. See the Go 1.20.3 milestone on our issue tracker for details.
+
+### EffectiveCallerId in Vtgate gRPC calls
+
+A new flag `grpc-use-static-authentication-callerid` is added to gate the behavior introduced in https://github.com/vitessio/vitess/pull/12050.
+Earlier, we used to automatically set immediateCallerID to user from static authentication context that overrode the EffectiveCallerId.
+
+
+### Shard name validation in TopoServer
+
+Prior to v16.0.2, it was possible to create a shard name with invalid characters, which would then be inaccessible to various cluster management operations.
+
+Shard names may no longer contain the forward slash ("/") character, and TopoServer's `CreateShard` method returns an error if given such a name.
+
diff --git a/changelog/16.0/16.0.3/changelog.md b/changelog/16.0/16.0.3/changelog.md
new file mode 100644
index 00000000000..3f43d9b6049
--- /dev/null
+++ b/changelog/16.0/16.0.3/changelog.md
@@ -0,0 +1,67 @@
+# Changelog of Vitess v16.0.3
+
+### Bug fixes
+#### Cluster management
+ * [release-16.0] Prevent resetting replication every time we set replication source (#13377) [#13392](https://github.com/vitessio/vitess/pull/13392)
+ * [release-16.0] Don't run any reparent commands if the host is empty (#13396) [#13402](https://github.com/vitessio/vitess/pull/13402)
+ * [release-16.0] Upgrade-Downgrade Fix: Schema-initialization stuck on semi-sync ACKs while upgrading (#13411) [#13441](https://github.com/vitessio/vitess/pull/13441)
+ * [release-16.0] Flaky tests: Fix race in memory topo (#13559) [#13576](https://github.com/vitessio/vitess/pull/13576)
+ * [release-16.0] ignore all error for views in engine reload (#13590) [#13593](https://github.com/vitessio/vitess/pull/13593)
+ * [release-16.0] check keyspace snapshot time if none specified for backup restores (#13557) [#13634](https://github.com/vitessio/vitess/pull/13634)
+#### Examples
+ * [release-16.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13472](https://github.com/vitessio/vitess/pull/13472)
+#### Operator
+ * [release-16.0] Upgrade mysqld memory limits to 1024Mi (#13122) [#13204](https://github.com/vitessio/vitess/pull/13204)
+#### Query Serving
+ * [release-16.0] Fix the resilientQuery to give correct results during initialization (#13080) [#13087](https://github.com/vitessio/vitess/pull/13087)
+ * [16.0] evalengine: TypeOf for Columns should only use value type when we have a value [#13154](https://github.com/vitessio/vitess/pull/13154)
+ * [release-16.0] Remove indentation limit in the sqlparser (#13158) [#13166](https://github.com/vitessio/vitess/pull/13166)
+ * Fix: errant GTID in health streamer [#13184](https://github.com/vitessio/vitess/pull/13184)
+ * [16.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error [#13193](https://github.com/vitessio/vitess/pull/13193)
+ * [release-16.0] Bug fix: SQL queries erroring with message `unknown aggregation random` (#13330) [#13334](https://github.com/vitessio/vitess/pull/13334)
+ * [release-16.0] ignore ongoing backfill vindex from routing selection (#13523) [#13607](https://github.com/vitessio/vitess/pull/13607)
+#### Schema Tracker
+ * [release-16.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13424](https://github.com/vitessio/vitess/pull/13424)
+ * Backport v16: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13456](https://github.com/vitessio/vitess/pull/13456)
+#### TabletManager
+ * [release-16.0] mysqlctl: Correctly encode database and table names (#13312) [#13323](https://github.com/vitessio/vitess/pull/13323)
+#### VReplication
+ * [release-16.0] VReplication: Do not delete sharded target vschema table entries on Cancel (#13146) [#13155](https://github.com/vitessio/vitess/pull/13155)
+ * [release-16.0] VReplication: Pass on --keep_routing_rules flag value for Cancel action (#13171) [#13194](https://github.com/vitessio/vitess/pull/13194)
+ * [release-16.0] VReplication: Fix VDiff2 DeleteByUUID Query (#13255) [#13282](https://github.com/vitessio/vitess/pull/13282)
+ * [release-16.0] VReplication: Ensure ROW events are sent within a transaction (#13547) [#13580](https://github.com/vitessio/vitess/pull/13580)
+### CI/Build
+#### General
+ * [release-16.0] Upgrade the Golang version to `go1.20.4` [#13053](https://github.com/vitessio/vitess/pull/13053)
+### Documentation
+#### Documentation
+ * [release-16.0] update link for reparenting guide (#13350) [#13356](https://github.com/vitessio/vitess/pull/13356)
+### Enhancement
+#### Build/CI
+ * [release-16.0] Set the number of threads for release notes generation with a flag [#13316](https://github.com/vitessio/vitess/pull/13316)
+### Performance
+#### TabletManager
+ * [release-16.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13389](https://github.com/vitessio/vitess/pull/13389)
+### Release
+#### Build/CI
+ * [release-16.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13621](https://github.com/vitessio/vitess/pull/13621)
+#### Documentation
+ * [release-16.0] Fix format error in the `v16.0.2` release notes (#13057) [#13058](https://github.com/vitessio/vitess/pull/13058)
+### Testing
+#### Backup and Restore
+ * [release-16.0]: Fix `upgrade-downgrade` test setup and fix the `init_db.sql` [#13525](https://github.com/vitessio/vitess/pull/13525)
+#### Cluster management
+ * [release-16.0] Deflake `TestPlannedReparentShardPromoteReplicaFail` (#13548) [#13549](https://github.com/vitessio/vitess/pull/13549)
+ * [release-16.0] Flaky tests: Fix wrangler tests (#13568) [#13571](https://github.com/vitessio/vitess/pull/13571)
+#### General
+ * TestFix: `Upgrade Downgrade Testing - Backups - Manual` [#13408](https://github.com/vitessio/vitess/pull/13408)
+#### Query Serving
+ * [release-16.0] Fix benchmarks in `plan_test.go` (#13096) [#13126](https://github.com/vitessio/vitess/pull/13126)
+ * [release-16.0] Deflake `TestQueryTimeoutWithDual` test (#13405) [#13409](https://github.com/vitessio/vitess/pull/13409)
+ * [release-16.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13500](https://github.com/vitessio/vitess/pull/13500)
+ * [release-16.0] fix TestQueryTimeoutWithTables flaky test (#13579) [#13585](https://github.com/vitessio/vitess/pull/13585)
+#### VTorc
+ * [release-16.0]: Fix flakiness in VTOrc tests (#13489) [#13528](https://github.com/vitessio/vitess/pull/13528)
+#### vtctl
+ * Fix new vtctl upgrade downgrade test on `release-16.0` [#13252](https://github.com/vitessio/vitess/pull/13252)
+
diff --git a/changelog/16.0/16.0.3/release_notes.md b/changelog/16.0/16.0.3/release_notes.md
new file mode 100644
index 00000000000..d377bdc24f9
--- /dev/null
+++ b/changelog/16.0/16.0.3/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v16.0.3
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.3/changelog.md).
+
+The release includes 38 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/github-actions, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay
+
diff --git a/web/vtctld2/app/assets/.gitignore b/changelog/16.0/16.0.3/summary.md
similarity index 100%
rename from web/vtctld2/app/assets/.gitignore
rename to changelog/16.0/16.0.3/summary.md
diff --git a/changelog/16.0/16.0.4/changelog.md b/changelog/16.0/16.0.4/changelog.md
new file mode 100644
index 00000000000..45c4944aa25
--- /dev/null
+++ b/changelog/16.0/16.0.4/changelog.md
@@ -0,0 +1,24 @@
+# Changelog of Vitess v16.0.4
+
+### Bug fixes
+#### Backup and Restore
+ * Manual cherry-pick of 13339 [#13733](https://github.com/vitessio/vitess/pull/13733)
+ * [release-16.0] Address vttablet memory usage with backups to Azure Blob Service (#13770) [#13774](https://github.com/vitessio/vitess/pull/13774)
+#### Online DDL
+ * v16 backport: Fix closed channel panic in Online DDL cutover [#13732](https://github.com/vitessio/vitess/pull/13732)
+ * v16 backport: Solve RevertMigration.Comment read/write concurrency issue [#13736](https://github.com/vitessio/vitess/pull/13736)
+#### Query Serving
+ * planbuilder: Fix infinite recursion for subqueries [#13783](https://github.com/vitessio/vitess/pull/13783)
+ * [release-16.0] vtgate: fix race condition iterating tables and views from schema tracker (#13673) [#13795](https://github.com/vitessio/vitess/pull/13795)
+ * [16.0] bugfixes: collection of fixes to bugs found while fuzzing [#13805](https://github.com/vitessio/vitess/pull/13805)
+### CI/Build
+#### Online DDL
+ * [release-16.0] CI: fix onlineddl_scheduler flakiness (#13754) [#13759](https://github.com/vitessio/vitess/pull/13759)
+### Release
+#### General
+ * Back to dev mode after v16.0.3 [#13660](https://github.com/vitessio/vitess/pull/13660)
+ * Release 16.0 code freeze for `v16.0.3` release [#13810](https://github.com/vitessio/vitess/pull/13810)
+### Testing
+#### Build/CI
+ * [release-16.0] Flakes: Delete VTDATAROOT files in reparent test teardown within CI (#13793) [#13797](https://github.com/vitessio/vitess/pull/13797)
+
diff --git a/changelog/16.0/16.0.4/release_notes.md b/changelog/16.0/16.0.4/release_notes.md
new file mode 100644
index 00000000000..d46559f5fec
--- /dev/null
+++ b/changelog/16.0/16.0.4/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v16.0.4
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.4/changelog.md).
+
+The release includes 11 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @dbussink, @rohit-nayak-ps, @shlomi-noach, @systay
+
diff --git a/web/vtctld2/app/assets/.npmignore b/changelog/16.0/16.0.4/summary.md
similarity index 100%
rename from web/vtctld2/app/assets/.npmignore
rename to changelog/16.0/16.0.4/summary.md
diff --git a/changelog/16.0/README.md b/changelog/16.0/README.md
new file mode 100644
index 00000000000..75b3f3a0a1f
--- /dev/null
+++ b/changelog/16.0/README.md
@@ -0,0 +1,21 @@
+## v16.0
+The dedicated team for this release can be found [here](team.md).
+* **[16.0.4](16.0.4)**
+ * [Changelog](16.0.4/changelog.md)
+ * [Release Notes](16.0.4/release_notes.md)
+
+* **[16.0.3](16.0.3)**
+ * [Changelog](16.0.3/changelog.md)
+ * [Release Notes](16.0.3/release_notes.md)
+
+* **[16.0.2](16.0.2)**
+ * [Changelog](16.0.2/changelog.md)
+ * [Release Notes](16.0.2/release_notes.md)
+
+* **[16.0.1](16.0.1)**
+ * [Changelog](16.0.1/changelog.md)
+ * [Release Notes](16.0.1/release_notes.md)
+
+* **[16.0.0](16.0.0)**
+ * [Changelog](16.0.0/changelog.md)
+ * [Release Notes](16.0.0/release_notes.md)
diff --git a/changelog/16.0/team.md b/changelog/16.0/team.md
new file mode 100644
index 00000000000..76b11c38145
--- /dev/null
+++ b/changelog/16.0/team.md
@@ -0,0 +1,5 @@
+## Release Team for v16
+
+- **Lead:** Manan Gupta ([GuptaManan100](https://github.com/GuptaManan100)) manan@planetscale.com
+- **Shadow:** Matt Lord ([mattlord](https://github.com/mattlord)) mlord@planetscale.com
+- **Mentor:** Florent Poinsard ([frouioui](https://github.com/frouioui)) florent@planetscale.com
\ No newline at end of file
diff --git a/doc/releasenotes/7_0_0_release_notes.md b/changelog/7.0/7.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_0_release_notes.md
rename to changelog/7.0/7.0.0/release_notes.md
diff --git a/doc/releasenotes/7_0_1_release_notes.md b/changelog/7.0/7.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_1_release_notes.md
rename to changelog/7.0/7.0.1/release_notes.md
diff --git a/doc/releasenotes/7_0_2_release_notes.md b/changelog/7.0/7.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_2_release_notes.md
rename to changelog/7.0/7.0.2/release_notes.md
diff --git a/doc/releasenotes/7_0_3_release_notes.md b/changelog/7.0/7.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_3_release_notes.md
rename to changelog/7.0/7.0.3/release_notes.md
diff --git a/changelog/7.0/README.md b/changelog/7.0/README.md
new file mode 100644
index 00000000000..7177c6be673
--- /dev/null
+++ b/changelog/7.0/README.md
@@ -0,0 +1,12 @@
+## v7.0
+* **[7.0.3](7.0.3)**
+ * [Release Notes](7.0.3/release_notes.md)
+
+* **[7.0.2](7.0.2)**
+ * [Release Notes](7.0.2/release_notes.md)
+
+* **[7.0.1](7.0.1)**
+ * [Release Notes](7.0.1/release_notes.md)
+
+* **[7.0.0](7.0.0)**
+ * [Release Notes](7.0.0/release_notes.md)
diff --git a/doc/releasenotes/8_0_0_release_notes.md b/changelog/8.0/8.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/8_0_0_release_notes.md
rename to changelog/8.0/8.0.0/release_notes.md
diff --git a/changelog/8.0/README.md b/changelog/8.0/README.md
new file mode 100644
index 00000000000..fa359e7302f
--- /dev/null
+++ b/changelog/8.0/README.md
@@ -0,0 +1,3 @@
+## v8.0
+* **[8.0.0](8.0.0)**
+ * [Release Notes](8.0.0/release_notes.md)
diff --git a/doc/releasenotes/9_0_0_release_notes.md b/changelog/9.0/9.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/9_0_0_release_notes.md
rename to changelog/9.0/9.0.0/release_notes.md
diff --git a/doc/releasenotes/9_0_1_release_notes.md b/changelog/9.0/9.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/9_0_1_release_notes.md
rename to changelog/9.0/9.0.1/release_notes.md
diff --git a/doc/releasenotes/9_0_2_release_notes.md b/changelog/9.0/9.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/9_0_2_release_notes.md
rename to changelog/9.0/9.0.2/release_notes.md
diff --git a/changelog/9.0/README.md b/changelog/9.0/README.md
new file mode 100644
index 00000000000..17f49aa3b47
--- /dev/null
+++ b/changelog/9.0/README.md
@@ -0,0 +1,9 @@
+## v9.0
+* **[9.0.2](9.0.2)**
+ * [Release Notes](9.0.2/release_notes.md)
+
+* **[9.0.1](9.0.1)**
+ * [Release Notes](9.0.1/release_notes.md)
+
+* **[9.0.0](9.0.0)**
+ * [Release Notes](9.0.0/release_notes.md)
diff --git a/changelog/README.md b/changelog/README.md
new file mode 100644
index 00000000000..f443a473b08
--- /dev/null
+++ b/changelog/README.md
@@ -0,0 +1,11 @@
+## Releases
+* [16.0](16.0)
+* [15.0](15.0)
+* [14.0](14.0)
+* [13.0](13.0)
+* [12.0](12.0)
+* [11.0](11.0)
+* [10.0](10.0)
+* [9.0](9.0)
+* [8.0](8.0)
+* [7.0](7.0)
\ No newline at end of file
diff --git a/config/embed.go b/config/embed.go
index 121fa29cee2..b2a9333e6de 100644
--- a/config/embed.go
+++ b/config/embed.go
@@ -8,20 +8,8 @@ var DefaultInitDB string
//go:embed mycnf/default.cnf
var MycnfDefault string
-//go:embed mycnf/mariadb100.cnf
-var MycnfMariaDB100 string
-
-//go:embed mycnf/mariadb101.cnf
-var MycnfMariaDB101 string
-
-//go:embed mycnf/mariadb102.cnf
-var MycnfMariaDB102 string
-
-//go:embed mycnf/mariadb103.cnf
-var MycnfMariaDB103 string
-
-//go:embed mycnf/mariadb104.cnf
-var MycnfMariaDB104 string
+//go:embed mycnf/mariadb10.cnf
+var MycnfMariaDB10 string
//go:embed mycnf/mysql57.cnf
var MycnfMySQL57 string
diff --git a/config/init_db.sql b/config/init_db.sql
index cf7fdd63350..47ac4a04749 100644
--- a/config/init_db.sql
+++ b/config/init_db.sql
@@ -11,6 +11,12 @@
###############################################################################
# Equivalent of mysql_secure_installation
###############################################################################
+# We need to ensure that super_read_only is disabled so that we can execute
+# these commands. Note that disabling it does NOT disable read_only.
+# We save the current value so that we only re-enable it at the end if it was
+# enabled before.
+SET @original_super_read_only=IF(@@global.super_read_only=1, 'ON', 'OFF');
+SET GLOBAL super_read_only='OFF';
# Changes during the init db should not make it to the binlog.
# They could potentially create errant transactions on replicas.
@@ -28,23 +34,6 @@ DROP DATABASE IF EXISTS test;
# Vitess defaults
###############################################################################
-# Vitess-internal database.
-CREATE DATABASE IF NOT EXISTS _vt;
-# Note that definitions of local_metadata and shard_metadata should be the same
-# as in production which is defined in go/vt/mysqlctl/metadata_tables.go.
-CREATE TABLE IF NOT EXISTS _vt.local_metadata (
- name VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- db_name VARBINARY(255) NOT NULL,
- PRIMARY KEY (db_name, name)
- ) ENGINE=InnoDB;
-CREATE TABLE IF NOT EXISTS _vt.shard_metadata (
- name VARCHAR(255) NOT NULL,
- value MEDIUMBLOB NOT NULL,
- db_name VARBINARY(255) NOT NULL,
- PRIMARY KEY (db_name, name)
- ) ENGINE=InnoDB;
-
# Admin user with all privileges.
CREATE USER 'vt_dba'@'localhost';
GRANT ALL ON *.* TO 'vt_dba'@'localhost';
@@ -90,14 +79,13 @@ GRANT SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD
GRANT SELECT, UPDATE, DELETE, DROP
ON performance_schema.* TO 'vt_monitoring'@'localhost';
-# User for Orchestrator (https://github.com/openark/orchestrator).
-CREATE USER 'orc_client_user'@'%' IDENTIFIED BY 'orc_client_user_password';
-GRANT SUPER, PROCESS, REPLICATION SLAVE, RELOAD
- ON *.* TO 'orc_client_user'@'%';
-GRANT SELECT
- ON _vt.* TO 'orc_client_user'@'%';
-
FLUSH PRIVILEGES;
RESET SLAVE ALL;
RESET MASTER;
+
+# custom sql is used to add custom scripts like creating users/passwords. We use it in our tests
+# {{custom_sql}}
+
+# We need to set super_read_only back to what it was before
+SET GLOBAL super_read_only=IFNULL(@original_super_read_only, 'ON');
diff --git a/config/mycnf/mariadb100.cnf b/config/mycnf/mariadb10.cnf
similarity index 62%
rename from config/mycnf/mariadb100.cnf
rename to config/mycnf/mariadb10.cnf
index 3f840530566..1912cd8e154 100644
--- a/config/mycnf/mariadb100.cnf
+++ b/config/mycnf/mariadb10.cnf
@@ -1,4 +1,4 @@
-# This file is auto-included when MariaDB 10.0 is detected.
+# This file is auto-included when MariaDB 10 is detected.
# Semi-sync replication is required for automated unplanned failover
# (when the primary goes away). Here we just load the plugin so it's
@@ -8,12 +8,10 @@
# or when a primary is promoted or demoted based on the durability policy configured.
plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so
-slave_net_timeout = 60
-
-# MariaDB 10.0 is unstrict by default
+# MariaDB 10 is unstrict by default in some versions.
sql_mode = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION
-# enable strict mode so it's safe to compare sequence numbers across different server IDs.
+# Enable strict mode so it's safe to compare sequence numbers across different server IDs.
gtid_strict_mode = 1
innodb_stats_persistent = 0
@@ -24,19 +22,20 @@ innodb_stats_persistent = 0
rpl_semi_sync_master_timeout = 1000000000000000000
rpl_semi_sync_master_wait_no_slave = 1
-
-character_set_server = utf8
-collation_server = utf8_general_ci
-
-expire_logs_days = 3
-
+# MariaDB 10 does not have crash safe binlogs by default.
sync_binlog = 1
+expire_logs_days = 3
+# MariaDB 10 uses MIXED mode by default which is not supported by VReplication.
binlog_format = ROW
+# MariaDB 10 does not support full replica binary logs by default, which we
+# need for backups and restores.
log_slave_updates
-expire_logs_days = 3
-# In MariaDB the default charset is latin1
+# Some MariaDB 10 versions set this to 1 hour, which can cause hidden replication lag and
+# unexpected Vitess issue as the mariadbd instance thinks it's caught up when it is not.
+slave_net_timeout = 60
+# In MariaDB 10 the default charset is latin1.
character_set_server = utf8
collation_server = utf8_general_ci
diff --git a/config/mycnf/mariadb101.cnf b/config/mycnf/mariadb101.cnf
deleted file mode 100644
index 1c660bf6f61..00000000000
--- a/config/mycnf/mariadb101.cnf
+++ /dev/null
@@ -1,41 +0,0 @@
-# This file is auto-included when MariaDB 10.1 is detected.
-
-# Semi-sync replication is required for automated unplanned failover
-# (when the primary goes away). Here we just load the plugin so it's
-# available if desired, but it's disabled at startup.
-#
-# VTTablet will enable semi-sync at the proper time when replication is set up,
-# or when a primary is promoted or demoted based on the durability policy configured.
-plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so
-
-slave_net_timeout = 60
-
-# MariaDB 10.1 default is only no-engine-substitution and no-auto-create-user
-sql_mode = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,NO_AUTO_CREATE_USER
-
-# enable strict mode so it's safe to compare sequence numbers across different server IDs.
-gtid_strict_mode = 1
-innodb_stats_persistent = 0
-
-# When semi-sync is enabled, don't allow fallback to async
-# if you get no ack, or have no replicas. This is necessary to
-# prevent alternate futures when doing a failover in response to
-# a primary that becomes unresponsive.
-rpl_semi_sync_master_timeout = 1000000000000000000
-rpl_semi_sync_master_wait_no_slave = 1
-
-
-character_set_server = utf8
-collation_server = utf8_general_ci
-
-expire_logs_days = 3
-
-sync_binlog = 1
-binlog_format = ROW
-log_slave_updates
-expire_logs_days = 3
-
-# In MariaDB the default charset is latin1
-
-character_set_server = utf8
-collation_server = utf8_general_ci
diff --git a/config/mycnf/mariadb102.cnf b/config/mycnf/mariadb102.cnf
deleted file mode 100644
index ae1da3d9a71..00000000000
--- a/config/mycnf/mariadb102.cnf
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file is auto-included when MariaDB 10.2 is detected.
-
-# Semi-sync replication is required for automated unplanned failover
-# (when the primary goes away). Here we just load the plugin so it's
-# available if desired, but it's disabled at startup.
-#
-# VTTablet will enable semi-sync at the proper time when replication is set up,
-# or when a primary is promoted or demoted based on the durability policy configured.
-plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so
-
-# enable strict mode so it's safe to compare sequence numbers across different server IDs.
-gtid_strict_mode = 1
-innodb_stats_persistent = 0
-
-# When semi-sync is enabled, don't allow fallback to async
-# if you get no ack, or have no replicas. This is necessary to
-# prevent alternate futures when doing a failover in response to
-# a primary that becomes unresponsive.
-rpl_semi_sync_master_timeout = 1000000000000000000
-rpl_semi_sync_master_wait_no_slave = 1
-
-
-character_set_server = utf8
-collation_server = utf8_general_ci
-
-expire_logs_days = 3
-
-sync_binlog = 1
-binlog_format = ROW
-log_slave_updates
-expire_logs_days = 3
-
-# In MariaDB the default charset is latin1
-
-character_set_server = utf8
-collation_server = utf8_general_ci
diff --git a/config/mycnf/mariadb103.cnf b/config/mycnf/mariadb103.cnf
deleted file mode 100644
index dcf99b8b52c..00000000000
--- a/config/mycnf/mariadb103.cnf
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is auto-included when MariaDB 10.3 is detected.
-
-# enable strict mode so it's safe to compare sequence numbers across different server IDs.
-gtid_strict_mode = 1
-innodb_stats_persistent = 0
-
-# When semi-sync is enabled, don't allow fallback to async
-# if you get no ack, or have no replicas. This is necessary to
-# prevent alternate futures when doing a failover in response to
-# a primary that becomes unresponsive.
-rpl_semi_sync_master_timeout = 1000000000000000000
-rpl_semi_sync_master_wait_no_slave = 1
-
-
-character_set_server = utf8
-collation_server = utf8_general_ci
-
-expire_logs_days = 3
-
-sync_binlog = 1
-binlog_format = ROW
-log_slave_updates
-expire_logs_days = 3
-
-# In MariaDB the default charset is latin1
-
-character_set_server = utf8
-collation_server = utf8_general_ci
-
-
diff --git a/config/mycnf/mariadb104.cnf b/config/mycnf/mariadb104.cnf
deleted file mode 100644
index 047c5c36ab0..00000000000
--- a/config/mycnf/mariadb104.cnf
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is auto-included when MariaDB 10.4 is detected.
-
-# enable strict mode so it's safe to compare sequence numbers across different server IDs.
-gtid_strict_mode = 1
-innodb_stats_persistent = 0
-
-# When semi-sync is enabled, don't allow fallback to async
-# if you get no ack, or have no replicas. This is necessary to
-# prevent alternate futures when doing a failover in response to
-# a primary that becomes unresponsive.
-rpl_semi_sync_master_timeout = 1000000000000000000
-rpl_semi_sync_master_wait_no_slave = 1
-
-
-character_set_server = utf8
-collation_server = utf8_general_ci
-
-expire_logs_days = 3
-
-sync_binlog = 1
-binlog_format = ROW
-log_slave_updates
-expire_logs_days = 3
-
-# In MariaDB the default charset is latin1
-
-character_set_server = utf8
-collation_server = utf8_general_ci
-
-
diff --git a/config/tablet/default.yaml b/config/tablet/default.yaml
index ad88e320871..427465d4598 100644
--- a/config/tablet/default.yaml
+++ b/config/tablet/default.yaml
@@ -55,6 +55,7 @@ oltpReadPool:
size: 16 # queryserver-config-pool-size
timeoutSeconds: 0 # queryserver-config-query-pool-timeout
idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout
+ maxLifetimeSeconds: 0 # queryserver-config-pool-conn-max-lifetime
prefillParallelism: 0 # queryserver-config-pool-prefill-parallelism
maxWaiters: 50000 # queryserver-config-query-pool-waiter-cap
diff --git a/dev.env b/dev.env
index 781f54be7ba..7426dde45f2 100644
--- a/dev.env
+++ b/dev.env
@@ -22,7 +22,7 @@
source ./build.env
-export VTPORTSTART=15000
+export VTPORTSTART=6700
# Add chromedriver to path for Selenium tests.
diff --git a/doc/DesignDocs.md b/doc/DesignDocs.md
deleted file mode 100644
index 0fa0e422cd6..00000000000
--- a/doc/DesignDocs.md
+++ /dev/null
@@ -1,5 +0,0 @@
-The pages below this navigation entry `Design Docs` represent the design considerations
-that went behind some of the features implemented. They may not be necessarily up-to-date.
-
-Also, some of the ideas here may just be experimental proposals, and it's possible
-that such ideas might have been abandoned or not implemented yet.
diff --git a/doc/DockerBuild.md b/doc/DockerBuild.md
index 39a28e9cb16..c4134556cc2 100644
--- a/doc/DockerBuild.md
+++ b/doc/DockerBuild.md
@@ -31,7 +31,6 @@ Then you can run our build script for the `lite` image which extracts the Vitess
vitess$ docker pull vitess/bootstrap:mysql56 # MySQL Community Edition 5.6
vitess$ docker pull vitess/bootstrap:percona57 # Percona Server 5.7
vitess$ docker pull vitess/bootstrap:percona # Percona Server
- vitess$ docker pull vitess/bootstrap:mariadb # MariaDB
```
**Note:** If you have already downloaded the `vitess/bootstrap:`
@@ -50,7 +49,6 @@ Then you can run our build script for the `lite` image which extracts the Vitess
vitess$ make docker_base_mysql56
vitess$ make docker_base_percona57
vitess$ make docker_base_percona
- vitess$ make docker_base_mariadb
```
1. Build the `vitess/lite[:]` image.
@@ -65,7 +63,6 @@ Then you can run our build script for the `lite` image which extracts the Vitess
vitess$ make docker_lite_mysql56
vitess$ make docker_lite_percona57
vitess$ make docker_lite_percona
- vitess$ make docker_lite_mariadb
```
1. Re-tag the image under your personal repository, then upload it.
diff --git a/doc/APIScope.md b/doc/design-docs/APIScope.md
similarity index 100%
rename from doc/APIScope.md
rename to doc/design-docs/APIScope.md
diff --git a/doc/design-docs/ComponentizingTabletServer.md b/doc/design-docs/ComponentizingTabletServer.md
new file mode 100644
index 00000000000..71db02d319b
--- /dev/null
+++ b/doc/design-docs/ComponentizingTabletServer.md
@@ -0,0 +1,174 @@
+# Componentizing TabletServer
+
+As Vitess adoption expands, several feature requests have been popping up that will benefit from multiple instances of TabletServer (or its sub-components) co-existing within the same process.
+
+The following features drive this refactor, in order of priority:
+
+* Multi-schema
+* VTShovel: multiple data import sources for vreplication
+* VTDirect: allow VTGate to directly send queries to mysql
+
+Beyond these features, componentizing TabletServer will make the vitess architecture more flexible. There are many places in the code where we instantiate a QueryService. All those places can now explore the benefit of instantiating a TabletServer or its sub-components.
+
+## Features
+
+This section describes the use cases and their features. An important prerequisite: In order to retain backward compatibility, the new features should not cause existing behavior to be affected.
+
+### Multi-schema
+
+There has been a steady inflow of enquiries about use cases where a mysql instance has a large number of schemas (1000+). We currently support multi-schema by requiring the user to launch one vttablet process per schema. This, however, does not scale for the number of schemas we are beginning to see.
+
+To enable this, we need the ability for a single vttablet to host multiple TabletServers. Requirements are:
+
+* Grouped or consolidated Stats (/debug/vars).
+* Segregated or consolidated HTTP endpoints, like (/debug/status), with sub-page links working.
+* A better way to specify flags: the existing approach of command line flags may not scale.
+* A tablet manager that can represent multiple tablet ids.
+
+Other parts of TabletServer have already been modified to point at a shared mysql instance due to work done by @deepthi in #4727, and other related changes.
+
+### VTShovel
+
+VTShovel is a data migration feature that extends VReplication to allow a user to specify an external mysql as the source. This can be used to import data and also keep the targets up-to-date as the source is written to.
+
+VTShovel currently has two limitations:
+
+* It supports only one external source per vttablet. The need for multiple sources was voiced as a requirement by one of the adopters.
+* It does not support VDiff, which also requires multiple sources.
+
+If the TabletServer refactor is architected correctly, VTShovel should inherit the multi-instance ability without any major impact. In particular:
+
+* Leverage the flags refactor to support more than one external source.
+* Observability features implemented for multi-schema like stats and HTTP endpoints should naturally extend to vtshovel.
+* VDiff should work.
+
+### VTDirect
+
+The excessive use of CPU due to gRPC continues to be a concern among some adopters. Additionally, Vitess is now being deployed against externally managed databases like RDS and CloudSQL. Such users are reluctant to pay the latency cost of the extra hop.
+
+VTDirect is the ability of VTGate to directly send queries to the mysql instances.
+
+This feature adds the following requirements over the previous ones:
+
+* Some features of TabletServer (like sequences) should be disabled or redirected to an actual vttablet.
+* TabletServers can have a life-cycle as tablets are added and removed from the topo. The variables and end-points need to reflect these changes.
+
+In previous discussions, alternate approaches that did not require a TabletServer refactor were suggested. Given that the TabletServer refactor brings us much closer to this feature, we’ll need to re-evaluate our options for the best approach. This will be a separate RFC.
+
+## Requirements
+
+This section describes the requirements dictated by the features.
+
+### Stats
+
+Stats (/debug/vars) should be reported in such a way that the variables from each TabletServer can be differentiated. Idiomatic usage of vitess expects the monitoring tool to add the tablet id as a dimension when combining variables coming from different vttablets. Therefore, every TabletServer should be changed to add this dimension to its exported variables.
+
+On the flip side, this may result in an extremely large number of variables to be exported. If so, it may be better to consolidate them. There is no right answer; We have to support both options.
+
+#### Other options considered
+
+We could have each TabletServer export a brand new set of variables by appending the tablet id to the root variable name. However, this would make it very hard for monitoring tools because they are not very good at dealing with dynamic variable names.
+
+### HTTP endpoints
+
+A TabletServer exports a variety of http endpoints. In general, it makes sense to have each TabletServer export a separate set of endpoints within the current process. However, in cases where the performance of the underlying mysql is concerned, it may be beneficial to consolidate certain pages.
+
+We’ll start with a separate set of pages, with each set prefixed by the tablet id. For example, what was previously `/debug/consolidations` will now become `/cell-100/debug/consolidations`.
+
+#### Other options considered
+
+We could keep the existing set of endpoints unchanged, and have each TabletServer add its section. But this would make it hard to troubleshoot problems related to a specific TabletServer.
+
+The best-case scenario would be the “why not both” option: the original set of pages continue to exist and provide a summary from all the tablet servers. This can still be implemented as an enhancement.
+
+### Command line flags
+
+Extending the command line flags to be able to specify parameters for a thousand tablet servers is not going to be practical.
+
+Using config files is a better option. To prevent verbosity, we can use a hierarchy where the root config specifies initial values for the flags, and the TabletServer specific configs can inherit and override the original ones.
+
+The input file format could be yaml. Also, this is a good opportunity for us to come up with better names.
+
+Since the config option is more powerful and flexible, specifying that file in the command line will supersede all legacy flags.
+
+#### Other options considered
+
+These configs could be hosted in the topo. This is actually viable. There are two reasons why this option takes a backseat:
+
+* We currently don’t have good tooling for managing data in the topo. VTCtld is currently the only way, and people have found it inadequate sometimes.
+* There are mechanisms to secure config files, which will allow it to contain secrets like the mysql passwords. This will not be possible in the case of topos.
+
+## Design
+
+We propose to address the above requirements with the following design elements.
+
+### Dimension Dropper
+
+The dimension dropper will be a new feature of the stats package. Its purpose is to remove specific dimensions from any multi-dimensional variable.
+
+We’ll introduce a new command-line flag that takes a list of labels as input, like `-drop_dimensions='Keyspace,ShardName'`. The stats package will then remove that dimension from any variable that refers to it.
+
+In the case of the TabletServer, specifying `TabletID` in the list of dropped dimensions will have the effect of all TabletServers incrementing a common counter instead of different ones under their own tablet id.
+
+The reason for this approach is that there are already other use cases where the number of exported variables is excessive. This allows us to address those use cases also.
+
+It’s possible that this feature is too broad. For example, one may not want to drop the `Keyspace` dimension from all variables. If we encounter such use cases, it should be relatively easy to extend this feature to accommodate more specific rules.
+
+### Exporter
+
+The exporter will be a new feature that will layer between TabletServer and the singleton APIs: stats and http. It will allow you to create exporters that are either anonymous or named.
+
+An anonymous exporter will behave as if you invoked the stats and http directly. Open issue: we’ll need to see if we want to protect from panics due to duplicate registrations.
+
+A named exporter will perform consolidations or segregations depending on the situation:
+
+* In the case of a stats variable, it will create a common underlying variable, and will update the dimension that matches the name of the exporter.
+* In the case of http, it will export the end point under a new URL rooted from the name of the exporter.
+
+Currently, the connection pools have a different name based mechanism to export different stats. The exporter functionality should support this prefixing, which will eliminate the boiler-plate code in those components.
+
+A prototype of this implementation (aka embedder) is present in the vtdirect branch. This needs to be cleaned up and ported to the latest code.
+
+There is no need for the exporter to provide the option to consolidate without the name because the dimension dropper can cover that functionality.
+
+It’s possible to achieve backward compatibility for stats by creating an exporter with a name (tablet id), and then dropping that dimension in stats. However, it’ll work only for stats and not for the http endpoints. For this reason, we need to support explicit code paths for the anonymous exporters. Plus, it makes things more explicit.
+
+### Config loader
+
+The TabletServer already has most, if not all, of its input flags consolidated into a `Config` struct under tabletenv. The existing flags initialize a `DefaultConfig` global variable. If the command line specifies a newly defined flag, like `-tablet_config='filename.yaml'`, then we can branch off into code that reads the yaml file and initializes the configs from there.
+
+The code will load the global part of the yaml into a “global” Config. For each tablet specific config, the global config will be copied first, and then the tablet specific overrides will be overwritten into the copied values.
+
+This is an opportunity for us to rename the members of the Config struct to use better names and data types. The yaml tags will have to match these new names.
+
+The most popular yaml reader seems to be https://github.com/go-yaml/yaml. We’ll start with that and iterate forward.
+
+The dbconfigs data structure will also be folded into the `Config`. This is because each tablet could potentially have different credentials.
+
+#### Bonus points
+
+Given that vitess uses protos everywhere, we could look at standardizing on a generic way to convert yaml to and from protos. This will allow us to look at converting all formats to yaml. If this sounds viable, we can convert the `Config` struct to be generated from a proto, and then have yaml tags that can convert into it. This will future-proof us in case we decide to go this route.
+
+On the initial search, there is no standard way to do this conversion. It would be nice if protos supported this natively as they do for json. We do have the option of using this code to build our own yaml to proto converter: https://github.com/golang/protobuf/blob/master/jsonpb/encode.go.
+
+### TabletManager
+
+The TabletManager change will put everything together for the multi-schema feature.
+
+The ActionAgent data structure will be changed to support multiple tablet servers:
+
+* QueryServiceControl will become a list (or map)
+* UpdateStream will be deleted (deprecated)
+* TabletAlias, VREngine, _tablet and _blacklistedTables will be added to the QueryServiceControl list
+
+All other members of TabletManager seem unaffected.
+
+The tablet manager API will be extended for cases where requests are specific to a tablet id. For example, `GetSchema` will now require the tablet id as an additional parameter. For legacy support: if tablet id is empty, then we redirect the request to the only tablet.
+
+Note: VREngine’s queries are actually tablet agnostic. The user is expected to restrict their queries to the dbname of the tablet. This is not a good user experience. We should tighten up the query analyzer of vrengine to add dbname as an additional constraint or fill in the correct value as needed.
+
+### VReplication
+
+VReplication should have a relatively easy change. We already have a field named external mysql. This can be a key into the tablet id Config, which can then be used to pull the mysql credentials needed to connect to the external mysql.
+
+The multi-instance capabilities of VStreamer will naturally extend to support all the observability features we’ll add to it.
diff --git a/doc/LifeOfAQuery.md b/doc/design-docs/LifeOfAQuery.md
similarity index 89%
rename from doc/LifeOfAQuery.md
rename to doc/design-docs/LifeOfAQuery.md
index b40ef21f261..3ef15b92b52 100644
--- a/doc/LifeOfAQuery.md
+++ b/doc/design-docs/LifeOfAQuery.md
@@ -11,7 +11,7 @@ Life of A Query
A query means a request for information from database and it involves four components in the case of Vitess, including the client application, VtGate, VtTablet and MySQL instance. This doc explains the interaction which happens between and within components.
-![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query.png)
+![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/design-docs/life_of_a_query.png)
At a very high level, as the graph shows, first the client sends a query to VtGate. VtGate then resolves the query and routes it to the right VtTablets. For each VtTablet that receives the query, it does necessary validations and passes the query to the underlying MySQL instance. After gathering results from MySQL, VtTablet sends the response back to VtGate. Once VtGate receives responses from all VtTablets, it sends the combined result to the client. In the presence of VtTablet errors, VtGate will retry the query if errors are recoverable and it only fails the query if either errors are unrecoverable or the maximum number of retries has been reached.
@@ -19,13 +19,13 @@ At a very high level, as the graph shows, first the client sends a query to VtGa
A client application first sends an rpc with an embedded sql query to VtGate. VtGate's rpc server unmarshals this rpc request, calls the appropriate VtGate method and return its result back to client.
-![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_client_to_vtgate.png)
+![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/design-docs/life_of_a_query_client_to_vtgate.png)
VtGate keeps an in-memory table that stores all available rpc methods for each service, e.g. VtGate uses "VTGate" as its service name and most of its methods defined in [go/vt/vtgate/vtgate.go](../go/vt/vtgate/vtgate.go) are used to serve rpc request.
## From VtGate to VtTablet
-![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_vtgate_to_vttablet.png)
+![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/design-docs/life_of_a_query_vtgate_to_vttablet.png)
After receiving an rpc call from the client and one of its Execute* method being invoked, VtGate needs to figure out which shards should receive the query and send it to each of them. In addition, VtGate talks to the topo server to get necessary information to create a VtTablet connection for each shard. At this point, VtGate is able to send the query to the right VtTablets in parallel. VtGate also does retry if timeout happens or some VtTablets return recoverable errors.
@@ -35,13 +35,13 @@ A ShardConn object represents a load balanced connection to a group of VtTablets
## From VtTablet to MySQL
-![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_vttablet_to_mysql.png)
+![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/design-docs/life_of_a_query_vttablet_to_mysql.png)
Once VtTablet received an rpc call from VtGate, it does a few checks before passing the query to MySQL. First, it validates the current VtTablet state including the session id, then generates a query plan and applies predefined query rules and does ACL checks. It also checks whether the query hits the row cache and returns the result immediately if so. In addition, VtTablet consolidates duplicate queries from executing simultaneously and shares results between them. At this point, VtTablet has no way but pass the query down to MySQL layer and wait for the result.
## Putting it all together
-![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/life_of_a_query_all.png)
+![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/design-docs/life_of_a_query_all.png)
## TopoServer
diff --git a/doc/LongRunningJobs.md b/doc/design-docs/LongRunningJobs.md
similarity index 100%
rename from doc/LongRunningJobs.md
rename to doc/design-docs/LongRunningJobs.md
diff --git a/doc/design-docs/OnlineDDLScheduler.md b/doc/design-docs/OnlineDDLScheduler.md
new file mode 100644
index 00000000000..18e3e3a85a0
--- /dev/null
+++ b/doc/design-docs/OnlineDDLScheduler.md
@@ -0,0 +1,122 @@
+# Online DDL Scheduler
+
+The DDL scheduler is a control plane that runs on a `PRIMARY` vttablet, as part of the state manager. It is responsible for identifying new migration requests, to choose and execute the next migration, to review running migrations, cleaning up after completion, etc.
+
+This document explains the general logic behind `onlineddl.Executor` and, in particular, the scheduling aspect.
+
+## OnlineDDL & VTTablet state manager
+
+`onlineddl.Executor` runs on `PRIMARY` tablets. It `Open`s when a tablet turns primary, and `Close`s when the tablet changes its type away from `PRIMARY`. It only operates when in the open state.
+
+## General operations
+
+The scheduler:
+
+- Identifies queued migrations
+- Picks next migration to run
+- Executes a migration
+- Follows up on migration progress
+- Identifies completion or failure
+- Cleans up artifacts
+- Identifies stale (rogue) migrations that need to be marked as failed
+- Identifies migrations started by another tablet
+- Possibly auto-retries migrations
+
+The executor also receives requests from the tablet's query engine/executor to:
+
+- Submit a new migration
+- Cancel a migration
+- Retry a migration
+
+It also responds on the following API endpoint:
+
+- `/schema-migration/report-status`: called by `gh-ost` and `pt-online-schema-change` to report liveness, completion or failure.
+
+# The scheduler
+
+Breaking down the scheduler logic
+
+## Migration states & transitions
+
+A migration can be in any one of these states:
+
+- `queued`: a migration is submitted
+- `ready`: a migration is picked from the queue to run
+- `running`: a migration was started. It is periodically tested to be making progress.
+- `complete`: a migration completed successfully
+- `failed`: a migration failed due to whatever reason. It may have ran for a while, or it may have been marked as `failed` before even running.
+- `cancelled`: a _pending_ migration was cancelled
+
+A migration is said to be _pending_ if we expect it to run and complete. Pending migrations are those in `queued`, `ready` and `running` states.
+
+Some possible state transitions are:
+
+- `queued -> ready -> running -> complete`: the ideal flow where everything just works
+- `queued -> ready -> running -> failed`: a migration breaks
+- `queued -> cancelled`: a migration is cancelled by the user before taken out of queue
+- `queued -> ready -> cancelled`: a migration is cancelled by the user before running
+- `queued -> ready -> running -> failed`: a running migration is cancelled by the user and forcefully terminated, causing it to enter the `failed` state
+- `queued -> ready -> running -> failed -> running`: a failed migration was _retried_
+- `queued -> ... cancelled -> queued -> ready -> running`: a cancelled migration was _retried_ (irrespective of whether it was running at time of cancellation)
+- `queued -> ready -> cancelled -> queued -> ready -> running -> failed -> running -> failed -> running -> completed`: a combined flow that shows we can retry multiple times
+
+## General logic
+
+The scheduler works by periodically sampling the known migrations. Normally there's a once per minute tick that kicks in a series of checks. You may imagine a state machine that advances once per minute. However, some steps such as:
+
+- Submission of a new migration
+- Migration execution start
+- Migration execution completion
+- Open() state
+- Test suite scenario
+
+will kick a burst of additional ticks. This is done to speed up the progress of the state machine. For example, if a new migration is submitted, there's a good chance it will be clear to execute, so an increase in ticks will start the migration within a few seconds rather than one minute later.
+
+By default, Vitess schedules all migrations to run sequentially. Only a single migration is expected to run at any given time. However, there are cases for concurrent execution of migrations, and the user may request concurrent execution via `--allow-concurrent` flag in `ddl_strategy`. Some migrations are eligible to run concurrently, other migrations are eligible to run specific phases concurrently, and some do not allow concurrency. See the user guides for up-to-date information.
+
+## Who runs the migration
+
+Some migrations are executed by the scheduler itself, some by a sub-process, and some implicitly by vreplication, as follows:
+
+- `CREATE TABLE` migrations are executed by the scheduler.
+- `DROP TABLE` migrations are executed by the scheduler.
+- `ALTER TABLE` migrations depend on `ddl_strategy`:
+ - `vitess`/`online`: the scheduler configures, creates and starts a VReplication stream. From that point on, the tablet manager's VReplication logic takes ownership of the execution. The scheduler periodically checks progress. The scheduler identifies an end-of-migration scenario and finalizes the cut-over and termination of the VReplication stream. It is possible for a VReplication migration to span multiple tablets, detailed below. In this case, if the tablet goes down, then the migration will not be lost. It will be continued on another tablet, as described below.
+ - `gh-ost`: the executor runs `gh-ost` via `os.Exec`. It runs the entire flow within a single function. Thus, `gh-ost` completes within the same lifetime of the scheduler (and the tablet space in which is operates). To clarify, if the tablet goes down, then the migration is deemed lost.
+ - `pt-osc`: the executor runs `pt-online-schema-change` via `os.Exec`. It runs the entire flow within a single function. Thus, `pt-online-schema-change` completes within the same lifetime of the scheduler (and the tablet space in which is operates). To clarify, if the tablet goes down, then the migration is deemed lost.
+
+## Stale migrations
+
+The scheduler maintains a _liveness_ timestamp for running migrations:
+
+- `vitess`/`online` migrations are based on VReplication, which reports last timestamp/transaction timestamp. The scheduler infers migration liveness based on these and on the stream status.
+- `gh-ost` migrations report liveness via `/schema-migration/report-status`
+- `pt-osc` does not report liveness. The scheduler actively checks for liveness by looking up the `pt-online-schema-change` process.
+
+One way or another, we expect at most (roughly) a 1 minute interval between a running migration's liveness reports. When a migration is expected to be running, and does not have a liveness report for `10` minutes, then it is considered _stale_.
+
+A stale migration can happen for various reasons. Perhaps a `pt-osc` process went zombie. Or a `gh-ost` process was locked.
+
+When the scheduler finds a stale migration, it:
+
+- Considers it to be broken and removes it from internal bookkeeping of running migrations.
+- Takes steps to forcefully terminate it, just in case it still happens to run:
+ - For a `gh-ost` migration, it touches the panic flag file.
+ - For `pt-osc`, it `kill`s the process, if any
+ - For `online`, it stops and deletes the stream
+
+## Failed tablet migrations
+
+A specially handled scenario is where a migration runs, and the owning (primary) tablet fails.
+
+For `gh-ost` and `pt-osc` migrations, it's impossible to resume the migration from the exact point of failure. The scheduler will attempt a full retry of the migration. This means throwing away the previous migration's artifacts (ghost tables) and starting anew.
+
+To avoid a cascading failure scenario, a migration is only auto-retried _once_. If a 2nd tablet failure takes place, it's up to the user to retry the failed migration.
+
+## Cross tablet VReplication migrations
+
+VReplication is more capable than `gh-ost` and `pt-osc`, since it tracks its state transactionally in the same database server as the migration/ghost table. This means a stream can automatically recover after e.g. a failover. The new `primary` tablet has all the information in `_vt.vreplication`, `_vt.copy_state` to keep on running the stream.
+
+The scheduler supports that. It is able to identify a stream which started with a previous tablet, and is able to take ownership of such a stream. Because VReplication will recover/resume a stream independently of the scheduler, the scheduler will then implicitly find that the stream is _running_ and be able to assert its _liveness_.
+
+The result is that if a tablet fails mid-`online` migration, the new `primary` tablet will auto-resume migration _from the point of interruption_. This happens whether it's the same table that recovers as `primary` or whether its a new tablet that is promoted as `primary`. A migration can survive multiple tablet failures. It is only limited by VReplication's capabilities.
diff --git a/doc/design-docs/RealTimeParamsChange.md b/doc/design-docs/RealTimeParamsChange.md
new file mode 100644
index 00000000000..0617a81f771
--- /dev/null
+++ b/doc/design-docs/RealTimeParamsChange.md
@@ -0,0 +1,25 @@
+# Real-time parameter change
+
+Vitess Components currently disallow changing of configuration parameters while a process is running. There are a few reasons why this was not allowed:
+
+* The command line used to launch the process will cease to be an authoritative source.
+* It is difficult to audit any values that were changed in real-time or keep track of history if changes were made multiple times.
+* If a process is restarted for any reason, the changes will be reverted.
+
+However, if there is an ongoing high severity incident, it may be beneficial to allow human operators to make temporary changes to a system. This, of course, comes with the caveat that they will eventually revert the changes or make them permanent by deploying the binaries with the new flags.
+
+## Proposal
+
+Given that this should be generally discouraged as common practice, these capabilities should not be made openly available. For example, you should not be able to make such changes using SQL statements.
+
+We’ll export a `/debug/env` endpoint from vttablet, protected by `ADMIN` access. The URL will display the current values of the various parameters and will allow you to modify and submit any of those.
+
+Values like connection pool sizes, query timeouts, query consolidation settings, etc. will be changeable. We’ll iterate on this list as more use cases arise. The initial list will be in an upcoming PR.
+
+## Other options considered
+
+SET Statements: These would be issued via VTGate. The problem with this approach is that the commands would be too widely available. Also, VTGate doesn’t allow targeting of specific tablets.
+
+vtctld ExecuteFetchAsDba SET statements: This could be made to work. However, this is currently implemented as a pass-through in tablet manager. Significant changes will be needed to parse the statements and make them do other things.
+
+vtcltd alternate command, like `SetTabletEnv`: This could be made to work. However, it is a lot more work than what is proposed, and may not be worth it for a feature that is meant to be used so rarely. We still have the option of implementing this if the need arises in the future.
diff --git a/doc/ReplicationLagBasedThrottlingOfTransactions.md b/doc/design-docs/ReplicationLagBasedThrottlingOfTransactions.md
similarity index 100%
rename from doc/ReplicationLagBasedThrottlingOfTransactions.md
rename to doc/design-docs/ReplicationLagBasedThrottlingOfTransactions.md
diff --git a/doc/design-docs/RowBasedReplication.md b/doc/design-docs/RowBasedReplication.md
new file mode 100644
index 00000000000..269e1e1074e
--- /dev/null
+++ b/doc/design-docs/RowBasedReplication.md
@@ -0,0 +1,43 @@
+# VTTablet with RBR mode
+
+The deprecation of SBR will result in a fundamental change in how vttablet works. The most significant one is that vttablet does not need to compute which rows will be affected by a DML. Dropping this requirement allows us to rethink the implementation of its various features. Some of these changes will not be fully backward compatible. Here are the details:
+
+## Pass-through DMLs
+
+Most DMLs can be just passed through.
+
+There is, however, one valuable feature that we want to preserve: the ability to limit the number of rows a DML affects. In SBR mode, this was achieved as a side-effect of the fact that we had to run a “subquery” to identify all affected rows, which allowed us to count them and return an error if they exceeded the limit. We lose this ability in pass-through mode.
+
+Instead of continuing to issue the subquery, the new proposal is to add a LIMIT clause to the DML itself. Then, if “RowsAffected” was greater than the limit, we return an error. The one downside of this implementation is that such a failure will leave the DML partially executed. This means that vttablet will have to force a rollback of the transaction. In most use cases, the application is expected to rollback the transaction on such an error. Therefore, this sounds like a reasonable trade-off for the level of simplicity achieved. There is also the added benefit of avoiding the extra roundtrip for the subquery.
+
+This change has a subtle interaction with the “found rows” flag, which reports affected rows differently. This just means that users of this flag may have to set their limits differently.
+
+The explicit pass-through mode flag `queryserver-config-passthrough-dmls` will continue to behave as is. In the new context, its meaning will change to: “do not limit affected rows”.
+
+We believe that inserts of a large number of rows are usually intentional. Therefore, limits will only be applied to updates and deletes.
+
+## Autocommits
+
+With most DMLs being pass-through, we can now resurrect autocommit behavior in vttablet. This means that a transactionless DML can be treated as autocommit. In the case where a limit has to be enforced, vttablet can open a transaction, and then rollback if the limit is exceeded.
+
+## Sequences
+
+Sequences will continue their existing behavior: normal statements will continue to be sent to mysql. If a `select next...` is received, the sequence specific functionality will be triggered.
+
+## Messages
+
+Messages are undergoing a significant overhaul as seen in #5913. Tracking of row changes has been changed to use VStreamer, which allows for any DML to be executed on those tables. Inserts remain an exception because columns like `time_created` and `time_scheduled` have to be populated by vttablet in order to maintain backward compatibility.
+
+## Plan IDs
+
+Most plan ids were specific to SBR. After the refactor, the total number of plans will be greatly reduced, and we’ll likely generate a new set of ids.
+
+## Schema
+
+These changes greatly reduce our dependence on the schema. We only need to know the following info from a table:
+
+* Field info
+* PK columns
+* Table comments
+
+VTTablet also gathered table statistics from `information schema`. However, we don’t think anyone is using them. So, it may be better to stop reporting them. If anyone still needs them, please speak up now.
diff --git a/doc/design-docs/SafeLazyDropTables.md b/doc/design-docs/SafeLazyDropTables.md
new file mode 100644
index 00000000000..0cb0e57669a
--- /dev/null
+++ b/doc/design-docs/SafeLazyDropTables.md
@@ -0,0 +1,95 @@
+# Safe, lazy DROP TABLE
+
+`DROP TABLE` is a risky MySQL operation in production. There seem to be multiple components involved, the major being that if the table has pages in InnoDB's buffer pool (one or many), then the buffer pool is locked for the duration of the `DROP`. The duration of the `DROP` is also related with the time it takes the operating system to delete the `.ibd` file associated with the table (assuming `innodb_file_per_table`).
+
+Noteworthy that the problem is in particular on the `primary` MySQL server; replicas are not affected as much.
+
+Different companies solve `DROP TABLE` in different ways. An interesting discussion is found on `gh-ost`'s repo and on mysql bugs:
+
+- https://github.com/github/gh-ost/issues/307
+- https://bugs.mysql.com/bug.php?id=91977
+
+Solutions differ in implementation, but all suggest _waiting_ for some time before actually dropping the table. That alone requires management around `DROP TABLE` operations. As explained below, _waiting_ enables reverting the operation.
+
+Vitess should automate table drops and make the problem transparent to the user as much as possible. Breakdown of the suggested solution follows.
+
+## Illustrating custom DROP workaround steps
+
+We can make the `DROP` management stateful or stateless. Opting for stateless: no meta tables to describe the progress of the `DROP`. The state should be inferred from the tables themselves. Specifically, we will encode hints in the table names.
+
+We wish to manage `DROP` requests. Most managed `DROP` requests will _wait_ before destroying data. If the user issued a `DROP TABLE` only to realize the app still expects the table to exist, then we make it possible to revert the operation.
+
+This is done by first issuing a `RENAME TABLE my_table TO something_else`. To the app, it seems like the table is gone; but the user may easily restore it by running the revert query: `RENAME TABLE something_else TO my_table`.
+
+That `something_else` name can be e.g. `_vt_HOLD_2201058f_f266_11ea_bab4_0242c0a8b007_20200910113042`.
+
+At some point we decide that we can destroy the data. The "hold" period can either be determined by vitess or explicitly by the user. e.g. On a successful schema migration completion, Vitess can choose to purge the "old" table right away.
+At that stage we rename the table to e.g. `_vt_PURGE_63b5db0c_f25c_11ea_bab4_0242c0a8b007_20200911070228`.
+A table by that name is eligible to have its data purged.
+
+By experience (see `gh-ost` issue above), a safe method to purge data is to slowly remove rows, until the table is empty. Note:
+
+- This operation needs to be throttled (see #6661, #6668, https://github.com/vitessio/website/pull/512)
+- We can `SET SQL_LOG_BIN=0` and only purge the table on the `primary`. This reduces binlog size and also does not introduce replication lag. One may argue that lag-based throttling is not needed, but in my experience it's still wise to use, since replication lag can imply load on `primary`, and it's best to not overload the `primary`.
+- We issue a `DELETE FROM my_table LIMIT 50` ; 10-100 are normally good chunk sizes. Order of purging does not matter.
+
+It's important to note that the `DELETE` statement actually causes table pages to _load into the buffer pool_, which works against our objective.
+
+Once all rows are purged from a table, we rename it again to e.g. `_vt_DROP_8a797518_f25c_11ea_bab4_0242c0a8b007_20210211234156`. At this time we point out that `20200911234156` is actually a readable timestamp, and stands for `2021-02-11 23:41:56`. That timestamp can tell us when the table was last renamed.
+
+Vitess can then run an actual `DROP TABLE` for `_vt_DROP_...` tables whose timestamp is older than, say, 2 days. As mentioned above, purging the table actually caused the table to load onto the buffer pool, and we need to wait for it to naturally get evicted, before dropping it.
+
+## Suggested implementation: table lifecycle aka table garbage collection
+
+The actual implementation will be in `vttablet`. A `primary` vttablet will routinely check for tables that need work:
+
+- A once per hour check is enough; we are going to spend _days_ in dropping a tables, so no need to check frequently
+- Look for tables called `_vt_(HOLD|PURGE|DROP)__` (be very strict about the pattern search, because why not)
+- If time is right, `RENAME` to the next step (`HOLD` renames to `PURGE`, `PURGE` renames to `DROP`)
+- Continuously purge rows from tables in `PURGE` state. We may want to purge only one table at a time, e.g. purge oldest table, until it is empty, immediately rename it to `DROP` state, move on to the next table, begin purging it.
+ - Use throttler in between purge chunks
+
+This means a user-initiated `DROP ... TABLE` on a sharded cluster, is handled by each shard independently. The data is purged at different times, and the disk space reclaimed at different time, each shard will do its best, but will likely have somewhat different workloads to make the operations run faster on some, and slower on others.
+
+We need to allow for alternate methods/flows for dropping tables:
+
+- some users just wait and drop
+- some need to purge rows
+- some operate independently on replicas.
+
+The way to support the above is by introducing user-defined states. The general table lifecycle flow is this, and in this order:
+
+1. Real table (_alive_)
+2. `HOLD`
+3. `PURGE`
+4. `EVAC`
+5. `DROP`
+6. _gone_
+
+Vitess will transition the table through these states, _in order_. But it will also support skipping some states. Let's first explain the meaning of the states:
+
+- Real table (alive): Table is in use in production.
+- `HOLD`: Table is renamed to something like `_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410`. Vitess will not make changes to the table, will not drop data. The table is put away for safe keeping for X hours/days. If it turns out the app still needs the table, the user can `RENAME` is back to its original name, taking it out of this game.
+- `PURGE`: Table renamed to e.g `_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_20200916080539`. Vitess purges (or will purge, based on workload and prior engagements) rows from `PURGE` tables. Data is lost and the user may not resurrect the table anymore.
+Most likely we will settle for a `SQL_LOG_BIN=0`, ie purging will not go through replication. The replicas are not so badly affected by `DROP` statements as a `primary`.
+- `EVAC`: Table renamed to e.g. `_vt_EVAC_6ace8bcef73211ea87e9f875a4d24e90_20200918192031`. The table sits still for Y houtrs/days. I'm thinking this period will be pre-defined by vitess. The purpose of this state is to wait a _reasonable_ amount of time so that tabe's pages are evacuated from the innodb buffer pool by the natural succession of production IO/memory activity.
+- `DROP`: Table renamed to e.g. `_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200921193202`. Vitess will `DROP TABLE` this table _imminently_.
+- gone: end of lifecycle
+
+## Transitioning and skipping of states
+
+The above lifecycle will be the default, and safest cycle. It is also the longest. Some users will use this sequence. Others have no issue dropping a table with millions of rows, and don't want to pay the IO+time of purging the data.
+
+We introduce a `vttablet` command line flag: `-table_gc_lifecycle="hold,purge,evac,drop"`, that's the default value. `drop` is implicit, if you don't specify it, it's automatically appended. Otherwise, consider the following flows:
+
+- some people just happy to drop tables with no wait, no purge, no nothing. They will set `-table_gc_lifecycle="drop"`
+- some people want to keep the table around for a few days, then just go ahead and drop it: `-table_gc_lifecycle="hold,drop"`
+- some want to keep the table, then purge it, then immediately drop: `-table_gc_lifecycle="hold,purge,drop"`
+
+This is both user-customizable and simple to implement.
+
+
+## Asking for a safe DROP
+
+Safe `DROP TABLE`s participate in the Online DDL flow. When a user chooses an online `ddl_strategy`, the user's `DROP TABLE` statement implicitly translates to a `RENAME TABLE` statement which sends the table into our lifecycle mechanism, from where it is garbage collected.
+
diff --git a/doc/SeparatingVttabletMysql.md b/doc/design-docs/SeparatingVttabletMysql.md
similarity index 100%
rename from doc/SeparatingVttabletMysql.md
rename to doc/design-docs/SeparatingVttabletMysql.md
diff --git a/doc/design-docs/TabletManagerModel.md b/doc/design-docs/TabletManagerModel.md
new file mode 100644
index 00000000000..d9c9ac1a3c3
--- /dev/null
+++ b/doc/design-docs/TabletManagerModel.md
@@ -0,0 +1,60 @@
+# The tabletmanager model
+
+## Background
+
+The current tabletmanager model treats the tablet record as authoritative. The tabletmanager polls the tablet record and reacts to changes there. There are also calls sprayed around the code that invoke “RefreshTablet” or “RefreshState”, after they update the tablet record.
+
+This model is not representative of how we currently operate vitess. There is actually no benefit to updating the tablet record and expecting the tablet to refresh itself. In fact, the approach is fragile because we’re unnecessarily bringing additional components in our chain of action, thereby increasing the chances of failure.
+
+We should instead change our model to say that the tablet process is the authoritative source of its current state. It publishes it to the tablet record, which is then used for discovery.
+
+## Details
+
+### While vttablet is up
+
+Every flow that needs to change something about a tablet directly issues an rpc request to it. The tablet will immediately execute the request, and will perform a best-effort action to update the tablet record, and will continue to retry until it succeeds.
+
+The main advantage of this approach is that the request will succeed with a single round trip to the tablet. If the request fails, we treat it as a failure. If the request succeeds, but the tablet fails to update its record, we still succeed. The tablet record will eventually be updated.
+
+### If vttablet is down or is unreachable
+
+If vttablet is unreachable, we fail the operation. This failure mode is no worse than us not being able to update a tablet record.
+
+Essentially, we generally assume that the tablet record may not be in sync with the vttablet. This has always been the case, and our code has to deal with this already.
+
+### Refresh State
+
+Refresh state will continue to exist as an API, but it’s only for refreshing against state changes in the global topo.
+
+### Exception 1: Cluster Leadership
+
+In the case of flows that designate who the primary is, the topo is the authority. For such requests, the tablet will first try to update its record, and only then succeed. This is required because of how the new cluster leadership redesign works.
+
+### Exception 2: VTTablet startup
+
+When a vttablet starts up, it will treat the following info from the topo as authoritative:
+
+* Keyspace
+* Shard
+* Tablet Type
+* DBName
+
+As an additional precaution, if this info does not match the init parameters, we should exit.
+
+We can also consider the following: If the tablet type is primary, we can force a sync against the shard record before we confirm ourselves to be the primary.
+
+## Advantages
+
+The main advantage of this approach is that a vttablet becomes the authoritative owner of a tablet record. This will greatly reduce its complexity because it doesn’t have to continuously poll it, and it does not have to deal with possibly unexpected or invalid changes in the tablet record.
+
+Since it can assume that nobody else is modifying the record, the vttablet can freely update the tablet record with its local copy without worrying about synchronizing with the existing info.
+
+This will also simplify many flows because they will all become a single request instead of being two requests (change record and refresh).
+
+Load on topos will be reduced because the tablets don’t poll anymore.
+
+## Transition
+
+We’ll first change all call sites to a single round-trip to the tablets. The tabletmanager will continue to update the tablet record and rely on its existing poller.
+
+This change will mean that the tabletmanager has become the sole owner of its tablet record. At this point, we can switch its behavior to non-polling.
diff --git a/doc/TabletRouting.md b/doc/design-docs/TabletRouting.md
similarity index 100%
rename from doc/TabletRouting.md
rename to doc/design-docs/TabletRouting.md
diff --git a/doc/design-docs/TabletServerParamsAsYAML.md b/doc/design-docs/TabletServerParamsAsYAML.md
new file mode 100644
index 00000000000..25543ad9018
--- /dev/null
+++ b/doc/design-docs/TabletServerParamsAsYAML.md
@@ -0,0 +1,170 @@
+# VTTablet YAML configuration
+
+## Background
+This issue is an expansion of #5791, and covers the details of how we’ll use yaml to implement a hierarchy of parameters for initializing and customizing TabletServer components within a process.
+
+Using the yaml approach, we intend to solve the following problems:
+
+* Multiple tablet servers need to exist within a process.
+* Badly named command line parameters: The new yaml specifications will have simpler and easier to remember parameter names.
+* Sections for improved readability.
+* Unreasonable default values: The new specs will introduce a simplified approach that will save the user from tweaking too many variables without knowing their consequences.
+* Repetition: If multiple TabletServers have to share the same parameter values, they should not be repeated.
+* Backward compatibility: The system must be backward compatible.
+
+## Proposed Approach
+We will introduce two new command-line options that will be specified like this:
+```
+-defaults=defaults.yaml -tablets=tablets.yaml
+```
+
+The defaults option allows the user to specify the default settings for all tablets. The “tablets” option allows the user to specify tablet specific options.
+
+We’ll provide predefined “defaults” files that people can use as starting points. We’ll start with something simple like small.yaml, medium.yaml, and large.yaml.
+
+### Protobuf
+We explored the use of protobufs for converting to and from yaml. However, the protobuf JSON implementation, which was required to correctly parse enums, was unable to preserve the original values for sub-objects. This was a showstopper for making defaults work.
+
+The existing `tabletenv.TabletConfig` data structure will be converted into a struct with the new names and appropriate JSON tags.
+
+## Detailed specification
+The following section lists all the properties that can be specified in a yaml file along with the existing defaults.
+
+During implementation, we’ll need to decide between creating a universal protobuf that represents this structure vs converting the original one into something more usable.
+
+Convention used: same as the one followed by kubernetes, and we’ll be making use of https://github.com/kubernetes-sigs/yaml for the implementation.
+
+Note that certain properties (like tabletID) are only applicable to tablet-specific files.
+
+```
+tabletID: zone-1234
+
+init:
+ dbName: # init_db_name_override
+ keyspace: # init_keyspace
+ shard: # init_shard
+ tabletType: # init_tablet_type
+ timeoutSeconds: 60 # init_timeout
+
+db:
+ socket: # db_socket
+ host: # db_host
+ port: 0 # db_port
+ charSet: # db_charset
+ flags: 0 # db_flags
+ flavor: # db_flavor
+ sslCa: # db_ssl_ca
+ sslCaPath: # db_ssl_ca_path
+ sslCert: # db_ssl_cert
+ sslKey: # db_ssl_key
+ serverName: # db_server_name
+ connectTimeoutMilliseconds: 0 # db_connect_timeout_ms
+ app:
+ user: vt_app # db_app_user
+ password: # db_app_password
+ useSsl: true # db_app_use_ssl
+ preferSocket: true
+ dba:
+ user: vt_dba # db_dba_user
+ password: # db_dba_password
+ useSsl: true # db_dba_use_ssl
+ preferSocket: true
+ filtered:
+ user: vt_filtered # db_filtered_user
+ password: # db_filtered_password
+ useSsl: true # db_filtered_use_ssl
+ preferSocket: true
+ repl:
+ user: vt_repl # db_repl_user
+ password: # db_repl_password
+ useSsl: true # db_repl_use_ssl
+ preferSocket: true
+ appdebug:
+ user: vt_appdebug # db_appdebug_user
+ password: # db_appdebug_password
+ useSsl: true # db_appdebug_use_ssl
+ preferSocket: true
+ allprivs:
+ user: vt_allprivs # db_allprivs_user
+ password: # db_allprivs_password
+ useSsl: true # db_allprivs_use_ssl
+ preferSocket: true
+
+oltpReadPool:
+ size: 16 # queryserver-config-pool-size
+ timeoutSeconds: 0 # queryserver-config-query-pool-timeout
+ idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout
+ prefillParallelism: 0 # queryserver-config-pool-prefill-parallelism
+ maxWaiters: 50000 # queryserver-config-query-pool-waiter-cap
+
+olapReadPool:
+ size: 200 # queryserver-config-stream-pool-size
+ timeoutSeconds: 0 # queryserver-config-query-pool-timeout
+ idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout
+ prefillParallelism: 0 # queryserver-config-stream-pool-prefill-parallelism
+ maxWaiters: 0
+
+txPool:
+ size: 20 # queryserver-config-transaction-cap
+ timeoutSeconds: 1 # queryserver-config-txpool-timeout
+ idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout
+ prefillParallelism: 0 # queryserver-config-transaction-prefill-parallelism
+ maxWaiters: 50000 # queryserver-config-txpool-waiter-cap
+
+oltp:
+ queryTimeoutSeconds: 30 # queryserver-config-query-timeout
+ txTimeoutSeconds: 30 # queryserver-config-transaction-timeout
+ maxRows: 10000 # queryserver-config-max-result-size
+ warnRows: 0 # queryserver-config-warn-result-size
+
+hotRowProtection:
+ mode: disable|dryRun|enable # enable_hot_row_protection, enable_hot_row_protection_dry_run
+ # Default value is same as txPool.size.
+ maxQueueSize: 20 # hot_row_protection_max_queue_size
+ maxGlobalQueueSize: 1000 # hot_row_protection_max_global_queue_size
+ maxConcurrency: 5 # hot_row_protection_concurrent_transactions
+
+consolidator: enable|disable|notOnPrimary # enable-consolidator, enable-consolidator-replicas
+heartbeatIntervalMilliseconds: 0 # heartbeat_enable, heartbeat_interval
+shutdownGracePeriodSeconds: 0 # transaction_shutdown_grace_period
+passthroughDML: false # queryserver-config-passthrough-dmls
+streamBufferSize: 32768 # queryserver-config-stream-buffer-size
+queryCacheSize: 5000 # queryserver-config-query-cache-size
+schemaReloadIntervalSeconds: 1800 # queryserver-config-schema-reload-time
+watchReplication: false # watch_replication_stream
+terseErrors: false # queryserver-config-terse-errors
+messagePostponeParallelism: 4 # queryserver-config-message-postpone-cap
+cacheResultFields: true # enable-query-plan-field-caching
+sanitizeLogMessages: false # sanitize_log_messages
+
+
+# The following flags are currently not supported.
+# enforce_strict_trans_tables
+# queryserver-config-strict-table-acl
+# queryserver-config-enable-table-acl-dry-run
+# queryserver-config-acl-exempt-acl
+# enable-tx-throttler
+# tx-throttler-config
+# tx-throttler-healthcheck-cells
+# enable_transaction_limit
+# enable_transaction_limit_dry_run
+# transaction_limit_per_user
+# transaction_limit_by_username
+# transaction_limit_by_principal
+# transaction_limit_by_component
+# transaction_limit_by_subcomponent
+```
+
+There are also other global parameters. VTTablet has a total of 405 flags. We may have to later move some of them into these yamls.
+
+## Implementation
+We'll use the unified tabletenv.TabletConfig data structure to load defaults as well as tablet-specific values. The following changes will be made:
+
+* TabletConfig will be changed to match the above specifications.
+* The existing flags will be changed to update the values in a global TabletConfig.
+* In case of type mismatch (like time.Duration vs a "Seconds" variable), an extra step will be performed after parsing to convert the flag variables into TabletConfig members.
+* The code will be changed to honor the new TabletConfig members.
+* After the flag.Parse step, a copy of the global Config will be used as input to load the defaults file. This means that any command line flags that are not overridden in the yaml files will be preserved. This behavior is needed to support backward compatibility in case we decide to move more flags into the yaml.
+* For each tablet entry, we create a copy of the result TabletConfig and read the tablet yaml into those, which will set the tablet-specific values. This will then be used to instantiate a TabletServer.
+
+The exact format of the tablets.yaml file is not fully finalized. Our options are to allow a list of files where each is for a single tablet, or, to require only one file containing a dictionary of tablet-specific overrides.
diff --git a/doc/TwoPhaseCommitDesign.md b/doc/design-docs/TwoPhaseCommitDesign.md
similarity index 99%
rename from doc/TwoPhaseCommitDesign.md
rename to doc/design-docs/TwoPhaseCommitDesign.md
index 6d0df21dc90..e1376985a4a 100644
--- a/doc/TwoPhaseCommitDesign.md
+++ b/doc/design-docs/TwoPhaseCommitDesign.md
@@ -106,7 +106,7 @@ For #1 and #2, the Rollback workflow is initiated. For #3, the commit is resumed
The following diagram illustrates the life-cycle of a Vitess transaction.
-![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/TxLifecycle.png)
+![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/design-docs/TxLifecycle.png)
A transaction generally starts off as a single DB transaction. It becomes a distributed transaction as soon as more than one VTTablet is affected. If the app issues a rollback, then all participants are simply rolled back. If a BEC is issued, then all transactions are individually committed. These actions are the same irrespective of single or distributed transactions.
@@ -132,7 +132,7 @@ In order to make 2PC work, the following pieces of functionality have to be buil
The diagram below show how the various components interact.
-![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/TxInteractions.png)
+![](https://raw.githubusercontent.com/vitessio/vitess/main/doc/design-docs/TxInteractions.png)
The detailed design explains all the functionalities and interactions.
diff --git a/doc/TxInteractions.png b/doc/design-docs/TxInteractions.png
similarity index 100%
rename from doc/TxInteractions.png
rename to doc/design-docs/TxInteractions.png
diff --git a/doc/TxLifecycle.png b/doc/design-docs/TxLifecycle.png
similarity index 100%
rename from doc/TxLifecycle.png
rename to doc/design-docs/TxLifecycle.png
diff --git a/doc/V3HighLevelDesign.md b/doc/design-docs/V3HighLevelDesign.md
similarity index 100%
rename from doc/V3HighLevelDesign.md
rename to doc/design-docs/V3HighLevelDesign.md
diff --git a/doc/V3VindexDesign.md b/doc/design-docs/V3VindexDesign.md
similarity index 100%
rename from doc/V3VindexDesign.md
rename to doc/design-docs/V3VindexDesign.md
diff --git a/doc/VTGateSubqueries.md b/doc/design-docs/VTGateSubqueries.md
similarity index 100%
rename from doc/VTGateSubqueries.md
rename to doc/design-docs/VTGateSubqueries.md
diff --git a/doc/VTGateV3Features.md b/doc/design-docs/VTGateV3Features.md
similarity index 100%
rename from doc/VTGateV3Features.md
rename to doc/design-docs/VTGateV3Features.md
diff --git a/doc/design-docs/VTTabletFastStateTransitions.md b/doc/design-docs/VTTabletFastStateTransitions.md
new file mode 100644
index 00000000000..2e2bc8e3805
--- /dev/null
+++ b/doc/design-docs/VTTabletFastStateTransitions.md
@@ -0,0 +1,25 @@
+# Fast vttablet state transitions
+
+This issue is in response to #6645. When vttablet transitions from primary to non-primary, the following problems can occur under different circumstances:
+
+* When a query is killed, it may take a long time for mysql to return the error if it has to do a lot of cleanup. This can delay a vttablet transition. Just closing the pools is not enough because the tablet shutdown also waits for all executing goroutines to return.
+* It is possible that the query timeout is much greater than the transaction timeout. In such cases, the query timeout must be reduced to match the transaction timeout. Otherwise, a running query can hold a transaction hostage and prevent a vttablet from transitioning.
+* The `transaction_shutdown_grace_period` must acquire a new meaning. It should be renamed to `shutdown_grace_period`, and must also apply to queries that are exceeding this time limit. This limit applies to all queries: streaming, oltp read, reserved, and in_transaction.
+* The transaction shutdown code "waits for empty", but reserved connections (not in a transaction) are now part of this pool and will prevent the pool from going empty until they timeout. We need to close them more proactively during a shutdown.
+* The transition from primary to non-primary uses the immediate flag. This should wait for transactions to complete. Fortunately, due to how PRS works, that code path is not used. We instead use the "dont_serve" code path. But this needs to be fixed for future-proofing.
+
+Many approaches were discussed in #6645. Those approaches are all non-viable because they don't address all of the above concerns.
+
+To fix all these problems, some refactoring will need to be done. Here's the proposal:
+
+* The query killer (DBConn.Kill) will be changed to proactively close the connection. This will cause the execution to return immediately, thereby addressing the problem where slow kills delay a shutdown.
+* Change the query execution to use the minimum of the transaction timeout and query timeout, but only if the request is part of a transaction.
+* Build a list of all currently active queries by extending StreamQueryList. During a shutdown, the state manager will use this list to kill all active queries if shutdown_grace_period is hit. This, along with the Kill change, will cause all those executes to immediately return and the connections will be returned to their respective pools.
+* tx_engine/tx_pool: Will now have two modes during shutdown:
+ * "kill_reserved" will cause all reserved connections to be closed. If a reserved connection is returned to the pool during this state, it will also be closed. This will be the initial shutdown state until the shutdown grace period is hit.
+ * "kill_all" will cause all reserved and transaction connections to be closed. We enter this state after the shutdown grace period is hit. While in this state, the state manager would have killed all currently executing queries. As these connections are returned to the pool, we just close them. This will cause the tx pool to close in a timely manner.
+* Fix transition to not be immediate (dead code).
+
+During a previous code review, I also found a (rare) race condition in tx_engine, which I forgot to document. I'll redo the analysis and fix the race if it's trivial.
+
+The most important guarantee of this change is that a shutdown will not take longer than the shutdown_grace_period if it was specified.
diff --git a/doc/VindexAsTable.md b/doc/design-docs/VindexAsTable.md
similarity index 100%
rename from doc/VindexAsTable.md
rename to doc/design-docs/VindexAsTable.md
diff --git a/doc/VitessQueues.md b/doc/design-docs/VitessQueues.md
similarity index 100%
rename from doc/VitessQueues.md
rename to doc/design-docs/VitessQueues.md
diff --git a/doc/life_of_a_query.png b/doc/design-docs/life_of_a_query.png
similarity index 100%
rename from doc/life_of_a_query.png
rename to doc/design-docs/life_of_a_query.png
diff --git a/doc/life_of_a_query.xml b/doc/design-docs/life_of_a_query.xml
similarity index 100%
rename from doc/life_of_a_query.xml
rename to doc/design-docs/life_of_a_query.xml
diff --git a/doc/life_of_a_query_all.png b/doc/design-docs/life_of_a_query_all.png
similarity index 100%
rename from doc/life_of_a_query_all.png
rename to doc/design-docs/life_of_a_query_all.png
diff --git a/doc/life_of_a_query_all.xml b/doc/design-docs/life_of_a_query_all.xml
similarity index 100%
rename from doc/life_of_a_query_all.xml
rename to doc/design-docs/life_of_a_query_all.xml
diff --git a/doc/life_of_a_query_client_to_vtgate.png b/doc/design-docs/life_of_a_query_client_to_vtgate.png
similarity index 100%
rename from doc/life_of_a_query_client_to_vtgate.png
rename to doc/design-docs/life_of_a_query_client_to_vtgate.png
diff --git a/doc/life_of_a_query_client_to_vtgate.xml b/doc/design-docs/life_of_a_query_client_to_vtgate.xml
similarity index 100%
rename from doc/life_of_a_query_client_to_vtgate.xml
rename to doc/design-docs/life_of_a_query_client_to_vtgate.xml
diff --git a/doc/life_of_a_query_vtgate_to_vttablet.png b/doc/design-docs/life_of_a_query_vtgate_to_vttablet.png
similarity index 100%
rename from doc/life_of_a_query_vtgate_to_vttablet.png
rename to doc/design-docs/life_of_a_query_vtgate_to_vttablet.png
diff --git a/doc/life_of_a_query_vtgate_to_vttablet.xml b/doc/design-docs/life_of_a_query_vtgate_to_vttablet.xml
similarity index 100%
rename from doc/life_of_a_query_vtgate_to_vttablet.xml
rename to doc/design-docs/life_of_a_query_vtgate_to_vttablet.xml
diff --git a/doc/life_of_a_query_vttablet_to_mysql.png b/doc/design-docs/life_of_a_query_vttablet_to_mysql.png
similarity index 100%
rename from doc/life_of_a_query_vttablet_to_mysql.png
rename to doc/design-docs/life_of_a_query_vttablet_to_mysql.png
diff --git a/doc/life_of_a_query_vttablet_to_mysql.xml b/doc/design-docs/life_of_a_query_vttablet_to_mysql.xml
similarity index 100%
rename from doc/life_of_a_query_vttablet_to_mysql.xml
rename to doc/design-docs/life_of_a_query_vttablet_to_mysql.xml
diff --git a/doc/flags/14.0-to-15.0-transition/mysqlctl.diff b/doc/flags/14.0-to-15.0-transition/mysqlctl.diff
new file mode 100644
index 00000000000..285919a33f6
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/mysqlctl.diff
@@ -0,0 +1,241 @@
+diff --git a/flags/14.0/mysqlctl.txt b/flags/15.0/mysqlctl.txt
+index a535bc4..6444c8f 100644
+--- a/flags/14.0/mysqlctl.txt
++++ b/flags/15.0/mysqlctl.txt
+@@ -1,150 +1,86 @@
+-Usage of mysqlctl:
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
+- --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+- --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+- --db_dba_password string db dba password
+- --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
+- --db_flags uint Flag values as defined by MySQL.
+- --db_flavor string Flavor overrid. Valid value is FilePos.
+- --db_host string The host name for the tcp connection.
+- --db_port int tcp port
+- --db_server_name string server name of the DB we are connecting to.
+- --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+- --db_ssl_ca string connection ssl ca
+- --db_ssl_ca_path string connection ssl ca path
+- --db_ssl_cert string connection ssl certificate
+- --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+- --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_port int mysql port (default 3306)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysql_socket string path to the mysql socket
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --port int vttablet port (default 6612)
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_uid uint tablet uid (default 41983)
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++Usage: mysqlctl [global-flags] -- [command-flags]
++
++The commands are listed below. Use 'mysqlctl -- {-h, --help}' for command help.
++
++ init [--wait_time=5m] [--init_db_sql_file=]
++ init_config
++ reinit_config
++ teardown [--wait_time=5m] [--force]
++ start [--wait_time=5m]
++ shutdown [--wait_time=5m]
++ position
++
++Global flags:
++ --alsologtostderr log to standard error as well as files
++ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
++ --app_pool_size int Size of the connection pool for app connections (default 40)
++ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
++ --db-credentials-file string db credentials file; send SIGHUP to reload this file
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
++ --db-credentials-vault-addr string URL to Vault server
++ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
++ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
++ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
++ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
++ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
++ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
++ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
++ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
++ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
++ --db_dba_password string db dba password
++ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
++ --db_dba_user string db dba user userKey (default "vt_dba")
++ --db_flags uint Flag values as defined by MySQL.
++ --db_flavor string Flavor overrid. Valid value is FilePos.
++ --db_host string The host name for the tcp connection.
++ --db_port int tcp port
++ --db_server_name string server name of the DB we are connecting to.
++ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
++ --db_ssl_ca string connection ssl ca
++ --db_ssl_ca_path string connection ssl ca path
++ --db_ssl_cert string connection ssl certificate
++ --db_ssl_key string connection ssl key
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
++ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
++ --dba_pool_size int Size of the connection pool for dba connections (default 20)
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --mysql_port int MySQL port (default 3306)
++ --mysql_server_version string MySQL server version to advertise.
++ --mysql_socket string Path to the mysqld socket file
++ --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
++ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
++ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
++ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
++ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
++ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
++ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
++ --port int port for the server
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
++ --socket_file string Local unix socket file to listen on
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
++ --tablet_uid uint Tablet UID (default 41983)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/doc/flags/14.0-to-15.0-transition/mysqlctld.diff b/doc/flags/14.0-to-15.0-transition/mysqlctld.diff
new file mode 100644
index 00000000000..593cc7476ee
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/mysqlctld.diff
@@ -0,0 +1,174 @@
+diff --git a/flags/14.0/mysqlctld.txt b/flags/15.0/mysqlctld.txt
+index 47df65e..f34697b 100644
+--- a/flags/14.0/mysqlctld.txt
++++ b/flags/15.0/mysqlctld.txt
+@@ -2,48 +2,24 @@ Usage of mysqlctld:
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
++ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor overrid. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+@@ -54,22 +30,19 @@ Usage of mysqlctld:
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -77,36 +50,25 @@ Usage of mysqlctld:
+ --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --init_db_sql_file string path to .sql file to run after mysql_install_db
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
++ -h, --help display usage and exit
++ --init_db_sql_file string Path to .sql file to run after mysqld initialization
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_port int mysql port (default 3306)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
++ --mysql_port int MySQL port (default 3306)
+ --mysql_server_version string MySQL server version to advertise.
+- --mysql_socket string path to the mysql socket
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
++ --mysql_socket string Path to the mysqld socket file
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+@@ -114,40 +76,16 @@ Usage of mysqlctld:
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --socket_file string Local unix socket file to listen on
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_uid uint tablet uid (default 41983)
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --wait_time duration how long to wait for mysqld startup or shutdown (default 5m0s)
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --tablet_uid uint Tablet UID (default 41983)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --wait_time duration How long to wait for mysqld startup or shutdown (default 5m0s)
diff --git a/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff b/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff
new file mode 100644
index 00000000000..a5be9159aa0
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff
@@ -0,0 +1,91 @@
+diff --git a/flags/14.0/vtaclcheck.txt b/flags/15.0/vtaclcheck.txt
+index e7c9720..6e2c57d 100644
+--- a/flags/14.0/vtaclcheck.txt
++++ b/flags/15.0/vtaclcheck.txt
+.0/vtaclcheck.txt
+@@ -1,67 +1,19 @@
+ Usage of vtaclcheck:
+- --acl_file string The path of the JSON ACL file to check
+- --alsologtostderr log to standard error as well as files
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --static_auth_file string The path of the auth_server_static JSON file to check
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --acl-file string The path of the JSON ACL file to check
++ --alsologtostderr log to standard error as well as files
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --static-auth-file string The path of the auth_server_static JSON file to check
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/doc/flags/14.0-to-15.0-transition/vtadmin.diff b/doc/flags/14.0-to-15.0-transition/vtadmin.diff
new file mode 100644
index 00000000000..b1f229d200c
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtadmin.diff
@@ -0,0 +1,53 @@
+diff --git a/flags/14.0/vtadmin.txt b/flags/15.0/vtadmin.txt
+new file mode 100644
+index 0000000..7ea8436
+--- /dev/null
++++ b/flags/15.0/vtadmin.txt
+@@ -0,0 +1,47 @@
++Usage:
++ vtadmin [flags]
++
++Flags:
++ --addr string address to serve on (default ":15000")
++ --alsologtostderr log to standard error as well as files
++ --cache-refresh-key string instructs a request to ignore any cached data (if applicable) and refresh the cache;usable as an HTTP header named 'X-' and as a gRPC metadata key ''
++ Note: any whitespace characters are replaced with hyphens. (default "vt-cache-refresh")
++ --cluster cluster.ClustersFlag per-cluster configuration. any values here take precedence over those in -cluster-defaults or -cluster-config (default [])
++ --cluster-config cluster.FileConfig path to a yaml cluster configuration. see clusters.example.yaml (default {defaults: *cluster.Config:{ID: Name: DiscoveryImpl: DiscoveryFlagsByImpl:map[] TabletFQDNTmplStr: VtSQLFlags:map[] VtctldFlags:map[] BackupReadPoolConfig: SchemaReadPoolConfig: TopoRWPoolConfig: TopoReadPoolConfig: WorkflowReadPoolConfig: EmergencyFailoverPoolConfig: FailoverPoolConfig: SchemaCacheConfig: vtctldConfigOpts:[] vtsqlConfigOpts:[]}, clusters: []})
++ --cluster-defaults cluster.Config default options for all clusters (default *cluster.Config:{ID: Name: DiscoveryImpl: DiscoveryFlagsByImpl:map[] TabletFQDNTmplStr: VtSQLFlags:map[] VtctldFlags:map[] BackupReadPoolConfig: SchemaReadPoolConfig: TopoRWPoolConfig: TopoReadPoolConfig: WorkflowReadPoolConfig: EmergencyFailoverPoolConfig: FailoverPoolConfig: SchemaCacheConfig: vtctldConfigOpts:[] vtsqlConfigOpts:[]})
++ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
++ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --enable-dynamic-clusters whether to enable dynamic clusters that are set by request header cookies or gRPC metadata
++ --grpc-allow-reflection grpc_cli whether to register the gRPC server for reflection; this is required to use tools like grpc_cli
++ --grpc-enable-channelz whether to enable the channelz service on the gRPC server
++ --grpc-tracing whether to enable tracing on the gRPC server
++ -h, --help help for vtadmin
++ --http-debug-omit-env StringSetFlag name of an environment variable to omit from /debug/env, if http debug endpoints are enabled. specify multiple times to omit multiple env vars
++ --http-debug-sanitize-env StringSetFlag name of an environment variable to sanitize in /debug/env, if http debug endpoints are enabled. specify multiple times to sanitize multiple env vars
++ --http-metrics-endpoint string HTTP endpoint to expose prometheus metrics on. Omit to disable scraping metrics. Using a path used by VTAdmin's http API is unsupported and causes undefined behavior. (default "/metrics")
++ --http-no-compress whether to disable compression of HTTP API responses
++ --http-no-debug whether to disable /debug/pprof/* and /debug/env HTTP endpoints
++ --http-origin strings repeated, comma-separated flag of allowed CORS origins. omit to disable CORS
++ --http-tablet-url-tmpl string [EXPERIMENTAL] Go template string to generate a reachable http(s) address for a tablet. Currently used to make passthrough requests to /debug/vars endpoints. (default "https://{{ .Tablet.Hostname }}:80")
++ --http-tracing whether to enable tracing on the HTTP server
++ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
++ --lame-duck-duration duration length of lame duck period at shutdown (default 5s)
++ --lmux-read-timeout duration how long to spend connection muxing (default 1s)
++ --log_dir string If non-empty, write log files in this directory
++ --logtostderr log to standard error instead of files
++ --no-rbac whether to disable RBAC. must be set if not passing --no-rbac
++ --rbac whether to enable RBAC. must be set if not passing --rbac
++ --rbac-config string path to an RBAC config file. must be set if passing --rbac
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tracer string tracing service to use (default "noop")
++ --tracing-enable-logging whether to enable logging in the tracing service
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ -v, --v Level log level for V logs
++ --version version for vtadmin
diff --git a/doc/flags/14.0-to-15.0-transition/vtbackup.diff b/doc/flags/14.0-to-15.0-transition/vtbackup.diff
new file mode 100644
index 00000000000..475bae16b3c
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtbackup.diff
@@ -0,0 +1,497 @@
+diff --git a/flags/14.0/vtbackup.txt b/flags/15.0/vtbackup.txt
+index 15e5f21..7f81472 100644
+--- a/flags/14.0/vtbackup.txt
++++ b/flags/15.0/vtbackup.txt
+@@ -1,318 +1,175 @@
+ Usage of vtbackup:
+- --allow_first_backup Allow this job to take the first backup of an existing shard.
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)
+- --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used
+- --azblob_backup_container_name string Azure Blob Container Name
+- --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased) (default 1)
+- --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/')
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --ceph_backup_storage_config string Path to JSON config file for ceph backup storage (default ceph_backup_config.json)
+- --concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-allprivs-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-allprivs-flags uint deprecated: use db_flags
+- --db-config-allprivs-flavor string deprecated: use db_flavor
+- --db-config-allprivs-host string deprecated: use db_host
+- --db-config-allprivs-pass string db allprivs deprecated: use db_allprivs_password
+- --db-config-allprivs-port int deprecated: use db_port
+- --db-config-allprivs-server_name string deprecated: use db_server_name
+- --db-config-allprivs-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-allprivs-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-allprivs-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-allprivs-ssl-key string deprecated: use db_ssl_key
+- --db-config-allprivs-uname string deprecated: use db_allprivs_user (default vt_allprivs)
+- --db-config-allprivs-unixsocket string deprecated: use db_socket
+- --db-config-app-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-app-flags uint deprecated: use db_flags
+- --db-config-app-flavor string deprecated: use db_flavor
+- --db-config-app-host string deprecated: use db_host
+- --db-config-app-pass string db app deprecated: use db_app_password
+- --db-config-app-port int deprecated: use db_port
+- --db-config-app-server_name string deprecated: use db_server_name
+- --db-config-app-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-app-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-app-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-app-ssl-key string deprecated: use db_ssl_key
+- --db-config-app-uname string deprecated: use db_app_user (default vt_app)
+- --db-config-app-unixsocket string deprecated: use db_socket
+- --db-config-appdebug-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-appdebug-flags uint deprecated: use db_flags
+- --db-config-appdebug-flavor string deprecated: use db_flavor
+- --db-config-appdebug-host string deprecated: use db_host
+- --db-config-appdebug-pass string db appdebug deprecated: use db_appdebug_password
+- --db-config-appdebug-port int deprecated: use db_port
+- --db-config-appdebug-server_name string deprecated: use db_server_name
+- --db-config-appdebug-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-appdebug-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-appdebug-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-appdebug-ssl-key string deprecated: use db_ssl_key
+- --db-config-appdebug-uname string deprecated: use db_appdebug_user (default vt_appdebug)
+- --db-config-appdebug-unixsocket string deprecated: use db_socket
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+- --db-config-erepl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-erepl-dbname string deprecated: dbname does not need to be explicitly configured
+- --db-config-erepl-flags uint deprecated: use db_flags
+- --db-config-erepl-flavor string deprecated: use db_flavor
+- --db-config-erepl-host string deprecated: use db_host
+- --db-config-erepl-pass string db erepl deprecated: use db_erepl_password
+- --db-config-erepl-port int deprecated: use db_port
+- --db-config-erepl-server_name string deprecated: use db_server_name
+- --db-config-erepl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-erepl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-erepl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-erepl-ssl-key string deprecated: use db_ssl_key
+- --db-config-erepl-uname string deprecated: use db_erepl_user (default vt_erepl)
+- --db-config-erepl-unixsocket string deprecated: use db_socket
+- --db-config-filtered-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-filtered-flags uint deprecated: use db_flags
+- --db-config-filtered-flavor string deprecated: use db_flavor
+- --db-config-filtered-host string deprecated: use db_host
+- --db-config-filtered-pass string db filtered deprecated: use db_filtered_password
+- --db-config-filtered-port int deprecated: use db_port
+- --db-config-filtered-server_name string deprecated: use db_server_name
+- --db-config-filtered-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-filtered-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-filtered-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-filtered-ssl-key string deprecated: use db_ssl_key
+- --db-config-filtered-uname string deprecated: use db_filtered_user (default vt_filtered)
+- --db-config-filtered-unixsocket string deprecated: use db_socket
+- --db-config-repl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-repl-flags uint deprecated: use db_flags
+- --db-config-repl-flavor string deprecated: use db_flavor
+- --db-config-repl-host string deprecated: use db_host
+- --db-config-repl-pass string db repl deprecated: use db_repl_password
+- --db-config-repl-port int deprecated: use db_port
+- --db-config-repl-server_name string deprecated: use db_server_name
+- --db-config-repl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-repl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-repl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-repl-ssl-key string deprecated: use db_ssl_key
+- --db-config-repl-uname string deprecated: use db_repl_user (default vt_repl)
+- --db-config-repl-unixsocket string deprecated: use db_socket
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --db_allprivs_password string db allprivs password
+- --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
+- --db_allprivs_user string db allprivs user userKey (default vt_allprivs)
+- --db_app_password string db app password
+- --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
+- --db_app_user string db app user userKey (default vt_app)
+- --db_appdebug_password string db appdebug password
+- --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
+- --db_appdebug_user string db appdebug user userKey (default vt_appdebug)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
+- --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+- --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+- --db_dba_password string db dba password
+- --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
+- --db_erepl_password string db erepl password
+- --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
+- --db_erepl_user string db erepl user userKey (default vt_erepl)
+- --db_filtered_password string db filtered password
+- --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
+- --db_filtered_user string db filtered user userKey (default vt_filtered)
+- --db_flags uint Flag values as defined by MySQL.
+- --db_flavor string Flavor overrid. Valid value is FilePos.
+- --db_host string The host name for the tcp connection.
+- --db_port int tcp port
+- --db_repl_password string db repl password
+- --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
+- --db_repl_user string db repl user userKey (default vt_repl)
+- --db_server_name string server name of the DB we are connecting to.
+- --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+- --db_ssl_ca string connection ssl ca
+- --db_ssl_ca_path string connection ssl ca path
+- --db_ssl_cert string connection ssl certificate
+- --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+- --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --detach detached mode - run backups detached from the terminal
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --file_backup_storage_root string root directory for the file backup storage
+- --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups
+- --gcs_backup_storage_root string root prefix for all backup-related object names
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --init_db_name_override string (init parameter) override the name of the db used by vttablet
+- --init_db_sql_file string path to .sql file to run after mysql_install_db
+- --init_keyspace string (init parameter) keyspace to use for this tablet
+- --init_shard string (init parameter) shard to use for this tablet
+- --initial_backup Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --min_backup_interval duration Only take a new backup if it's been at least this long since the most recent backup.
+- --min_retention_count int Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made (default 1)
+- --min_retention_time duration Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mycnf-file string path to my.cnf, if reading all config params from there
+- --mycnf_bin_log_path string mysql binlog path
+- --mycnf_data_dir string data directory for mysql
+- --mycnf_error_log_path string mysql error log path
+- --mycnf_general_log_path string mysql general log path
+- --mycnf_innodb_data_home_dir string Innodb data home directory
+- --mycnf_innodb_log_group_home_dir string Innodb log group home directory
+- --mycnf_master_info_file string mysql master.info file
+- --mycnf_mysql_port int port mysql is listening on
+- --mycnf_pid_file string mysql pid file
+- --mycnf_relay_log_index_path string mysql relay log index path
+- --mycnf_relay_log_info_path string mysql relay log info path
+- --mycnf_relay_log_path string mysql relay log path
+- --mycnf_secure_file_priv string mysql path for loading secure files
+- --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored)
+- --mycnf_slow_log_path string mysql slow query log path
+- --mycnf_socket_file string mysql socket file
+- --mycnf_tmp_dir string mysql tmp directory
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_port int mysql port (default 3306)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysql_socket string path to the mysql socket
+- --mysql_timeout duration how long to wait for mysqld startup (default 5m0s)
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --replication_timeout duration DEPRECATED AND UNUSED (default 1h0m0s)
+- --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.
+- --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided)
+- --s3_backup_aws_region string AWS region to use (default us-east-1)
+- --s3_backup_aws_retries int AWS request retries (default -1)
+- --s3_backup_force_path_style force the s3 path style
+- --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors (default LogOff)
+- --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file)
+- --s3_backup_storage_bucket string S3 bucket to use for backups
+- --s3_backup_storage_root string root prefix for all backup-related object names
+- --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --timeout duration DEPRECATED AND UNUSED (default 2h0m0s)
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --allow_first_backup Allow this job to take the first backup of an existing shard.
++ --alsologtostderr log to standard error as well as files
++ --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
++ --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
++ --azblob_backup_container_name string Azure Blob Container Name.
++ --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
++ --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
++ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
++ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
++ --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
++ --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
++ --concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --db-credentials-file string db credentials file; send SIGHUP to reload this file
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
++ --db-credentials-vault-addr string URL to Vault server
++ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
++ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
++ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
++ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
++ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
++ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
++ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
++ --db_allprivs_password string db allprivs password
++ --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
++ --db_allprivs_user string db allprivs user userKey (default "vt_allprivs")
++ --db_app_password string db app password
++ --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
++ --db_app_user string db app user userKey (default "vt_app")
++ --db_appdebug_password string db appdebug password
++ --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
++ --db_appdebug_user string db appdebug user userKey (default "vt_appdebug")
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
++ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
++ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
++ --db_dba_password string db dba password
++ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
++ --db_dba_user string db dba user userKey (default "vt_dba")
++ --db_erepl_password string db erepl password
++ --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
++ --db_erepl_user string db erepl user userKey (default "vt_erepl")
++ --db_filtered_password string db filtered password
++ --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
++ --db_filtered_user string db filtered user userKey (default "vt_filtered")
++ --db_flags uint Flag values as defined by MySQL.
++ --db_flavor string Flavor overrid. Valid value is FilePos.
++ --db_host string The host name for the tcp connection.
++ --db_port int tcp port
++ --db_repl_password string db repl password
++ --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
++ --db_repl_user string db repl user userKey (default "vt_repl")
++ --db_server_name string server name of the DB we are connecting to.
++ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
++ --db_ssl_ca string connection ssl ca
++ --db_ssl_ca_path string connection ssl ca path
++ --db_ssl_cert string connection ssl certificate
++ --db_ssl_key string connection ssl key
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
++ --detach detached mode - run backups detached from the terminal
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --file_backup_storage_root string Root directory for the file backup storage.
++ --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
++ --gcs_backup_storage_root string Root prefix for all backup-related object names.
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --init_db_name_override string (init parameter) override the name of the db used by vttablet
++ --init_db_sql_file string path to .sql file to run after mysql_install_db
++ --init_keyspace string (init parameter) keyspace to use for this tablet
++ --init_shard string (init parameter) shard to use for this tablet
++ --initial_backup Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).
++ --keep-alive-timeout duration Wait until timeout elapses after a successful backup before shutting down.
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --min_backup_interval duration Only take a new backup if it's been at least this long since the most recent backup.
++ --min_retention_count int Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made (default 1)
++ --min_retention_time duration Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.
++ --mycnf-file string path to my.cnf, if reading all config params from there
++ --mycnf_bin_log_path string mysql binlog path
++ --mycnf_data_dir string data directory for mysql
++ --mycnf_error_log_path string mysql error log path
++ --mycnf_general_log_path string mysql general log path
++ --mycnf_innodb_data_home_dir string Innodb data home directory
++ --mycnf_innodb_log_group_home_dir string Innodb log group home directory
++ --mycnf_master_info_file string mysql master.info file
++ --mycnf_mysql_port int port mysql is listening on
++ --mycnf_pid_file string mysql pid file
++ --mycnf_relay_log_index_path string mysql relay log index path
++ --mycnf_relay_log_info_path string mysql relay log info path
++ --mycnf_relay_log_path string mysql relay log path
++ --mycnf_secure_file_priv string mysql path for loading secure files
++ --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored)
++ --mycnf_slow_log_path string mysql slow query log path
++ --mycnf_socket_file string mysql socket file
++ --mycnf_tmp_dir string mysql tmp directory
++ --mysql_port int mysql port (default 3306)
++ --mysql_server_version string MySQL server version to advertise.
++ --mysql_socket string path to the mysql socket
++ --mysql_timeout duration how long to wait for mysqld startup (default 5m0s)
++ --port int port for the server
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
++ --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.
++ --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
++ --s3_backup_aws_region string AWS region to use. (default "us-east-1")
++ --s3_backup_aws_retries int AWS request retries. (default -1)
++ --s3_backup_force_path_style force the s3 path style.
++ --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
++ --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
++ --s3_backup_storage_bucket string S3 bucket to use for backups.
++ --s3_backup_storage_root string root prefix for all backup-related object names.
++ --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
++ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
++ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
++ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
++ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
++ --topo_global_root string the path of the global topology data in the global topology server
++ --topo_global_server_address string the address of the global topology server
++ --topo_implementation string the topology implementation to use
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
++ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
++ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
++ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
++ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
++ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
++ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
++ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vtctlclient.diff b/doc/flags/14.0-to-15.0-transition/vtctlclient.diff
new file mode 100644
index 00000000000..b8a2978f7af
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtctlclient.diff
@@ -0,0 +1,88 @@
+diff --git a/flags/14.0/vtctlclient.txt b/flags/15.0/vtctlclient.txt
+index 2ee62d7..207f319 100644
+--- a/flags/14.0/vtctlclient.txt
++++ b/flags/15.0/vtctlclient.txt
+@@ -1,43 +1,41 @@
+ Usage of vtctlclient:
+- --action_timeout duration timeout for the total command (default 1h0m0s)
+- --alsologtostderr log to standard error as well as files
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --server string server to use for connection
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default grpc)
+- --vtctld_grpc_ca string the server ca to use to validate servers when connecting
+- --vtctld_grpc_cert string the cert to use to connect
+- --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtctld_grpc_key string the key to use to connect
+- --vtctld_grpc_server_name string the server name to use to validate server certificate
++ --action_timeout duration timeout for the total command (default 1h0m0s)
++ --alsologtostderr log to standard error as well as files
++ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
++ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --server string server to use for connection
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tracer string tracing service to use (default "noop")
++ --tracing-enable-logging whether to enable logging in the tracing service
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
++ --vtctld_grpc_ca string the server ca to use to validate servers when connecting
++ --vtctld_grpc_cert string the cert to use to connect
++ --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtctld_grpc_key string the key to use to connect
++ --vtctld_grpc_server_name string the server name to use to validate server certificate
diff --git a/doc/flags/14.0-to-15.0-transition/vtctld.diff b/doc/flags/14.0-to-15.0-transition/vtctld.diff
new file mode 100644
index 00000000000..61b8aedda54
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtctld.diff
@@ -0,0 +1,411 @@
+diff --git a/flags/14.0/vtctld.txt b/flags/15.0/vtctld.txt
+index a063b8c..887a4da 100644
+--- a/flags/14.0/vtctld.txt
++++ b/flags/15.0/vtctld.txt
+@@ -1,84 +1,45 @@
+ Usage of vtctld:
+ --action_timeout duration time to wait for an action before resorting to force (default 2m0s)
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
+ --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)
+- --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used
+- --azblob_backup_container_name string Azure Blob Container Name
+- --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased) (default 1)
+- --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/')
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
++ --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
++ --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
++ --azblob_backup_container_name string Azure Blob Container Name.
++ --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
++ --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default grpc)
+- --binlog_use_v3_resharding_mode (DEPRECATED) True if and only if the binlog streamer should use V3-style sharding, which doesn't require a preset sharding key column. (default true)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
++ --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
++ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
++ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --cell string cell to use
+- --ceph_backup_storage_config string Path to JSON config file for ceph backup storage (default ceph_backup_config.json)
+- --client-found-rows-pool-size int DEPRECATED: queryserver-config-transaction-cap will be used instead.
++ --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
++ --compression-engine-name string compressor engine used for compression. (default "pargzip")
++ --compression-level int what level to pass to the compressor. (default 1)
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
+- --durability_policy string type of durability to enforce. Default is none. Other values are dictated by registered plugins (default none)
++ --durability_policy string type of durability to enforce. Default is none. Other values are dictated by registered plugins (default "none")
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-autocommit This flag is deprecated. Autocommit is always allowed. (default true)
+- --enable-consolidator Synonym to -enable_consolidator (default true)
+- --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+- --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
+- --enable-tx-throttler Synonym to -enable_tx_throttler
+- --enable_consolidator This option enables the query consolidator. (default true)
+- --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+- --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+- --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+- --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
+- --enable_queries [DEPRECATED - query commands via vtctl are being deprecated] if set, allows vtgate and vttablet queries. May have security implications, as the queries will be run from this process.
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+- --enable_realtime_stats Required for the Realtime Stats view. If set, vtctld will maintain a streaming RPC to each tablet (in all cells) to gather the realtime health stats.
+- --enable_replication_reporter Use polling to track replication lag.
+- --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+- --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+- --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+- --enable_vtctld_ui If true, the vtctld web interface will be enabled. Default is true. (default true)
+- --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+- --file_backup_storage_root string root directory for the file backup storage
+- --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups
+- --gcs_backup_storage_root string root prefix for all backup-related object names
++ --external-compressor string command with arguments to use when compressing a backup.
++ --external-compressor-extension string extension to use when using an external compressor.
++ --external-decompressor string command with arguments to use when decompressing a backup.
++ --file_backup_storage_root string Root directory for the file backup storage.
++ --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
++ --gcs_backup_storage_root string Root prefix for all backup-related object names.
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
++ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -87,132 +48,57 @@ Usage of vtctld:
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --health_check_interval duration Interval between health checks (default 20s)
+- --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+- --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+- --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
+- --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+- --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+- --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
++ -h, --help display usage and exit
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --online_ddl_check_interval duration deprecated. Will be removed in next Vitess version
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --opentsdb_uri string URI of opentsdb /api/put method
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pool-name-prefix string Deprecated
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --query-log-stream-handler string URL handler for streaming queries log (default /debug/querylog)
+- --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
+- --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+- --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-allowunsafe-dmls deprecated
+- --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+- --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+- --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-dml-rows int query server max dml rows per statement, maximum number of rows allowed to return at a time for an update or delete with either 1) an equality where clauses on primary keys, or 2) a subselect statement. For update and delete statements in above two categories, vttablet will split the original query into multiple small queries based on this configuration value.
+- --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-prefill-parallelism int DEPRECATED: Unused.
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+- --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
+- --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-prefill-parallelism int query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+- --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+- --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000)
+- --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
+- --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
+- --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true)
+- --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+- --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
+- --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-prefill-parallelism int query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
+- --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+- --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
+- --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+- --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+- --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+- --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
+- --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+- --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+- --queryserver_enable_online_ddl Enable online DDL. (default true)
+- --redact-debug-ui-queries redact full queries and bind variables from debug UI
+- --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+- --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided)
+- --s3_backup_aws_region string AWS region to use (default us-east-1)
+- --s3_backup_aws_retries int AWS request retries (default -1)
+- --s3_backup_force_path_style force the s3 path style
+- --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors (default LogOff)
+- --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file)
+- --s3_backup_storage_bucket string S3 bucket to use for backups
+- --s3_backup_storage_root string root prefix for all backup-related object names
+- --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections
+- --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+- --schema_change_check_interval int this value decides how often we check schema change dir, in seconds (default 60)
+- --schema_change_controller string schema change controller is responsible for finding schema changes and responding to schema change events
+- --schema_change_dir string directory contains schema changes for all keyspaces. Each keyspace has its own directory and schema changes are expected to live in '$KEYSPACE/input' dir. e.g. test_keyspace/input/*sql, each sql file represents a schema change
+- --schema_change_replicas_timeout duration how long to wait for replicas to receive the schema change (default 10s)
+- --schema_change_user string The user who submits this schema change.
++ --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
++ --s3_backup_aws_region string AWS region to use. (default "us-east-1")
++ --s3_backup_aws_retries int AWS request retries. (default -1)
++ --s3_backup_force_path_style force the s3 path style.
++ --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
++ --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
++ --s3_backup_storage_bucket string S3 bucket to use for backups.
++ --s3_backup_storage_root string root prefix for all backup-related object names.
++ --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
++ --schema_change_check_interval duration How often the schema change dir is checked for schema changes (deprecated: if passed as a bare integer, the duration will be in seconds). (default 1m0s)
++ --schema_change_controller string Schema change controller is responsible for finding schema changes and responding to schema change events.
++ --schema_change_dir string Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change.
++ --schema_change_replicas_timeout duration How long to wait for replicas to receive a schema change. (default 10s)
++ --schema_change_user string The user who schema changes are submitted on behalf of.
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+- --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+- --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+- --srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+@@ -226,24 +112,13 @@ Usage of vtctld:
+ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_manager_grpc_key string the key to use to connect
+ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
+- --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
+- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+- --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default replica)
+- --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+- --throttler_client_grpc_ca string the server ca to use to validate servers when connecting
+- --throttler_client_grpc_cert string the cert to use to connect
+- --throttler_client_grpc_crl string the server crl to use to validate server certificates when connecting
+- --throttler_client_grpc_key string the key to use to connect
+- --throttler_client_grpc_server_name string the server name to use to validate server certificate
+- --throttler_client_protocol string the protocol to use to talk to the integrated throttler service (default grpc)
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
++ --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
++ --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
++ --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+@@ -256,106 +131,22 @@ Usage of vtctld:
+ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_read_concurrency int concurrent topo reads (default 32)
++ --topo_read_concurrency int Concurrency of topo reads. (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
++ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction-log-stream-handler string URL handler for streaming transactions log (default /debug/txlog)
+- --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --transaction_shutdown_grace_period float DEPRECATED: use shutdown_grace_period instead.
+- --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+- --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+- --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-config string Synonym to -tx_throttler_config (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx-throttler-healthcheck-cells value Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx_throttler_healthcheck_cells value A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+- --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+- --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+- --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+- --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
+- --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s)
+- --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+- --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+- --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1)
+- --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence (default 15m0s)
+- --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+- --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+- --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vreplication_tablet_type string comma separated list of tablet types used as a source (default in_order:REPLICA,PRIMARY)
+- --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+- --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default grpc)
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vtctl_healthcheck_retry_delay duration delay before retrying a failed healthcheck (default 5s)
+ --vtctl_healthcheck_timeout duration the health check timeout period (default 1m0s)
+ --vtctl_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+ --vtctld_sanitize_log_messages When true, vtctld sanitizes logging.
+ --vtctld_show_topology_crud Controls the display of the CRUD topology actions in the vtctld UI. (default true)
+- --vtgate_grpc_ca string the server ca to use to validate servers when connecting
+- --vtgate_grpc_cert string the cert to use to connect
+- --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtgate_grpc_key string the key to use to connect
+- --vtgate_grpc_server_name string the server name to use to validate server certificate
+- --vtgate_protocol string how to talk to vtgate (default grpc)
+- --vtworker_client_grpc_ca string (DEPRECATED) the server ca to use to validate servers when connecting
+- --vtworker_client_grpc_cert string (DEPRECATED) the cert to use to connect
+- --vtworker_client_grpc_crl string (DEPRECATED) the server crl to use to validate server certificates when connecting
+- --vtworker_client_grpc_key string (DEPRECATED) the key to use to connect
+- --vtworker_client_grpc_server_name string (DEPRECATED) the server name to use to validate server certificate
+- --vtworker_client_protocol string (DEPRECATED) the protocol to use to talk to the vtworker server (default grpc)
+- --wait_for_drain_sleep_rdonly duration (DEPRECATED) time to wait before shutting the query service on old RDONLY tablets during MigrateServedTypes (default 5s)
+- --wait_for_drain_sleep_replica duration (DEPRECATED) time to wait before shutting the query service on old REPLICA tablets during MigrateServedTypes (default 15s)
+- --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --web_dir string NOT USED, here for backward compatibility
+- --web_dir2 string NOT USED, here for backward compatibility
+- --workflow_manager_disable value comma separated list of workflow types to disable
+- --workflow_manager_init Initialize the workflow manager in this vtctld instance.
+- --workflow_manager_use_election if specified, will use a topology server-based master election to ensure only one workflow manager is active at a time.
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vtctldclient.diff b/doc/flags/14.0-to-15.0-transition/vtctldclient.diff
new file mode 100644
index 00000000000..40874d41ac7
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtctldclient.diff
@@ -0,0 +1,216 @@
+diff --git a/flags/14.0/vtctldclient.txt b/flags/15.0/vtctldclient.txt
+index ddff2f5..35c7092 100644
+--- a/flags/14.0/vtctldclient.txt
++++ b/flags/15.0/vtctldclient.txt
+@@ -8,6 +8,7 @@ Available Commands:
+ AddCellsAlias Defines a group of cells that can be referenced by a single name (the alias).
+ ApplyRoutingRules Applies the VSchema routing rules.
+ ApplySchema Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication.
++ ApplyShardRoutingRules Applies VSchema shard routing rules.
+ ApplyVSchema Applies the VTGate routing schema to the provided keyspace. Shows the result after application.
+ Backup Uses the BackupStorage service on the given tablet to create and store a new backup.
+ BackupShard Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.
+@@ -29,12 +30,14 @@ Available Commands:
+ GetCellInfo Gets the CellInfo object for the given cell.
+ GetCellInfoNames Lists the names of all cells in the cluster.
+ GetCellsAliases Gets all CellsAlias objects in the cluster.
++ GetFullStatus Outputs a JSON structure that contains full status of MySQL including the replication information, semi-sync information, GTID information among others.
+ GetKeyspace Returns information about the given keyspace from the topology.
+ GetKeyspaces Returns information about every keyspace in the topology.
+ GetPermissions Displays the permissions for a tablet.
+ GetRoutingRules Displays the VSchema routing rules.
+ GetSchema Displays the full schema for a tablet, optionally restricted to the specified tables/views.
+ GetShard Returns information about a shard in the topology.
++ GetShardRoutingRules Displays VSchema shard routing rules.
+ GetSrvKeyspaceNames Outputs a JSON mapping of cell=>keyspace names served in that cell. Omit to query all cells.
+ GetSrvKeyspaces Returns the SrvKeyspaces for the given keyspace in one or more cells.
+ GetSrvVSchema Returns the SrvVSchema for the given cell.
+@@ -42,9 +45,9 @@ Available Commands:
+ GetTablet Outputs a JSON structure that contains information about the tablet.
+ GetTabletVersion Print the version of a tablet from its debug vars.
+ GetTablets Looks up tablets according to filter criteria.
++ GetTopologyPath Gets the file located at the specified path in the topology server.
+ GetVSchema Prints a JSON representation of a keyspace's topo record.
+ GetWorkflows Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.
+- InitShardPrimary Sets the initial primary for the shard.
+ LegacyVtctlCommand Invoke a legacy vtctlclient command. Flag parsing is best effort.
+ PingTablet Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations.
+ PlannedReparentShard Reparents the shard to a new primary, or away from an old primary. Both the old and new primaries must be up and running.
+@@ -63,7 +66,7 @@ Available Commands:
+ RunHealthCheck Runs a healthcheck on the remote tablet.
+ SetKeyspaceDurabilityPolicy Sets the durability-policy used by the specified keyspace.
+ SetShardIsPrimaryServing Add or remove a shard from serving. This is meant as an emergency function. It does not rebuild any serving graphs; i.e. it does not run `RebuildKeyspaceGraph`.
+- SetShardTabletControl Sets the TabletControl record for a shard and tablet type. Only use this for an emergency fix or after a finished MoveTables. The MigrateServedFrom and MigrateServedType commands set this record appropriately already.
++ SetShardTabletControl Sets the TabletControl record for a shard and tablet type. Only use this for an emergency fix or after a finished MoveTables.
+ SetWritable Sets the specified tablet as writable or read-only.
+ ShardReplicationFix Walks through a ShardReplication object and fixes the first error encountered.
+ ShardReplicationPositions
+@@ -80,134 +83,42 @@ Available Commands:
+ ValidateSchemaKeyspace Validates that the schema on the primary tablet for shard 0 matches the schema on all other tablets in the keyspace.
+ ValidateShard Validates that all nodes reachable from the specified shard are consistent.
+ ValidateVersionKeyspace Validates that the version on the primary tablet of shard 0 matches all of the other tablets in the keyspace.
++ ValidateVersionShard Validates that the version on the primary matches all of the replicas.
++ completion Generate the autocompletion script for the specified shell
+ help Help about any command
+
+ Flags:
+- --allowed_tablet_types TabletTypeList Specifies the tablet types this vtgate is allowed to route queries to
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc")
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-consolidator Synonym to -enable_consolidator (default true)
+- --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-tx-throttler Synonym to -enable_tx_throttler
+- --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+- --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+- --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+- --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+- --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests (default 0s)
+- --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+- --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) (default 0s)
+- --keyspaces_to_watch StringList Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+- --log_dir string If non-empty, write log files in this directory
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials (default 0s)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pool-name-prefix string Deprecated
+- --pprof string enable profiling
+- --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog")
+- --querylog-format string format for query logs ("text" or "json") (default "text")
+- --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+- --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+- --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+- --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
+- --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
+- --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+- --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+- --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+- --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+- --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+- --redact-debug-ui-queries redact full queries and bind variables from debug UI
+- --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map StringList comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default "grpc")
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default "http://{{.GetTabletHostPort}}")
+- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica")
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_implementation string the topology implementation to use
+- --tracer string tracing service to use (default "noop")
+- --tracing-sampling-rate OptionalFloat64 sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+- --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-healthcheck-cells StringList Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_healthcheck_cells StringList A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+- -v, --v Level log level for V logs
+- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+- --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+- --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
+- --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+- --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1)
+- --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+- --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default "grpc")
+- --vtctld_grpc_cert string the cert to use to connect
+- --vtctld_grpc_key string the key to use to connect
+- --wait_for_drain_sleep_rdonly duration (DEPRECATED) time to wait before shutting the query service on old RDONLY tablets during MigrateServedTypes (default 5s)
+- --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --action_timeout duration timeout for the total command (default 1h0m0s)
++ --alsologtostderr log to standard error as well as files
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help help for vtctldclient
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --mysql_server_version string MySQL server version to advertise.
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --server string server to use for connection (required)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ -v, --v Level log level for V logs
++ --version version for vtctldclient
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
++ --vtctld_grpc_ca string the server ca to use to validate servers when connecting
++ --vtctld_grpc_cert string the cert to use to connect
++ --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtctld_grpc_key string the key to use to connect
++ --vtctld_grpc_server_name string the server name to use to validate server certificate
+
+ Use "vtctldclient [command] --help" for more information about a command.
diff --git a/doc/flags/14.0-to-15.0-transition/vtexplain.diff b/doc/flags/14.0-to-15.0-transition/vtexplain.diff
new file mode 100644
index 00000000000..0c63b374ecd
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtexplain.diff
@@ -0,0 +1,417 @@
+diff --git a/flags/14.0/vtexplain.txt b/flags/15.0/vtexplain.txt
+index 00a605e..2666e0b 100644
+--- a/flags/14.0/vtexplain.txt
++++ b/flags/15.0/vtexplain.txt
+@@ -1,353 +1,60 @@
+ Usage of vtexplain:
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --batch-interval duration Interval between logical time slots. (default 10ms)
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default grpc)
+- --binlog_use_v3_resharding_mode (DEPRECATED) True if and only if the binlog streamer should use V3-style sharding, which doesn't require a preset sharding key column. (default true)
+- --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
+- --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default keyspace_events)
+- --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true.
+- --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s)
+- --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s)
+- --buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000)
+- --buffer_window duration Duration for how long a request should be buffered at most. (default 10s)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cells_to_watch string comma-separated list of cells for watching tablets
+- --client-found-rows-pool-size int DEPRECATED: queryserver-config-transaction-cap will be used instead.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default fail)
+- --dbname string Optional database target to override normal routing
+- --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default direct)
+- --default_tablet_type value The default tablet type to set for queries, when one is not explicitly selected (default PRIMARY)
+- --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --disable_local_gateway deprecated: if specified, this process will not route any queries to local tablets in the local cell
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-autocommit This flag is deprecated. Autocommit is always allowed. (default true)
+- --enable-consolidator Synonym to -enable_consolidator (default true)
+- --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+- --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
+- --enable-tx-throttler Synonym to -enable_tx_throttler
+- --enable_buffer Enable buffering (stalling) of primary traffic during failovers.
+- --enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests.
+- --enable_consolidator This option enables the query consolidator. (default true)
+- --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+- --enable_direct_ddl Allow users to submit direct DDL statements (default true)
+- --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+- --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+- --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
+- --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+- --enable_replication_reporter Use polling to track replication lag.
+- --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true)
+- --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
+- --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+- --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+- --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+- --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+- --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default multi)
+- --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default allow)
+- --gate_query_cache_lfu gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+- --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000)
+- --gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s)
+- --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s)
+- --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s)
+- --gh-ost-path string override default gh-ost binary full path
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --health_check_interval duration Interval between health checks (default 20s)
+- --healthcheck_retry_delay duration health check retry delay (default 2ms)
+- --healthcheck_timeout duration the health check timeout period (default 1m0s)
+- --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+- --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+- --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
+- --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+- --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+- --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+- --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
+- --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+- --lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_queries_to_file string Enable query logging to the specified file
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000)
+- --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
+- --migration_check_interval duration Interval between migration checks (default 1m0s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
+- --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default static)
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default OLTP)
+- --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
+- --mysql_server_query_timeout duration mysql query timeout
+- --mysql_server_read_timeout duration connection read timeout
+- --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
+- --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
+- --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
+- --mysql_server_ssl_cert string Path to the ssl cert for mysql server plugin SSL
+- --mysql_server_ssl_crl string Path to ssl CRL for mysql server plugin SSL
+- --mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL
+- --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+- --mysql_server_version string MySQL server version to advertise.
+- --mysql_server_write_timeout duration connection write timeout
+- --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish
+- --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default tcp)
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries
+- --normalize Whether to enable vtgate normalization
+- --normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true)
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --output-mode string Output in human-friendly text or json (default text)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4
+- --planner_version string Deprecated flag. Use planner-version instead
+- --pool-name-prefix string Deprecated
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --pprof string enable profiling
+- --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
+- --pt-osc-path string override default pt-online-schema-change binary full path
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --query-log-stream-handler string URL handler for streaming queries log (default /debug/querylog)
+- --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
+- --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+- --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-allowunsafe-dmls deprecated
+- --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+- --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+- --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-dml-rows int query server max dml rows per statement, maximum number of rows allowed to return at a time for an update or delete with either 1) an equality where clauses on primary keys, or 2) a subselect statement. For update and delete statements in above two categories, vttablet will split the original query into multiple small queries based on this configuration value.
+- --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-prefill-parallelism int DEPRECATED: Unused.
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+- --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
+- --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-prefill-parallelism int query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+- --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+- --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000)
+- --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
+- --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
+- --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true)
+- --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+- --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
+- --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-prefill-parallelism int query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
+- --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+- --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
+- --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+- --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+- --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+- --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
+- --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+- --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+- --queryserver_enable_online_ddl Enable online DDL. (default true)
+- --redact-debug-ui-queries redact full queries and bind variables from debug UI
+- --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+- --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default ROW)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
+- --retry-count int retry count (default 2)
+- --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+- --schema string The SQL table schema
+- --schema-file string Identifies the file that contains the SQL table schema
+- --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
+- --schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+- --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2)
+- --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
+- --sql string A list of semicolon-delimited SQL commands to analyze
+- --sql-file string Identifies the file that contains the SQL commands to analyze
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+- --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+- --srv_topo_timeout duration topo server timeout (default 5s)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768)
+- --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20)
+- --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default hold,purge,evac,drop)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
+- --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
+- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+- --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default replica)
+- --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_read_concurrency int concurrent topo reads (default 32)
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction-log-stream-handler string URL handler for streaming transactions log (default /debug/txlog)
+- --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default MULTI)
+- --transaction_shutdown_grace_period float DEPRECATED: use shutdown_grace_period instead.
+- --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+- --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+- --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-config string Synonym to -tx_throttler_config (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx-throttler-healthcheck-cells value Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx_throttler_healthcheck_cells value A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+- --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+- --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+- --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+- --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
+- --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s)
+- --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+- --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+- --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1)
+- --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence (default 15m0s)
+- --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+- --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+- --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vreplication_tablet_type string comma separated list of tablet types used as a source (default in_order:REPLICA,PRIMARY)
+- --vschema string Identifies the VTGate routing schema
+- --vschema-file string Identifies the VTGate routing schema file
+- --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
+- --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+- --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+- --vtgate-config-terse-errors prevent bind vars from escaping in returned errors
+- --vtgate_protocol string how to talk to vtgate (default grpc)
+- --warn_memory_rows int Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented. (default 30000)
+- --warn_payload_size int The warning threshold for query payloads in bytes. A payload greater than this threshold will cause the VtGateWarnings.WarnPayloadSizeExceeded counter to be incremented.
+- --warn_sharded_only If any features that are only available in unsharded mode are used, query execution warnings will be added to the session
+- --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --alsologtostderr log to standard error as well as files
++ --batch-interval duration Interval between logical time slots. (default 10ms)
++ --dbname string Optional database target to override normal routing
++ --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
++ --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi")
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
++ --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_queries_to_file string Enable query logging to the specified file
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
++ --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
++ --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
++ --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static")
++ --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP")
++ --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
++ --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
++ --mysql_server_query_timeout duration mysql query timeout (default 0s)
++ --mysql_server_read_timeout duration connection read timeout (default 0s)
++ --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
++ --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
++ --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
++ --mysql_server_ssl_cert string Path to the ssl cert for mysql server plugin SSL
++ --mysql_server_ssl_crl string Path to ssl CRL for mysql server plugin SSL
++ --mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL
++ --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
++ --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
++ --mysql_server_version string MySQL server version to advertise.
++ --mysql_server_write_timeout duration connection write timeout (default 0s)
++ --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
++ --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
++ --normalize Whether to enable vtgate normalization
++ --output-mode string Output in human-friendly text or json (default "text")
++ --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4
++ --pprof strings enable profiling
++ --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
++ --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW")
++ --schema string The SQL table schema
++ --schema-file string Identifies the file that contains the SQL table schema
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2)
++ --sql string A list of semicolon-delimited SQL commands to analyze
++ --sql-file string Identifies the file that contains the SQL commands to analyze
++ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
++ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vschema string Identifies the VTGate routing schema
++ --vschema-file string Identifies the VTGate routing schema file
diff --git a/doc/flags/14.0-to-15.0-transition/vtgate.diff b/doc/flags/14.0-to-15.0-transition/vtgate.diff
new file mode 100644
index 00000000000..6e055031636
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtgate.diff
@@ -0,0 +1,247 @@
+diff --git a/flags/14.0/vtgate.txt b/flags/15.0/vtgate.txt
+index 49c7f59..e9e8591 100644
+--- a/flags/14.0/vtgate.txt
++++ b/flags/15.0/vtgate.txt
+@@ -1,48 +1,48 @@
+ Usage of vtgate:
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
++ --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.
+ --alsologtostderr log to standard error as well as files
+ --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
+- --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default keyspace_events)
++ --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default "keyspace_events")
+ --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true.
+ --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s)
+ --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s)
+ --buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000)
+ --buffer_window duration Duration for how long a request should be buffered at most. (default 10s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cell string cell to use (default test_nj)
++ --cell string cell to use
+ --cells_to_watch string comma-separated list of cells for watching tablets
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default fail)
+- --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default direct)
+- --default_tablet_type value The default tablet type to set for queries, when one is not explicitly selected (default PRIMARY)
+- --disable_local_gateway deprecated: if specified, this process will not route any queries to local tablets in the local cell
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
++ --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default "fail")
++ --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default "direct")
++ --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
++ --discovery_high_replication_lag_minimum_serving duration Threshold above which replication lag is considered too high when applying the min_number_serving_vttablets flag. (default 2h0m0s)
++ --discovery_low_replication_lag duration Threshold below which replication lag is considered low enough to be healthy. (default 30s)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --enable-partial-keyspace-migration (Experimental) Follow shard routing rules: enable only while migrating a keyspace shard by shard. See documentation on Partial MoveTables for more. (default false)
+ --enable_buffer Enable buffering (stalling) of primary traffic during failovers.
+ --enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests.
+ --enable_direct_ddl Allow users to submit direct DDL statements (default true)
+ --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
+ --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true)
+ --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
+- --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default allow)
++ --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
+ --gate_query_cache_lfu gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+ --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+ --gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000)
+ --gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s)
++ --grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
++ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -51,8 +51,8 @@ Usage of vtgate:
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+@@ -61,14 +61,15 @@ Usage of vtgate:
+ --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal.
+ --healthcheck_retry_delay duration health check retry delay (default 2ms)
+ --healthcheck_timeout duration the health check timeout period (default 1m0s)
++ -h, --help display usage and exit
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
++ --keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
++ --legacy_replication_lag_algorithm Use the legacy algorithm when selecting vttablets for serving. (default true)
+ --lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_queries_to_file string Enable query logging to the specified file
+@@ -76,34 +77,33 @@ Usage of vtgate:
+ --logtostderr log to standard error instead of files
+ --max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000)
+ --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+ --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
++ --min_number_serving_vttablets int The minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving. (default 2)
++ --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
+ --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
+- --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default static)
++ --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static")
+ --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+ --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+ --mysql_auth_static_reload_interval duration Ticker to reload credentials
+ --mysql_auth_vault_addr string URL to Vault server
+ --mysql_auth_vault_path string Vault path to vtgate credentials JSON blob, e.g.: secret/data/prod/vtgatecreds
+- --mysql_auth_vault_role_mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
++ --mysql_auth_vault_role_mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --mysql_auth_vault_role_secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --mysql_auth_vault_roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --mysql_auth_vault_timeout duration Timeout for vault API operations (default 10s)
+ --mysql_auth_vault_tls_ca string Path to CA PEM for validating Vault server certificate
+ --mysql_auth_vault_tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --mysql_auth_vault_ttl duration How long to cache vtgate credentials from the Vault server (default 30m0s)
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default OLTP)
++ --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
++ --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP")
+ --mysql_ldap_auth_config_file string JSON File from which to read LDAP server config.
+ --mysql_ldap_auth_config_string string JSON representation of LDAP server config.
+- --mysql_ldap_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
++ --mysql_ldap_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
+ --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
+ --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+ --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
+- --mysql_server_query_timeout duration mysql query timeout
+- --mysql_server_read_timeout duration connection read timeout
++ --mysql_server_query_timeout duration mysql query timeout (default 0s)
++ --mysql_server_read_timeout duration connection read timeout (default 0s)
+ --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
+ --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
+ --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
+@@ -113,9 +113,9 @@ Usage of vtgate:
+ --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --mysql_server_version string MySQL server version to advertise.
+- --mysql_server_write_timeout duration connection write timeout
+- --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish
+- --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default tcp)
++ --mysql_server_write_timeout duration connection write timeout (default 0s)
++ --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
++ --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
+ --no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries
+ --normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+@@ -123,13 +123,13 @@ Usage of vtgate:
+ --opentsdb_uri string URI of opentsdb /api/put method
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.
+- --planner_version string Deprecated flag. Use planner-version instead
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
+ --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
++ --querylog-format string format for query logs ("text" or "json") (default "text")
+ --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+ --redact-debug-ui-queries redact full queries and bind variables from debug UI
+ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+@@ -137,7 +137,7 @@ Usage of vtgate:
+ --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
+ --schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+ --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+@@ -145,27 +145,26 @@ Usage of vtgate:
+ --srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --statsd_address string Address for statsd client
+- --statsd_sample_rate float (default 1)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --statsd_sample_rate float Sample rate for statsd metrics (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768)
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
++ --tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_grpc_key string the key to use to connect
+ --tablet_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_types_to_wait string wait till connected for specified tablet types during Gateway initialization
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
++ --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
++ --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
++ --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
++ --tablet_types_to_wait strings Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.
++ --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+@@ -178,21 +177,21 @@ Usage of vtgate:
+ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_read_concurrency int concurrent topo reads (default 32)
++ --topo_read_concurrency int Concurrency of topo reads. (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
++ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default MULTI)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
+ --vtctld_addr string address of a vtctld instance
+ --vtgate-config-terse-errors prevent bind vars from escaping in returned errors
diff --git a/doc/flags/14.0-to-15.0-transition/vtgr.diff b/doc/flags/14.0-to-15.0-transition/vtgr.diff
new file mode 100644
index 00000000000..226fd80b7b0
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtgr.diff
@@ -0,0 +1,187 @@
+diff --git a/flags/14.0/vtgr.txt b/flags/15.0/vtgr.txt
+index a4c928e..75e7b0a 100644
+--- a/flags/14.0/vtgr.txt
++++ b/flags/15.0/vtgr.txt
+@@ -1,111 +1,72 @@
+ Usage of vtgr:
+- --abort_rebootstrap don't allow vtgr to rebootstrap an existing group
+- --alsologtostderr log to standard error as well as files
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --clusters_to_watch string Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db_config string full path to db config file that will be used by VTGR
+- --db_flavor string mysql flavor override (default MySQL56)
+- --db_port int local mysql port, set this to enable local fast check
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable_heartbeat_check enable heartbeat checking, set together with -group_heartbeat_threshold
+- --gr_port int port to bootstrap a mysql group (default 33061)
+- --group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with -enable_heartbeat_check
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --ping_tablet_timeout duration time to wait when we ping a tablet (default 2s)
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --refresh_interval duration refresh interval to load tablets (default 10s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --scan_interval duration scan interval to diagnose and repair (default 3s)
+- --scan_repair_timeout duration time to wait for a Diagnose and repair operation (default 3s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vtgr_config string config file for vtgr
++ --abort_rebootstrap Don't allow vtgr to rebootstrap an existing group.
++ --alsologtostderr log to standard error as well as files
++ --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --db_config string Full path to db config file that will be used by VTGR.
++ --db_flavor string MySQL flavor override. (default "MySQL56")
++ --db_port int Local mysql port, set this to enable local fast check.
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --enable_heartbeat_check Enable heartbeat checking, set together with --group_heartbeat_threshold.
++ --gr_port int Port to bootstrap a MySQL group. (default 33061)
++ --group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check.
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --ping_tablet_timeout duration time to wait when we ping a tablet (default 2s)
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --refresh_interval duration Refresh interval to load tablets. (default 10s)
++ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
++ --scan_interval duration Scan interval to diagnose and repair. (default 3s)
++ --scan_repair_timeout duration Time to wait for a Diagnose and repair operation. (default 3s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
++ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
++ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
++ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
++ --topo_global_root string the path of the global topology data in the global topology server
++ --topo_global_server_address string the address of the global topology server
++ --topo_implementation string the topology implementation to use
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vtgr_config string Config file for vtgr.
diff --git a/doc/flags/14.0-to-15.0-transition/vtorc.diff b/doc/flags/14.0-to-15.0-transition/vtorc.diff
new file mode 100644
index 00000000000..3bf06c2a6a4
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtorc.diff
@@ -0,0 +1,212 @@
+diff --git a/flags/14.0/vtorc.txt b/flags/15.0/vtorc.txt
+index 732595e..74ab84c 100644
+--- a/flags/14.0/vtorc.txt
++++ b/flags/15.0/vtorc.txt
+@@ -1,123 +1,85 @@
+ Usage of vtorc:
+- --alsologtostderr log to standard error as well as files
+- --binlog string Binary log file name
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --clusters_to_watch string Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
+- --config string config file name
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --d string destination instance, host_fqdn[:port] (synonym to -s)
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --debug debug mode (very verbose)
+- --discovery auto discovery mode (default true)
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-database-update Enable database update, overrides SkipOrchestratorDatabaseUpdate
+- --grab-election Grab leadership (only applies to continuous mode)
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --ignore-raft-setup Override RaftEnabled for CLI invocation (CLI by default not allowed for raft setups). NOTE: operations by CLI invocation may not reflect in all raft nodes.
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --noop Dry run; do not perform destructing operations
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --orc_web_dir string Orchestrator http file location (default "web/orchestrator")
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pprof string enable profiling
+- --promotion-rule string Promotion rule for register-andidate (prefer|neutral|prefer_not|must_not) (default "prefer")
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --quiet quiet
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --s string sibling instance, host_fqdn[:port]
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --shutdown_wait_time duration maximum time to wait for vtorc to release all the locks that it is holding before shutting down on SIGTERM (default 30s)
+- --skip-continuous-registration Skip cli commands performaing continuous registration (to reduce orchestratrator backend db load
+- --skip-unresolve Do not unresolve a host name
+- --skip-unresolve-check Skip/ignore checking an unresolve mapping (via hostname_unresolve table) resolves back to same hostname
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stack add stack trace upon error
+- --statement string Statement/hint
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default "grpc")
+- --tag string tag to add ('tagname' or 'tagname=tagvalue') or to search ('tagname' or 'tagname=tagvalue' or comma separated 'tag0,tag1=val1,tag2' for intersection of all)
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+- --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+- --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default "noop")
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --verbose verbose
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --alsologtostderr log to standard error as well as files
++ --audit-file-location string File location where the audit logs are to be stored
++ --audit-purge-duration duration Duration for which audit logs are held before being purged. Should be in multiples of days (default 168h0m0s)
++ --audit-to-backend Whether to store the audit log in the VTOrc database
++ --audit-to-syslog Whether to store the audit log in the syslog
++ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
++ --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
++ --config string config file name
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --instance-poll-time duration Timer duration on which VTOrc refreshes MySQL information (default 5s)
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
++ --lock-shard-timeout duration Duration for which a shard lock is held when running a recovery (default 30s)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
++ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
++ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
++ --port int port for the server
++ --pprof strings enable profiling
++ --prevent-cross-cell-failover Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s)
++ --recovery-period-block-duration duration Duration for which a new recovery is blocked on an instance after running a recovery (default 30s)
++ --recovery-poll-duration duration Timer duration on which VTOrc polls its database to run a recovery (default 1s)
++ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --shutdown_wait_time duration Maximum time to wait for VTOrc to release all the locks that it is holding before shutting down on SIGTERM (default 30s)
++ --snapshot-topology-interval duration Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours
++ --sqlite-data-file string SQLite Datafile to use as VTOrc's database (default "file::memory:?mode=memory&cache=shared")
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo-information-refresh-duration duration Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server (default 15s)
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
++ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
++ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
++ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
++ --topo_global_root string the path of the global topology data in the global topology server
++ --topo_global_server_address string the address of the global topology server
++ --topo_implementation string the topology implementation to use
++ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
++ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
++ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --wait-replicas-timeout duration Duration for which to wait for replica's to respond when issuing RPCs (default 30s)
diff --git a/doc/flags/14.0-to-15.0-transition/vttablet.diff b/doc/flags/14.0-to-15.0-transition/vttablet.diff
new file mode 100644
index 00000000000..a42e8f6b932
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vttablet.diff
@@ -0,0 +1,577 @@
+diff --git a/flags/14.0/vttablet.txt b/flags/15.0/vttablet.txt
+index 96a4298..25807a7 100644
+--- a/flags/14.0/vttablet.txt
++++ b/flags/15.0/vttablet.txt
+@@ -1,19 +1,17 @@
+ Usage of vttablet:
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+- --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)
+- --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used
+- --azblob_backup_container_name string Azure Blob Container Name
+- --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased) (default 1)
+- --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/')
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
++ --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
++ --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
++ --azblob_backup_container_name string Azure Blob Container Name.
++ --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
++ --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
++ --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
+ --binlog_host string PITR restore parameter: hostname/IP of binlog server.
+ --binlog_password string PITR restore parameter: password of binlog server.
+ --binlog_player_grpc_ca string the server ca to use to validate servers when connecting
+@@ -21,120 +19,29 @@ Usage of vttablet:
+ --binlog_player_grpc_crl string the server crl to use to validate server certificates when connecting
+ --binlog_player_grpc_key string the key to use to connect
+ --binlog_player_grpc_server_name string the server name to use to validate server certificate
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default grpc)
++ --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc")
+ --binlog_port int PITR restore parameter: port of binlog server.
+ --binlog_ssl_ca string PITR restore parameter: Filename containing TLS CA certificate to verify binlog server TLS certificate against.
+ --binlog_ssl_cert string PITR restore parameter: Filename containing mTLS client certificate to present to binlog server as authentication.
+ --binlog_ssl_key string PITR restore parameter: Filename containing mTLS client private key for use in binlog server authentication.
+- --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in -binlog_host).
+- --binlog_use_v3_resharding_mode (DEPRECATED) True if and only if the binlog streamer should use V3-style sharding, which doesn't require a preset sharding key column. (default true)
++ --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in --binlog_host).
+ --binlog_user string PITR restore parameter: username of binlog server.
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
++ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
++ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --ceph_backup_storage_config string Path to JSON config file for ceph backup storage (default ceph_backup_config.json)
+- --client-found-rows-pool-size int DEPRECATED: queryserver-config-transaction-cap will be used instead.
++ --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
++ --compression-engine-name string compressor engine used for compression. (default "pargzip")
++ --compression-level int what level to pass to the compressor. (default 1)
++ --consolidator-stream-query-size int Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator. (default 2097152)
++ --consolidator-stream-total-size int Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator. (default 134217728)
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-allprivs-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-allprivs-flags uint deprecated: use db_flags
+- --db-config-allprivs-flavor string deprecated: use db_flavor
+- --db-config-allprivs-host string deprecated: use db_host
+- --db-config-allprivs-pass string db allprivs deprecated: use db_allprivs_password
+- --db-config-allprivs-port int deprecated: use db_port
+- --db-config-allprivs-server_name string deprecated: use db_server_name
+- --db-config-allprivs-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-allprivs-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-allprivs-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-allprivs-ssl-key string deprecated: use db_ssl_key
+- --db-config-allprivs-uname string deprecated: use db_allprivs_user (default vt_allprivs)
+- --db-config-allprivs-unixsocket string deprecated: use db_socket
+- --db-config-app-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-app-flags uint deprecated: use db_flags
+- --db-config-app-flavor string deprecated: use db_flavor
+- --db-config-app-host string deprecated: use db_host
+- --db-config-app-pass string db app deprecated: use db_app_password
+- --db-config-app-port int deprecated: use db_port
+- --db-config-app-server_name string deprecated: use db_server_name
+- --db-config-app-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-app-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-app-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-app-ssl-key string deprecated: use db_ssl_key
+- --db-config-app-uname string deprecated: use db_app_user (default vt_app)
+- --db-config-app-unixsocket string deprecated: use db_socket
+- --db-config-appdebug-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-appdebug-flags uint deprecated: use db_flags
+- --db-config-appdebug-flavor string deprecated: use db_flavor
+- --db-config-appdebug-host string deprecated: use db_host
+- --db-config-appdebug-pass string db appdebug deprecated: use db_appdebug_password
+- --db-config-appdebug-port int deprecated: use db_port
+- --db-config-appdebug-server_name string deprecated: use db_server_name
+- --db-config-appdebug-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-appdebug-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-appdebug-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-appdebug-ssl-key string deprecated: use db_ssl_key
+- --db-config-appdebug-uname string deprecated: use db_appdebug_user (default vt_appdebug)
+- --db-config-appdebug-unixsocket string deprecated: use db_socket
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+- --db-config-erepl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-erepl-dbname string deprecated: dbname does not need to be explicitly configured
+- --db-config-erepl-flags uint deprecated: use db_flags
+- --db-config-erepl-flavor string deprecated: use db_flavor
+- --db-config-erepl-host string deprecated: use db_host
+- --db-config-erepl-pass string db erepl deprecated: use db_erepl_password
+- --db-config-erepl-port int deprecated: use db_port
+- --db-config-erepl-server_name string deprecated: use db_server_name
+- --db-config-erepl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-erepl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-erepl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-erepl-ssl-key string deprecated: use db_ssl_key
+- --db-config-erepl-uname string deprecated: use db_erepl_user (default vt_erepl)
+- --db-config-erepl-unixsocket string deprecated: use db_socket
+- --db-config-filtered-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-filtered-flags uint deprecated: use db_flags
+- --db-config-filtered-flavor string deprecated: use db_flavor
+- --db-config-filtered-host string deprecated: use db_host
+- --db-config-filtered-pass string db filtered deprecated: use db_filtered_password
+- --db-config-filtered-port int deprecated: use db_port
+- --db-config-filtered-server_name string deprecated: use db_server_name
+- --db-config-filtered-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-filtered-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-filtered-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-filtered-ssl-key string deprecated: use db_ssl_key
+- --db-config-filtered-uname string deprecated: use db_filtered_user (default vt_filtered)
+- --db-config-filtered-unixsocket string deprecated: use db_socket
+- --db-config-repl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-repl-flags uint deprecated: use db_flags
+- --db-config-repl-flavor string deprecated: use db_flavor
+- --db-config-repl-host string deprecated: use db_host
+- --db-config-repl-pass string db repl deprecated: use db_repl_password
+- --db-config-repl-port int deprecated: use db_port
+- --db-config-repl-server_name string deprecated: use db_server_name
+- --db-config-repl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-repl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-repl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-repl-ssl-key string deprecated: use db_ssl_key
+- --db-config-repl-uname string deprecated: use db_repl_user (default vt_repl)
+- --db-config-repl-unixsocket string deprecated: use db_socket
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+@@ -143,84 +50,82 @@ Usage of vttablet:
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+ --db_allprivs_password string db allprivs password
+ --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
+- --db_allprivs_user string db allprivs user userKey (default vt_allprivs)
++ --db_allprivs_user string db allprivs user userKey (default "vt_allprivs")
+ --db_app_password string db app password
+ --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
+- --db_app_user string db app user userKey (default vt_app)
++ --db_app_user string db app user userKey (default "vt_app")
+ --db_appdebug_password string db appdebug password
+ --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
+- --db_appdebug_user string db appdebug user userKey (default vt_appdebug)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
++ --db_appdebug_user string db appdebug user userKey (default "vt_appdebug")
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
++ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_erepl_password string db erepl password
+ --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
+- --db_erepl_user string db erepl user userKey (default vt_erepl)
++ --db_erepl_user string db erepl user userKey (default "vt_erepl")
+ --db_filtered_password string db filtered password
+ --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
+- --db_filtered_user string db filtered user userKey (default vt_filtered)
++ --db_filtered_user string db filtered user userKey (default "vt_filtered")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor overrid. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+ --db_port int tcp port
+ --db_repl_password string db repl password
+ --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
+- --db_repl_user string db repl user userKey (default vt_repl)
++ --db_repl_user string db repl user userKey (default "vt_repl")
+ --db_server_name string server name of the DB we are connecting to.
+ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+ --db_ssl_ca string connection ssl ca
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+ --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
++ --disable-replication-manager Disable replication manager to prevent replication repairs.
+ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-autocommit This flag is deprecated. Autocommit is always allowed. (default true)
+ --enable-consolidator Synonym to -enable_consolidator (default true)
+ --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+ --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
+ --enable-tx-throttler Synonym to -enable_tx_throttler
+ --enable_consolidator This option enables the query consolidator. (default true)
+ --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+ --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+ --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+ --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+ --enable_replication_reporter Use polling to track replication lag.
+- --enable_semi_sync Enable semi-sync when configuring replication, on primary and replica tablets only (rdonly tablets will not ack).
+ --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+ --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+ --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+ --enforce-tableacl-config if this flag is true, vttablet will fail to start if a valid tableacl config does not exist
+ --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+- --file_backup_storage_root string root directory for the file backup storage
++ --external-compressor string command with arguments to use when compressing a backup.
++ --external-compressor-extension string extension to use when using an external compressor.
++ --external-decompressor string command with arguments to use when decompressing a backup.
++ --file_backup_storage_root string Root directory for the file backup storage.
+ --filecustomrules string file based custom rule path
+ --filecustomrules_watch set up a watch on the target file and reload query rules when it changes
+ --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s)
+ --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s)
+- --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups
+- --gcs_backup_storage_root string root prefix for all backup-related object names
++ --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
++ --gcs_backup_storage_root string Root prefix for all backup-related object names.
+ --gh-ost-path string override default gh-ost binary full path
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
++ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -229,8 +134,8 @@ Usage of vttablet:
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+@@ -240,6 +145,7 @@ Usage of vttablet:
+ --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+ --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+ --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
++ -h, --help display usage and exit
+ --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+ --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+ --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+@@ -248,27 +154,22 @@ Usage of vttablet:
+ --init_populate_metadata (init parameter) populate metadata tables even if restore_from_backup is disabled. If restore_from_backup is enabled, metadata tables are always populated regardless of this flag.
+ --init_shard string (init parameter) shard to use for this tablet
+ --init_tablet_type string (init parameter) the tablet type to use for this tablet.
+- --init_tags value (init parameter) comma separated list of key:value pairs used to tag the tablet
++ --init_tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet
+ --init_timeout duration (init parameter) timeout to use for the init phase. (default 1m0s)
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+ --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_queries Enable query logging to syslog.
+ --log_queries_to_file string Enable query logging to the specified file
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
++ --max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256)
+ --migration_check_interval duration Interval between migration checks (default 1m0s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+ --mycnf-file string path to my.cnf, if reading all config params from there
+ --mycnf_bin_log_path string mysql binlog path
+ --mycnf_data_dir string data directory for mysql
+@@ -287,48 +188,32 @@ Usage of vttablet:
+ --mycnf_slow_log_path string mysql slow query log path
+ --mycnf_socket_file string mysql socket file
+ --mycnf_tmp_dir string mysql tmp directory
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+ --mysql_server_version string MySQL server version to advertise.
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --opentsdb_uri string URI of opentsdb /api/put method
+- --orc_api_password string (Optional) Basic auth password to authenticate with Orchestrator's HTTP API.
+- --orc_api_url string Address of Orchestrator's HTTP API (e.g. http://host:port/api/). Leave empty to disable Orchestrator integration.
+- --orc_api_user string (Optional) Basic auth username to authenticate with Orchestrator's HTTP API. Leave empty to disable basic auth.
+- --orc_discover_interval duration How often to ping Orchestrator's HTTP API endpoint to tell it we exist. 0 means never.
+- --orc_timeout duration Timeout for calls to Orchestrator's HTTP API (default 30s)
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pitr_gtid_lookup_timeout duration PITR restore parameter: timeout for fetching gtid from timestamp. (default 1m0s)
+- --pool-name-prefix string Deprecated
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --pt-osc-path string override default pt-online-schema-change binary full path
+ --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s)
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --query-log-stream-handler string URL handler for streaming queries log (default /debug/querylog)
++ --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog")
+ --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
++ --querylog-format string format for query logs ("text" or "json") (default "text")
+ --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+ --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-allowunsafe-dmls deprecated
+ --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+ --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+ --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-dml-rows int query server max dml rows per statement, maximum number of rows allowed to return at a time for an update or delete with either 1) an equality where clauses on primary keys, or 2) a subselect statement. For update and delete statements in above two categories, vttablet will split the original query into multiple small queries based on this configuration value.
+ --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-prefill-parallelism int DEPRECATED: Unused.
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+ --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
++ --queryserver-config-olap-transaction-timeout float query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30)
+ --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-prefill-parallelism int query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+ --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+ --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+ --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+@@ -340,18 +225,17 @@ Usage of vttablet:
+ --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+ --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
+ --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-prefill-parallelism int query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
+ --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+ --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
+ --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+ --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+ --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+ --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+ --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
+ --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+ --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+ --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
++ --queryserver-enable-settings-pool Enable pooling of connections with modified system settings
+ --queryserver_enable_online_ddl Enable online DDL. (default true)
+ --redact-debug-ui-queries redact full queries and bind variables from debug UI
+ --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+@@ -362,18 +246,18 @@ Usage of vttablet:
+ --restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there
+ --restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'
+ --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
+- --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided)
+- --s3_backup_aws_region string AWS region to use (default us-east-1)
+- --s3_backup_aws_retries int AWS request retries (default -1)
+- --s3_backup_force_path_style force the s3 path style
+- --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors (default LogOff)
+- --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file)
+- --s3_backup_storage_bucket string S3 bucket to use for backups
+- --s3_backup_storage_root string root prefix for all backup-related object names
+- --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections
++ --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
++ --s3_backup_aws_region string AWS region to use. (default "us-east-1")
++ --s3_backup_aws_retries int AWS request retries. (default -1)
++ --s3_backup_force_path_style force the s3 path style.
++ --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
++ --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
++ --s3_backup_storage_bucket string S3 bucket to use for backups.
++ --s3_backup_storage_root string root prefix for all backup-related object names.
++ --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
+ --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+ --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s)
+ --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
+@@ -384,20 +268,19 @@ Usage of vttablet:
+ --srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --statsd_address string Address for statsd client
+- --statsd_sample_rate float (default 1)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --statsd_sample_rate float Sample rate for statsd metrics (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20)
+ --table-acl-config string path to table access checker config file; send SIGHUP to reload this file
+ --table-acl-config-reload-interval duration Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload
+- --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default hold,purge,evac,drop)
++ --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default "hold,purge,evac,drop")
+ --tablet-path string tablet alias
+ --tablet_config string YAML file config for tablet
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+@@ -411,18 +294,15 @@ Usage of vttablet:
+ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_manager_grpc_key string the key to use to connect
+ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
+ --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
+ --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+ --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default replica)
++ --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica")
+ --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+@@ -435,63 +315,37 @@ Usage of vttablet:
+ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_read_concurrency int concurrent topo reads (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --topocustomrule_cell string topo cell for customrules file. (default global)
++ --topocustomrule_cell string topo cell for customrules file. (default "global")
+ --topocustomrule_path string path for customrules file. Disabled if empty.
+- --tracer string tracing service to use (default noop)
++ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
+ --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction-log-stream-handler string URL handler for streaming transactions log (default /debug/txlog)
++ --transaction-log-stream-handler string URL handler for streaming transactions log (default "/debug/txlog")
+ --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --transaction_shutdown_grace_period float DEPRECATED: use shutdown_grace_period instead.
+ --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+ --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+ --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-config string Synonym to -tx_throttler_config (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx-throttler-healthcheck-cells value Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx_throttler_healthcheck_cells value A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
++ --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
++ --tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells
++ --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
++ --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+ --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+ --use_super_read_only Set super_read_only flag when performing planned failover.
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+ --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+ --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+@@ -500,23 +354,24 @@ max_rate_approach_threshold: 0.9
+ --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+ --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+ --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1)
+- --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence (default 15m0s)
++ --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence
+ --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+ --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+ --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vreplication_tablet_type string comma separated list of tablet types used as a source (default in_order:REPLICA,PRIMARY)
++ --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY")
++ --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864)
+ --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+ --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+ --vtctld_addr string address of a vtctld instance
+- --vtgate_protocol string how to talk to vtgate (default grpc)
+- --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with -init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default /.*/)
++ --vtgate_protocol string how to talk to vtgate (default "grpc")
++ --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/")
+ --wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear
+ --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
++ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
++ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
++ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
++ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
++ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
+ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vttestserver.diff b/doc/flags/14.0-to-15.0-transition/vttestserver.diff
new file mode 100644
index 00000000000..5d18d2e83e0
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vttestserver.diff
@@ -0,0 +1,332 @@
+diff --git a/flags/14.0/vttestserver.txt b/flags/15.0/vttestserver.txt
+index 755eba1..d30ab35 100644
+--- a/flags/14.0/vttestserver.txt
++++ b/flags/15.0/vttestserver.txt
+@@ -1,184 +1,144 @@
+ Usage of vttestserver:
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cells string Comma separated list of cells (default "test")
+- --charset string MySQL charset (default "utf8mb4")
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --data_dir string Directory where the data files will be placed, defaults to a random directory under /vt/vtdataroot
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --default_schema_dir string Default directory for initial schema files. If no schema is found in schema_dir, default to this location.
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable_direct_ddl Allow users to submit direct DDL statements (default true)
+- --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
+- --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
+- --external_topo_global_root string the path of the global topology data in the global topology server for vtcombo process
+- --external_topo_global_server_address string the address of the global topology server for vtcombo process
+- --external_topo_implementation string the topology implementation to use for vtcombo process
+- --extra_my_cnf string extra files to add to the config, separated by ':'
+- --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --initialize_with_random_data If this flag is each table-shard will be initialized with random data. See also the 'rng_seed' and 'min_shard_size' and 'max_shard_size' flags.
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces string Comma separated list of keyspaces (default "test_keyspace")
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --max_table_shard_size int The maximum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly (default 10000)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --min_table_shard_size int The minimum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly. (default 1000)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_bind_host string which host to bind vtgate mysql listener to (default "localhost")
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
+- --mysql_only If this flag is set only mysql is initialized. The rest of the vitess components are not started. Also, the output specifies the mysql unix socket instead of the vtgate port.
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --null_probability float The probability to initialize a field with 'NULL' if --initialize_with_random_data is true. Only applies to fields that can contain NULL values. (default 0.1)
+- --num_shards string Comma separated shard count (one per keyspace) (default "2")
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.
+- --planner_version string planner_version is deprecated. Please use planner-version instead
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --port int Port to use for vtcombo. If this is 0, a random port will be chosen.
+- --pprof string enable profiling
+- --proto_topo string Define the fake cluster topology as a compact text format encoded vttest proto. See vttest.proto for more information.
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value
+- --rdonly_count int Rdonly tablets per shard (default 1)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replica_count int Replica tablets per shard (includes primary) (default 2)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --rng_seed int The random number generator seed to use when initializing with random data (see also --initialize_with_random_data). Multiple runs with the same seed will result with the same initial data. (default 123)
+- --schema_dir string Directory for initial schema files. Within this dir, there should be a subdir for each keyspace. Within each keyspace dir, each file is executed as SQL after the database is created on each shard. If the directory contains a vschema.json file, it will be used as the vschema for the V3 API.
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --snapshot_file string A MySQL DB snapshot file
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_hostname string The hostname to use for the tablet otherwise it will be derived from OS' hostname (default "localhost")
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default "grpc")
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default "noop")
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --transaction_mode string Transaction mode MULTI (default), SINGLE or TWOPC (default "MULTI")
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vschema_ddl_authorized_users string Comma separated list of users authorized to execute vschema ddl operations via vtgate
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default "grpc")
+- --vtctld_grpc_ca string the server ca to use to validate servers when connecting
+- --vtctld_grpc_cert string the cert to use to connect
+- --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtctld_grpc_key string the key to use to connect
+- --vtctld_grpc_server_name string the server name to use to validate server certificate
+- --vtgate_grpc_ca string the server ca to use to validate servers when connecting
+- --vtgate_grpc_cert string the cert to use to connect
+- --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtgate_grpc_key string the key to use to connect
+- --vtgate_grpc_server_name string the server name to use to validate server certificate
+- --vtgate_protocol string how to talk to vtgate (default "grpc")
+- --workflow_manager_init Enable workflow manager
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --alsologtostderr log to standard error as well as files
++ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
++ --app_pool_size int Size of the connection pool for app connections (default 40)
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
++ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
++ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
++ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
++ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
++ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
++ --cells strings Comma separated list of cells (default [test])
++ --charset string MySQL charset (default "utf8mb4")
++ --compression-engine-name string compressor engine used for compression. (default "pargzip")
++ --compression-level int what level to pass to the compressor. (default 1)
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --data_dir string Directory where the data files will be placed, defaults to a random directory under /vt/vtdataroot
++ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
++ --dba_pool_size int Size of the connection pool for dba connections (default 20)
++ --default_schema_dir string Default directory for initial schema files. If no schema is found in schema_dir, default to this location.
++ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
++ --enable_direct_ddl Allow users to submit direct DDL statements (default true)
++ --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
++ --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
++ --external-compressor string command with arguments to use when compressing a backup.
++ --external-compressor-extension string extension to use when using an external compressor.
++ --external-decompressor string command with arguments to use when decompressing a backup.
++ --external_topo_global_root string the path of the global topology data in the global topology server for vtcombo process
++ --external_topo_global_server_address string the address of the global topology server for vtcombo process
++ --external_topo_implementation string the topology implementation to use for vtcombo process
++ --extra_my_cnf string extra files to add to the config, separated by ':'
++ --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
++ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
++ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
++ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
++ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
++ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
++ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
++ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
++ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
++ --grpc_server_initial_window_size int gRPC server initial window size
++ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
++ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
++ -h, --help display usage and exit
++ --initialize_with_random_data If this flag is each table-shard will be initialized with random data. See also the 'rng_seed' and 'min_shard_size' and 'max_shard_size' flags.
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --keyspaces strings Comma separated list of keyspaces (default [test_keyspace])
++ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --max_table_shard_size int The maximum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly (default 10000)
++ --min_table_shard_size int The minimum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly. (default 1000)
++ --mysql_bind_host string which host to bind vtgate mysql listener to (default "localhost")
++ --mysql_only If this flag is set only mysql is initialized. The rest of the vitess components are not started. Also, the output specifies the mysql unix socket instead of the vtgate port.
++ --mysql_server_version string MySQL server version to advertise.
++ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
++ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
++ --null_probability float The probability to initialize a field with 'NULL' if --initialize_with_random_data is true. Only applies to fields that can contain NULL values. (default 0.1)
++ --num_shards strings Comma separated shard count (one per keyspace) (default [2])
++ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
++ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
++ --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet
++ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
++ --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.
++ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
++ --port int Port to use for vtcombo. If this is 0, a random port will be chosen.
++ --pprof strings enable profiling
++ --proto_topo string Define the fake cluster topology as a compact text format encoded vttest proto. See vttest.proto for more information.
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value
++ --rdonly_count int Rdonly tablets per shard (default 1)
++ --replica_count int Replica tablets per shard (includes primary) (default 2)
++ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
++ --rng_seed int The random number generator seed to use when initializing with random data (see also --initialize_with_random_data). Multiple runs with the same seed will result with the same initial data. (default 123)
++ --schema_dir string Directory for initial schema files. Within this dir, there should be a subdir for each keyspace. Within each keyspace dir, each file is executed as SQL after the database is created on each shard. If the directory contains a vschema.json file, it will be used as the vschema for the V3 API.
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
++ --snapshot_file string A MySQL DB snapshot file
++ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
++ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
++ --tablet_hostname string The hostname to use for the tablet otherwise it will be derived from OS' hostname (default "localhost")
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --transaction_mode string Transaction mode MULTI (default), SINGLE or TWOPC (default "MULTI")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vschema_ddl_authorized_users string Comma separated list of users authorized to execute vschema ddl operations via vtgate
++ --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
++ --vtctld_grpc_ca string the server ca to use to validate servers when connecting
++ --vtctld_grpc_cert string the cert to use to connect
++ --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtctld_grpc_key string the key to use to connect
++ --vtctld_grpc_server_name string the server name to use to validate server certificate
++ --vtgate_grpc_ca string the server ca to use to validate servers when connecting
++ --vtgate_grpc_cert string the cert to use to connect
++ --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtgate_grpc_key string the key to use to connect
++ --vtgate_grpc_server_name string the server name to use to validate server certificate
++ --workflow_manager_init Enable workflow manager
++ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
++ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
++ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
++ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
++ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
++ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
++ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
++ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vttlstest.diff b/doc/flags/14.0-to-15.0-transition/vttlstest.diff
new file mode 100644
index 00000000000..d0c86525b9a
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vttlstest.diff
@@ -0,0 +1,37 @@
+diff --git a/flags/14.0/vttlstest.txt b/flags/15.0/vttlstest.txt
+index 87321df..e149cf2 100644
+--- a/flags/14.0/vttlstest.txt
++++ b/flags/15.0/vttlstest.txt
+@@ -1,13 +1,19 @@
+-Usage of vttlstest:
+- --alsologtostderr log to standard error as well as files
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --root string root directory for certificates and keys (default ".")
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --v value log level for V logs
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.
++
++Usage:
++ vttlstest [command]
++
++Available Commands:
++ CreateCA Create certificate authority
++ CreateCRL Create certificate revocation list
++ CreateIntermediateCA Create intermediate certificate authority
++ CreateSignedCert Create signed certificate
++ RevokeCert Revoke a certificate
++ completion Generate the autocompletion script for the specified shell
++ help Help about any command
++
++Flags:
++ -h, --help help for vttlstest
++ --root string root directory for all artifacts (default ".")
++
++Use "vttlstest [command] --help" for more information about a command.
diff --git a/doc/flags/14.0-to-15.0-transition/zk.diff b/doc/flags/14.0-to-15.0-transition/zk.diff
new file mode 100644
index 00000000000..9fb66007217
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/zk.diff
@@ -0,0 +1,14 @@
+diff --git a/flags/14.0/zk.txt b/flags/15.0/zk.txt
+new file mode 100644
+index 0000000..443bf0b
+--- /dev/null
++++ b/flags/15.0/zk.txt
+@@ -0,0 +1,8 @@
++Usage of zk:
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --server string server(s) to connect to
diff --git a/doc/flags/14.0-to-15.0-transition/zkctl.diff b/doc/flags/14.0-to-15.0-transition/zkctl.diff
new file mode 100644
index 00000000000..4567181a701
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/zkctl.diff
@@ -0,0 +1,24 @@
+diff --git a/flags/14.0/zkctl.txt b/flags/15.0/zkctl.txt
+new file mode 100644
+index 0000000..e7e41c4
+--- /dev/null
++++ b/flags/15.0/zkctl.txt
+@@ -0,0 +1,18 @@
++Usage of zkctl:
++ --alsologtostderr log to standard error as well as files
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
++ --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/doc/flags/14.0-to-15.0-transition/zkctld.diff b/doc/flags/14.0-to-15.0-transition/zkctld.diff
new file mode 100644
index 00000000000..89576d5b422
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/zkctld.diff
@@ -0,0 +1,37 @@
+diff --git a/flags/14.0/zkctld.txt b/flags/15.0/zkctld.txt
+index 5bad4f2..6ec026b 100644
+--- a/flags/14.0/zkctld.txt
++++ b/flags/15.0/zkctld.txt
+@@ -1,14 +1,19 @@
+ Usage of zkctld:
+- --alsologtostderr log to standard error as well as files
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --v value log level for V logs
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default 6@:3801:3802:3803)
+- --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
++ --alsologtostderr log to standard error as well as files
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
++ --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/doc/internal/ReleaseInstructions.md b/doc/internal/ReleaseInstructions.md
index 014487568cc..f45b9fb5902 100644
--- a/doc/internal/ReleaseInstructions.md
+++ b/doc/internal/ReleaseInstructions.md
@@ -55,7 +55,7 @@ Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`.
## Release Branches
Each major and minor releases (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named
-`release-X.Y`. This branch should diverge from `main` when the code freeze when the release
+`release-X.Y`. This branch should diverge from `main` when the release
is declared, after which point only bugfix PRs should be cherry-picked onto the branch.
All other activity on `main` will go out with a subsequent major or minor release.
@@ -109,6 +109,7 @@ Therefore, file a JIRA ticket with Sonatype to get added ([example for a differe
Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html).
Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`.
+for Mac you need to install 'gnupg' via 'brew install gnupg'
#### Login configuration
@@ -118,9 +119,17 @@ Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [i
## Release Cutover
-In this section we describe our current release process. We begin with a short [**overview**](#overview).
+In this section we describe our current release process. We begin with a list of [**pre-requisite for the release team**](#pre-requisites) and with a short [**overview**](#overview).
The release process is divided into three parts: [**Pre-Release**](#pre-release), [**Release**](#release), [**Post-Release**](#post-release), which are detailed after the overview.
+### Pre-Requisites
+
+This section highlights the different pre-requisites the release team has to meet before releasing.
+
+- The tool `gh` must be installed locally and ready to be used.
+- You must have access to the Java release, more information in the [**Java Packages**](#java-packages) section.
+- You must be able to create branches and have admin right on the `vitessio/vitess` and `planetscale/vitess-operator` repositories.
+
### Overview
#### Schedule
@@ -131,7 +140,8 @@ We usually create the RC1 during the first week of the month, and the GA version
#### Code Freeze
Before creating RC1, there is a code freeze. Assuming the release of RC1 happens on a Tuesday, the release branch will be frozen Friday of the previous week.
-This allows us to test that the release branch can be released and avoid discovering unwanted events during the release day. Once the RC1 is released, there are three more weeks to backport bug fixes into the release branches. However, we also proceed to a code freeze the Friday before the GA release. (Assuming GA is on a Tuesday)
+This allows us to test that the release branch can be released and avoid discovering unwanted events during the release day. Once the RC1 is released, there are three more weeks to backport bug fixes into the release branches.
+However, we also proceed to a code freeze the Friday before the GA release. (Assuming GA is on a Tuesday)
Regarding patch releases, no code freeze is planned.
#### Tracking Issue for each Release
@@ -151,8 +161,9 @@ That includes:
> - This includes write access to the Vitess repository and to the Maven repository.
- **Preparing and cleaning the release notes summary.**
> - One or more Pull Requests have to be submitted in advance to create and update the release summary.
- > - The summary files are located in: `./doc/releasenotes/*_*_*_summary.md`.
+ > - The summary files are located in: `./changelog/*.0/*.*.*/summary.md`.
> - The summary file for a release candidate is the same as the one for the GA release.
+ > - Make sure to run `go run ./go/tools/releases/releases.go` to update the `changelog` directory with the latest release notes.
- **Finishing the blog post, and coordinating with the different organizations for cross-posting. Usually CNCF and PlanetScale. This step applies only for GA releases.**
> - The blog post must be finished and reviewed.
> - A Pull Request on the website repository of Vitess has to be created so we can easily publish the blog during the release day.
@@ -160,6 +171,9 @@ That includes:
> - As soon as we go into code freeze, if we are doing an RC, create the release branch.
> - If we are doing a GA release, do not merge any new Pull Requests.
> - The guide on how to do a code freeze is available in the [How To Code Freeze](#how-to-code-freeze) section.
+- **Create the Vitess release.**
+ > - A guide on how to create a Vitess release is available in the [How to prepare the release of Vitess](#how-to-prepare-the-release-of-vitess) section.
+ > - This step will create a Release Pull Request, it must be reviewed and merged before the release day. The release commit will be used to tag the release.
- **Preparing the Vitess Operator release.**
> - While the Vitess Operator is located in a different repository, we also need to do a release for it.
> - The Operator follows the same cycle: RC1 -> GA -> Patches.
@@ -169,8 +183,8 @@ That includes:
On the release day, there are several things to do:
-- **Create the Vitess release.**
- > - A guide on how to create a Vitess release is available in the [How To Release Vitess](#how-to-release-vitess) section.
+- **Tag the Vitess release.**
+ > - A guide on how to tag a version is available in the [How To Release Vitess](#how-to-release-vitess) section.
- **Create the corresponding Vitess operator release.**
> - Applies only to versions greater or equal to `v14.0.0`.
> - If we are doing an RC release, then we will need to create the Vitess Operator RC too. If we are doing a GA release, we're also doing a GA release in the Operator.
@@ -192,16 +206,72 @@ On the release day, there are several things to do:
> - After a while, those elements will finish their execution and their status will be green.
> - This step is even more important for GA releases as we often include a link to _arewefastyet_ in the blog post.
> - The benchmarks need to complete before announcing the blog posts or before they get cross-posted.
-- **Update the release notes on the release branch and on `main`.**
- > - Two new Pull Requests have to be created.
- > - One against `main`, it will contain only the new release notes.
- > - And another against the release branch, this one contains the release notes and the release commit. (The commit on which we did `git tag`)
+- **Update the release notes on `main`.**
+ > - One Pull Request against `main` must be created, it will contain the new release notes.
+- **Go back to dev mode on the release branch.**
+ > - The version constants across the codebase must be updated to `SNAPSHOT`.
+- **Build k8s Docker images and publish them**
+ > - The docker image for `base`, `lite`, etc are built automatically by DockerHub. The k8s images however are dependent on these images and are required to be built manually.
+ > - These images should be built after the `base` image has been built and available on DockerHub.
+ > - To build and publish these images, run `./release.sh` from the directory `vitess/docker`.
### Post-Release
Once the release is over, we need to announce it on both Slack and Twitter. We also want to make sure the blog post was cross-posted, if applicable.
We need to verify that _arewefastyet_ has finished the benchmark too.
+### How to prepare the release of Vitess
+
+> In this example our current version is `v14.0.3` and we release the version `v15.0.0`.
+> Alongside Vitess' release, we also release a new version of the operator.
+> Since we are releasing a release candidate here, the new version of the operator will also be a release candidate.
+> In this example, the new operator version is `2.8.0`.
+>
+> It is important to note that before the RC, there is a code freeze during which we create the release branch.
+>
+> The release branch in this example is `release-15.0`.
+>
+> The example also assumes that `origin` is the `vitessio/vitess` remote.
+
+1. Fetch `github.com/vitessio/vitess`'s remote.
+ ```shell
+ git fetch origin
+ ```
+
+2. Creation of the Release Pull Request.
+ > This step will create the Release Pull Request that will then be reviewed ahead of the release day.
+ > The merge commit of that Pull Request will be used during the release day to tag the release.
+ 1. Run the `create_release` script using the Makefile:
+ 1. Release Candidate:
+ ```shell
+ make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" VTOP_VERSION="2.8.0-rc1" create_release
+ ```
+ 2. General Availability:
+ ```shell
+ make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0" VTOP_VERSION="2.8.0" create_release
+ ```
+
+ The script will prompt you `Pausing so release notes can be added. Press enter to continue`. We are now going to generate the release notes, continue to the next sub-step.
+
+ 2. Run the following command to generate the release notes:
+ 1. Release Candidate:
+ ```shell
+ go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0-rc1" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]]
+ ```
+ 2. General Availability:
+ ```shell
+ go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]]
+ ```
+
+ > Important note: The release note generation fetches a lot of data from the GitHub API. You might reach the API request limit.
+ In which case you should use the `--threads=` flag and set an integer value lower than 10 (the default).
+
+ This command will generate the release notes by looking at all the commits between the tag `v14.0.3` and the reference `HEAD`.
+ It will also use the file located in `./changelog/15.0/15.0.0/summary.md` to prefix the release notes with a text that the maintainers wrote before the release.
+ Please verify the generated release notes to make sure it is well-formatted and all the bookmarks are generated properly.
+
+
+3. Follow the instruction prompted by the `create_release` Makefile command's output in order to push the newly created branch and create the Release Pull Request on GitHub.
### How To Release Vitess
This section is divided into two parts:
@@ -210,7 +280,10 @@ This section is divided into two parts:
#### Creation of the tags and release notes
-> In this example our current version is `v14` and we release the version `v15.0.0`.
+> This step implies that you have created a [Release Pull Request](#how-to-prepare-the-release-of-vitess) beforehand and that it has been reviewed.
+> The merge commit of this Release Pull Request will be used to tag the release.
+>
+> In this example our current version is `v14.0.3` and we release the version `v15.0.0`.
> Alongside Vitess' release, we also release a new version of the operator.
> Since we are releasing a release candidate here, the new version of the operator will also be a release candidate.
> In this example, the new operator version is `2.8.0`.
@@ -226,37 +299,22 @@ This section is divided into two parts:
git fetch origin
```
-2. Creation of the release notes and tags.
- 1. Run the release script using the Makefile:
- 1. Release Candidate:
- ```shell
- make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" DEV_VERSION="15.0.0-SNAPSHOT" VTOP_VERSION="2.8.0-rc1" do_release
- ```
- 2. General Availability:
- ```shell
- make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0" DEV_VERSION="15.0.1-SNAPSHOT" VTOP_VERSION="2.8.0" do_release
- ```
-
- The script will prompt you `Pausing so release notes can be added. Press enter to continue`. We are now going to generate the release notes, continue to the next sub-step.
-
- 2. Run the following command to generate the release notes:
- 1. Release Candidate:
- ```shell
- make VERSION="v15.0.0-rc1" FROM="v14.0.0" TO="HEAD" SUMMARY="./doc/releasenotes/15_0_0_summary.md" release-notes
- ```
- 2. General Availability:
- ```shell
- make VERSION="v15.0.0-rc1" FROM="v14.0.0" TO="HEAD" SUMMARY="./doc/releasenotes/15_0_0_summary.md" release-notes
- ```
- This command will generate the release notes by looking at all the commits between the tag `v14.0.0` and the reference `HEAD`.
- It will also use the file located in `./doc/releasenotes/15_0_0_summary.md` to prefix the release notes with a text that the maintainers wrote before the release.
-
+2. Checkout to the merge commit of the Release Pull Request.
-3. Follow the instruction prompted by the `do_release` Makefile command's output in order to push the tags, branches and create the Pull Requests.
+3. Tag the release and push the tags
+ ```shell
+ git tag v15.0.0 && git tag v0.15.0 && git push origin v15.0.0 && git push origin v0.15.0
+ ```
-4. Create a Pull Request against the `main` branch with the newly created release notes.
+4. Create a Pull Request against the `main` branch with the release notes found in `./changelog/15.0/15.0.0/15_0_0_*.md`.
-5. Release the tag on GitHub UI as explained in the following section.
+5. Run the back to dev mode tool.
+ ```shell
+ make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" DEV_VERSION="15.0.0-SNAPSHOT" back_to_dev_mode
+ ```
+ > You will then need to follow the instructions given by the output of the back_to_dev_mode Makefile command. You will need to push the newly created branch and open a Pull Request.
+
+6. Release the tag on GitHub UI as explained in the following section.
#### Creating Release or Release Candidate on the GitHub UI
@@ -324,12 +382,12 @@ The script will prompt the command that will allow you to push the code freeze c
>
> For this example, we assume we juste released `v12.0.0`.
-1. Checkout to the release commit.
+1. Checkout to the release commit.
```shell
git checkout v12.0.0
```
-2. Run `gpg-agent` to avoid that Maven will constantly prompt you for the password of your private key.
+2. Run `gpg-agent` to avoid that Maven will constantly prompt you for the password of your private key.
```bash
eval $(gpg-agent --daemon --no-grab --write-env-file $HOME/.gpg-agent-info)
@@ -337,7 +395,13 @@ The script will prompt the command that will allow you to push the code freeze c
export GPG_AGENT_INFO
```
-3. Deploy (upload) the Java code to the oss.sonatype.org repository:
+3. Export following to avoid any version conflicts
+ ```bash
+ export MAVEN_OPTS="--add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.text=ALL-UNNAMED
+ --add-opens=java.desktop/java.awt.font=ALL-UNNAMED"
+ ```
+
+4. Deploy (upload) the Java code to the oss.sonatype.org repository:
> **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).
@@ -345,3 +409,4 @@ The script will prompt the command that will allow you to push the code freeze c
mvn clean deploy -P release -DskipTests
cd ..
```
+5. It will take some time for artifacts to appear on [maven directory](https://mvnrepository.com/artifact/io.vitess/vitess-client)
diff --git a/doc/releasenotes/15_0_0_changelog.md b/doc/releasenotes/15_0_0_changelog.md
deleted file mode 100644
index 6b064865219..00000000000
--- a/doc/releasenotes/15_0_0_changelog.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Changelog of Vitess v15.0.0
-
-### Bug fixes
-#### Query Serving
- * fix: scalar aggregation engine primitive #10465
- * fix: aggregation empty row on join with grouping and aggregations #10480
-### CI/Build
-#### Governance
- * Update the comment for review checklist with an item for CI workflows #10471
-### Documentation
-#### CLI
- * [vtctldclient] Update CLI docs for usages, flags, and aliases #10502
-#### VTAdmin
- * [vtadmin] Document known issue with node versions 17+ #10483
-### Enhancement
-#### Build/CI
- * Add name to static check workflow #10470
-#### Query Serving
- * Refactor aggregation AST structs #10347
- * fix: change planner_version to planner-version everywhere #10453
- * Add support for alter table rename column #10469
- * schemadiff: `ColumnRenameStrategy` in DiffHints #10472
- * Add parsing support for performance schema functions #10478
- * schemadiff: TableRenameStrategy in DiffHints #10479
- * OnlineDDL executor: adding log entries #10482
-### Internal Cleanup
-#### General
- * Remove v2 resharding fields #10409
-#### Query Serving
- * Reduce shift-reduce conflicts #10500
- * feat: don't stop if compilation errors are happening on the generated files #10506
-#### VTAdmin
- * [vtadmin] Rename ERS/PRS pools+flags properly #10460
-#### web UI
- * Remove sharding_column_name and sharding_column_type from vtctld2 #10459
-### Release
-#### General
- * Post release `v14.0.0-RC1` steps #10458
-### Testing
-#### Build/CI
- * test: reduce number of vttablets to start in the tests #10491
-#### VTAdmin
- * [vtadmin] authz tests - tablet actions #10457
- * [vtadmin] Add authz tests for remaining non-schema related actions #10481
- * [vtadmin] Add schema-related authz tests #10486
-
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index 0e0faff7974..24cd4b22cec 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -12,17 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# NOTE: This file is also symlinked as "Dockerfile" in the root of our
-# repository because the automated build feature on Docker Hub does not
-# allow to specify a different build context. It always assumes that the
-# build context is the same directory as the Dockerfile is in.
-# "make build" below must be called in our repository's root and
-# therefore we need to have the symlinked "Dockerfile" in there as well.
-# TODO(mberlin): Remove the symlink and this note once
-# https://github.com/docker/hub-feedback/issues/292 is fixed.
-
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
+ARG bootstrap_version=14.7
+ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}"
@@ -32,13 +23,16 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
# Re-copy sources from working tree
-USER root
-COPY . /vt/src/vitess.io/vitess
+COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess
+
+USER vitess
# Build Vitess
RUN make build
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-USER vitess
diff --git a/docker/base/Dockerfile.mariadb b/docker/base/Dockerfile.mariadb
deleted file mode 100644
index 70e01e5cefa..00000000000
--- a/docker/base/Dockerfile.mariadb
+++ /dev/null
@@ -1,21 +0,0 @@
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mariadb"
-
-FROM "${image}"
-
-# Allows some docker builds to disable CGO
-ARG CGO_ENABLED=0
-
-# Allows docker builds to set the BUILD_NUMBER
-ARG BUILD_NUMBER
-
-# Re-copy sources from working tree
-USER root
-COPY . /vt/src/vitess.io/vitess
-
-# Build Vitess
-RUN make build
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-USER vitess
diff --git a/docker/base/Dockerfile.mariadb103 b/docker/base/Dockerfile.mariadb103
deleted file mode 100644
index 53d8789b563..00000000000
--- a/docker/base/Dockerfile.mariadb103
+++ /dev/null
@@ -1,21 +0,0 @@
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103"
-
-FROM "${image}"
-
-# Allows some docker builds to disable CGO
-ARG CGO_ENABLED=0
-
-# Allows docker builds to set the BUILD_NUMBER
-ARG BUILD_NUMBER
-
-# Re-copy sources from working tree
-USER root
-COPY . /vt/src/vitess.io/vitess
-
-# Build Vitess
-RUN make build
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-USER vitess
diff --git a/examples/region_sharding/scripts/vttablet-down.sh b/docker/base/Dockerfile.mysql57
old mode 100755
new mode 100644
similarity index 50%
rename from examples/region_sharding/scripts/vttablet-down.sh
rename to docker/base/Dockerfile.mysql57
index 47b881b9793..9e60efde5f2
--- a/examples/region_sharding/scripts/vttablet-down.sh
+++ b/docker/base/Dockerfile.mysql57
@@ -1,30 +1,38 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
+# Copyright 2023 The Vitess Authors.
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-# This is an example script that stops the mysqld and vttablet instances
-# created by vttablet-up.sh
+ARG bootstrap_version=14.7
+ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
+
+FROM "${image}"
+
+# Allows some docker builds to disable CGO
+ARG CGO_ENABLED=0
-source ./env.sh
+# Allows docker builds to set the BUILD_NUMBER
+ARG BUILD_NUMBER
-printf -v tablet_dir 'vt_%010d' $TABLET_UID
-pid=`cat $VTDATAROOT/$tablet_dir/vttablet.pid`
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
-kill $pid
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
-# Wait for vttablet to die.
-while ps -p $pid > /dev/null; do sleep 1; done
+# Re-copy sources from working tree
+COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess
+USER vitess
+# Build Vitess
+RUN make build
diff --git a/docker/base/Dockerfile.mysql80 b/docker/base/Dockerfile.mysql80
deleted file mode 100644
index 2689dd9772e..00000000000
--- a/docker/base/Dockerfile.mysql80
+++ /dev/null
@@ -1,21 +0,0 @@
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
-
-FROM "${image}"
-
-# Allows some docker builds to disable CGO
-ARG CGO_ENABLED=0
-
-# Allows docker builds to set the BUILD_NUMBER
-ARG BUILD_NUMBER
-
-# Re-copy sources from working tree
-USER root
-COPY . /vt/src/vitess.io/vitess
-
-# Build Vitess
-RUN make build
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-USER vitess
diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57
index cd32498f91f..bcb1669601d 100644
--- a/docker/base/Dockerfile.percona57
+++ b/docker/base/Dockerfile.percona57
@@ -1,4 +1,18 @@
-ARG bootstrap_version=11
+# Copyright 2023 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}"
@@ -9,13 +23,16 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
# Re-copy sources from working tree
-USER root
-COPY . /vt/src/vitess.io/vitess
+COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess
+
+USER vitess
# Build Vitess
RUN make build
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-USER vitess
diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80
index 8a140daed45..c872d140a72 100644
--- a/docker/base/Dockerfile.percona80
+++ b/docker/base/Dockerfile.percona80
@@ -1,4 +1,18 @@
-ARG bootstrap_version=11
+# Copyright 2023 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}"
@@ -9,12 +23,15 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
# Re-copy sources from working tree
-USER root
-COPY . /vt/src/vitess.io/vitess
+COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess
-# Fix permissions
-RUN chown -R vitess:vitess /vt
USER vitess
# Build Vitess
diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md
index 7604052f33e..59a4567696c 100644
--- a/docker/bootstrap/CHANGELOG.md
+++ b/docker/bootstrap/CHANGELOG.md
@@ -44,4 +44,40 @@ List of changes between bootstrap image versions.
## [11] - 2022-08-31
### Changes
-- Update build to golang 1.18.5
\ No newline at end of file
+- Update build to golang 1.18.5
+
+## [12] - 2022-10-14
+### Changes
+- Update build to golang 1.18.7
+
+## [13] - 2022-12-08
+### Changes
+- Update build to golang 1.19.4
+
+## [14] - 2023-02-21
+### Changes
+- Update build to golang 1.20.1
+
+## [14.2] - 2023-03-27
+### Changes
+- Update build to golang 1.20.2
+
+## [14.3] - 2023-04-05
+### Changes
+- Update build to golang 1.20.3
+
+## [14.4] - 2023-05-09
+### Changes
+- Update build to golang 1.20.4
+
+## [14.5] - 2023-06-07
+### Changes
+- Update build to golang 1.20.5
+
+## [14.6] - 2023-09-07
+### Changes
+- Update build to golang 1.20.8
+
+## [14.7] - 2023-09-06
+### Changes
+- Use Debian Bullseye as base bootstrap image in order to continue upgrading the Golang version
\ No newline at end of file
diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common
index d3f9af78771..243d8425dd6 100644
--- a/docker/bootstrap/Dockerfile.common
+++ b/docker/bootstrap/Dockerfile.common
@@ -1,4 +1,4 @@
-FROM --platform=linux/amd64 golang:1.18.5-buster
+FROM --platform=linux/amd64 golang:1.20.8-bullseye
# Install Vitess build dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
diff --git a/docker/bootstrap/Dockerfile.mariadb b/docker/bootstrap/Dockerfile.mariadb
deleted file mode 100644
index 3031008b1ef..00000000000
--- a/docker/bootstrap/Dockerfile.mariadb
+++ /dev/null
@@ -1,30 +0,0 @@
-ARG bootstrap_version
-ARG image="vitess/bootstrap:${bootstrap_version}-common"
-
-FROM --platform=linux/amd64 "${image}"
-
-# Install MariaDB 10
-RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- add-apt-repository 'deb http://repo.percona.com/apt buster main' && \
- { \
- echo debconf debconf/frontend select Noninteractive; \
- echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
- echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
- } | debconf-set-selections && \
- apt-get update -y \
- && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- bzip2 \
- mariadb-server \
- libmariadbclient-dev \
- libdbd-mysql-perl \
- rsync \
- libev4 \
- percona-xtrabackup-24 \
- && rm -rf /var/lib/apt/lists/*
-
-# Bootstrap Vitess
-WORKDIR /vt/src/vitess.io/vitess
-
-ENV MYSQL_FLAVOR MariaDB
-USER vitess
-RUN ./bootstrap.sh
diff --git a/docker/bootstrap/Dockerfile.mariadb103 b/docker/bootstrap/Dockerfile.mariadb103
deleted file mode 100644
index 3fa54692eb1..00000000000
--- a/docker/bootstrap/Dockerfile.mariadb103
+++ /dev/null
@@ -1,19 +0,0 @@
-ARG bootstrap_version
-ARG image="vitess/bootstrap:${bootstrap_version}-common"
-
-FROM --platform=linux/amd64 "${image}"
-
-# Install MariaDB 10.3
-RUN apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 0xF1656F24C74CD1D8 \
- && add-apt-repository 'deb [arch=amd64] http://ftp.osuosl.org/pub/mariadb/repo/10.3/debian buster main' \
- && apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- mariadb-server-10.3 \
- libmariadb-dev \
- && rm -rf /var/lib/apt/lists/*
-
-# Bootstrap Vitess
-WORKDIR /vt/src/vitess.io/vitess
-
-ENV MYSQL_FLAVOR MariaDB103
-USER vitess
-RUN ./bootstrap.sh
diff --git a/docker/bootstrap/Dockerfile.mysql80 b/docker/bootstrap/Dockerfile.mysql80
index e064c638d99..dbb16bdbebc 100644
--- a/docker/bootstrap/Dockerfile.mysql80
+++ b/docker/bootstrap/Dockerfile.mysql80
@@ -8,7 +8,7 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyser
for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 467B942D3A79BD29 && break; done && \
add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' && \
for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \
+ echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \
diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57
index 2d8beb5e95d..febe09fd8bf 100644
--- a/docker/bootstrap/Dockerfile.percona57
+++ b/docker/bootstrap/Dockerfile.percona57
@@ -5,16 +5,15 @@ FROM --platform=linux/amd64 "${image}"
# Install Percona 5.7
RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- add-apt-repository 'deb http://repo.percona.com/apt buster main' && \
+ add-apt-repository 'deb http://repo.percona.com/apt bullseye main' && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections && \
apt-get update && \
- apt-get install -y --no-install-recommends \
- percona-server-server-5.7 \
- libperconaserverclient20-dev percona-xtrabackup-24 && \
+ apt-get install -y --no-install-recommends percona-server-server-5.7 && \
+ apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \
rm -rf /var/lib/apt/lists/*
# Bootstrap Vitess
diff --git a/docker/bootstrap/Dockerfile.percona80 b/docker/bootstrap/Dockerfile.percona80
index 5dadc32cd0a..446ec554612 100644
--- a/docker/bootstrap/Dockerfile.percona80
+++ b/docker/bootstrap/Dockerfile.percona80
@@ -5,7 +5,7 @@ FROM --platform=linux/amd64 "${image}"
# Install Percona 8.0
RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done \
- && echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona.list && \
+ && echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \
@@ -21,7 +21,7 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.c
rsync \
libev4 \
# && rm -f /etc/apt/sources.list.d/percona.list \
- && echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list \
+ && echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list \
# { \
# echo debconf debconf/frontend select Noninteractive; \
# echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \
diff --git a/docker/bootstrap/README.md b/docker/bootstrap/README.md
index 156dbb95222..717f4336442 100644
--- a/docker/bootstrap/README.md
+++ b/docker/bootstrap/README.md
@@ -6,8 +6,6 @@ after successfully running `bootstrap.sh` and `dev.env`.
The `vitess/bootstrap` image comes in different flavors:
* `vitess/bootstrap:common` - dependencies that are common to all flavors
-* `vitess/bootstrap:mariadb` - bootstrap image for MariaDB 10.2
-* `vitess/bootstrap:mariadb103`- bootstrap image for MariaDB 10.3
* `vitess/bootstrap:mysql57` - bootstrap image for MySQL 5.7
* `vitess/bootstrap:mysql80` - bootstrap image for MySQL 8.0
* `vitess/bootstrap:percona57` - bootstrap image for Percona Server 5.7
diff --git a/docker/k8s/logrotate/Dockerfile b/docker/k8s/logrotate/Dockerfile
index 4f109ce3c68..a6054b78a26 100644
--- a/docker/k8s/logrotate/Dockerfile
+++ b/docker/k8s/logrotate/Dockerfile
@@ -16,9 +16,9 @@ ARG DEBIAN_VER=stable-slim
FROM debian:${DEBIAN_VER}
-COPY docker/k8s/logrotate/logrotate.conf /vt/logrotate.conf
+COPY logrotate.conf /vt/logrotate.conf
-COPY docker/k8s/logrotate/rotate.sh /vt/rotate.sh
+COPY rotate.sh /vt/rotate.sh
RUN mkdir -p /vt && \
apt-get update && \
diff --git a/docker/k8s/logtail/Dockerfile b/docker/k8s/logtail/Dockerfile
index a5d1d8340d3..b64fe5b3b6f 100644
--- a/docker/k8s/logtail/Dockerfile
+++ b/docker/k8s/logtail/Dockerfile
@@ -18,7 +18,7 @@ FROM debian:${DEBIAN_VER}
ENV TAIL_FILEPATH /dev/null
-COPY docker/k8s/logtail/tail.sh /vt/tail.sh
+COPY tail.sh /vt/tail.sh
RUN mkdir -p /vt && \
apt-get update && \
diff --git a/docker/k8s/vtadmin/Dockerfile b/docker/k8s/vtadmin/Dockerfile
index fe8c8ad5593..837ac8a525a 100644
--- a/docker/k8s/vtadmin/Dockerfile
+++ b/docker/k8s/vtadmin/Dockerfile
@@ -43,7 +43,7 @@ COPY --from=k8s /vt/bin/vtadmin /vt/bin/
COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --chown=nginx --from=node /vt/web/vtadmin/build /var/www/
-COPY --chown=nginx docker/k8s/vtadmin/default.conf /etc/nginx/templates/default.conf.template
+COPY --chown=nginx default.conf /etc/nginx/templates/default.conf.template
# command to run nginx is in the base image
# https://github.com/nginxinc/docker-nginx-unprivileged/blob/main/stable/alpine/Dockerfile#L150
diff --git a/docker/lite/Dockerfile b/docker/lite/Dockerfile
index c0929ac9ed2..e058f627eca 120000
--- a/docker/lite/Dockerfile
+++ b/docker/lite/Dockerfile
@@ -1 +1 @@
-Dockerfile.mysql57
\ No newline at end of file
+Dockerfile.mysql80
\ No newline at end of file
diff --git a/docker/lite/Dockerfile.alpine b/docker/lite/Dockerfile.alpine
deleted file mode 100644
index 46f2b0386d3..00000000000
--- a/docker/lite/Dockerfile.alpine
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# NOTE: We have to build the Vitess binaries from scratch instead of sharing
-# a base image because Docker Hub dropped the feature we relied upon to
-# ensure images contain the right binaries.
-
-# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103"
-
-FROM "${image}" AS builder
-
-# Allows docker builds to set the BUILD_NUMBER
-ARG BUILD_NUMBER
-
-# Re-copy sources from working tree.
-COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess
-
-# Build and install Vitess in a temporary output directory.
-USER vitess
-RUN make install PREFIX=/vt/install
-
-# Start over and build the final image.
-FROM alpine:3.8
-
-# Install dependencies
-RUN echo '@edge http://nl.alpinelinux.org/alpine/edge/main' >> /etc/apk/repositories && \
- apk add --no-cache mariadb@edge mariadb-client@edge bzip2 bash
-
-# Set up Vitess user and directory tree.
-RUN addgroup -S vitess && adduser -S -G vitess vitess
-RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt
-
-# Set up Vitess environment (just enough to run pre-built Go binaries)
-ENV VTROOT /vt/src/vitess.io/vitess
-ENV VTDATAROOT /vt/vtdataroot
-ENV PATH $VTROOT/bin:$PATH
-ENV MYSQL_FLAVOR MariaDB103
-
-# Copy artifacts from builder layer.
-COPY --from=builder --chown=vitess:vitess /vt/install /vt
-COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin
-
-# Create mount point for actual data (e.g. MySQL data dir)
-VOLUME /vt/vtdataroot
-USER vitess
diff --git a/docker/lite/Dockerfile.mariadb b/docker/lite/Dockerfile.mariadb
deleted file mode 100644
index 1eb86a180ab..00000000000
--- a/docker/lite/Dockerfile.mariadb
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# NOTE: We have to build the Vitess binaries from scratch instead of sharing
-# a base image because Docker Hub dropped the feature we relied upon to
-# ensure images contain the right binaries.
-
-# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mariadb"
-
-FROM "${image}" AS builder
-
-# Allows docker builds to set the BUILD_NUMBER
-ARG BUILD_NUMBER
-
-# Re-copy sources from working tree.
-COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess
-
-# Build and install Vitess in a temporary output directory.
-USER vitess
-RUN make install PREFIX=/vt/install
-
-# Start over and build the final image.
-FROM debian:buster-slim
-
-# Install dependencies
-COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
-RUN /vt/dist/install_dependencies.sh mariadb
-
-# Set up Vitess user and directory tree.
-RUN groupadd -r vitess && useradd -r -g vitess vitess
-RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt
-
-# Set up Vitess environment (just enough to run pre-built Go binaries)
-ENV VTROOT /vt/src/vitess.io/vitess
-ENV VTDATAROOT /vt/vtdataroot
-ENV PATH $VTROOT/bin:$PATH
-ENV MYSQL_FLAVOR MariaDB
-
-# Copy artifacts from builder layer.
-COPY --from=builder --chown=vitess:vitess /vt/install /vt
-COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin
-
-# Create mount point for actual data (e.g. MySQL data dir)
-VOLUME /vt/vtdataroot
-USER vitess
diff --git a/docker/lite/Dockerfile.mariadb103 b/docker/lite/Dockerfile.mariadb103
deleted file mode 100644
index e6fe0417dc8..00000000000
--- a/docker/lite/Dockerfile.mariadb103
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# NOTE: We have to build the Vitess binaries from scratch instead of sharing
-# a base image because Docker Hub dropped the feature we relied upon to
-# ensure images contain the right binaries.
-
-# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103"
-
-FROM "${image}" AS builder
-
-# Allows docker builds to set the BUILD_NUMBER
-ARG BUILD_NUMBER
-
-# Re-copy sources from working tree.
-COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess
-
-# Build and install Vitess in a temporary output directory.
-USER vitess
-RUN make install PREFIX=/vt/install
-
-# Start over and build the final image.
-FROM debian:buster-slim
-
-# Install dependencies
-COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
-RUN /vt/dist/install_dependencies.sh mariadb103
-
-# Set up Vitess user and directory tree.
-RUN groupadd -r vitess && useradd -r -g vitess vitess
-RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt
-
-# Set up Vitess environment (just enough to run pre-built Go binaries)
-ENV VTROOT /vt/src/vitess.io/vitess
-ENV VTDATAROOT /vt/vtdataroot
-ENV PATH $VTROOT/bin:$PATH
-ENV MYSQL_FLAVOR MariaDB103
-
-# Copy artifacts from builder layer.
-COPY --from=builder --chown=vitess:vitess /vt/install /vt
-COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin
-
-# Create mount point for actual data (e.g. MySQL data dir)
-VOLUME /vt/vtdataroot
-USER vitess
diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57
index 8b0fb1a4043..152446b2160 100644
--- a/docker/lite/Dockerfile.mysql57
+++ b/docker/lite/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80
index 6eb37136aae..f395d03d36d 100644
--- a/docker/lite/Dockerfile.mysql80
+++ b/docker/lite/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57
index 498e9a48028..0bdd2390243 100644
--- a/docker/lite/Dockerfile.percona57
+++ b/docker/lite/Dockerfile.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80
index f934504579f..85ac23141c3 100644
--- a/docker/lite/Dockerfile.percona80
+++ b/docker/lite/Dockerfile.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing
index 7a8d4b709a7..dba735d9947 100644
--- a/docker/lite/Dockerfile.testing
+++ b/docker/lite/Dockerfile.testing
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57
index 51ed4459f7a..e43a30e018e 100644
--- a/docker/lite/Dockerfile.ubi7.mysql57
+++ b/docker/lite/Dockerfile.ubi7.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install
FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \
diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80
index db6fd40efd1..df61310f844 100644
--- a/docker/lite/Dockerfile.ubi7.mysql80
+++ b/docker/lite/Dockerfile.ubi7.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install
FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \
@@ -51,6 +52,8 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm
RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace
+RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN yum install -y zstd
RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \
@@ -60,7 +63,7 @@ RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
mysql-community-client mysql-community-server \
# Have to use hacks to ignore conflicts on /etc/my.cnf install
&& mkdir -p /tmp/1 \
- && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community --disablerepo mysql57-community percona-xtrabackup-80 percona-toolkit \
+ && yum install -y --skip-broken --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community --disablerepo mysql57-community percona-xtrabackup-80 percona-toolkit \
&& rpm -Uvh --replacefiles /tmp/1/*rpm \
&& rm -rf /tmp/1 \
&& yum clean all \
diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57
index 997bc401147..57ed183fa36 100644
--- a/docker/lite/Dockerfile.ubi7.percona57
+++ b/docker/lite/Dockerfile.ubi7.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80
index e16522cc3ca..b18e35a2573 100644
--- a/docker/lite/Dockerfile.ubi7.percona80
+++ b/docker/lite/Dockerfile.ubi7.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
@@ -47,6 +47,8 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rpmkeys --checksig /tmp/perconarepo.rpm \
&& rpm -Uvh /tmp/perconarepo.rpm \
&& rm -f /tmp/perconarepo.rpm
+RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN yum install -y zstd
RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace
RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80
index 6df9a647378..235f4fc6a47 100644
--- a/docker/lite/Dockerfile.ubi8.arm64.mysql80
+++ b/docker/lite/Dockerfile.ubi8.arm64.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make cross-install PREFIX=/vt/install GOOS=linux GOARCH=arm64
FROM registry.access.redhat.com/ubi8/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
@@ -52,10 +53,12 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm
+RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN yum install -y zstd
RUN echo H4sICIDAHmICA2ZvbwDVkDFLxEAQhfv9FVfY7o4RhCBsoXJcIXKHwUIOi7m5MVk2yS6zG0//vYlRULTU4rrHvOHN+2ZL5Q4TP6oeO7bX3Od1pcuFXlyNUzVZg7S2yTmmCwDsgzjuDSUyB5SDI2+QzOChcyJBEnwkPOPQZijNuTkrigKmsHUFJ1MeCjUQEqg61tQweVtM0vOrfXItj1eAM0H0DiR2erTgbnOrV5uVvlk+6M+Kinvctby3p0ptqRziHjOnnxz3s/FnKJcxVlkYu/+k4Zcs+AvM8n3+jWW8MBc2NO6FZILUMEsoYQ76UvWI/vAGB/SOZZsCAAA= | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c8base --enablerepo c8updates --enablerepo c8extras libev numactl-libs sysstat strace \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm
-RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
+RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --nobest \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
procps-ng rsync wget openssl hostname curl tzdata make \
# Can't use alwaysprompt=no here, since we need to pick up deps
diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80
index 76766c2fc19..b983a7b4ce4 100644
--- a/docker/lite/Dockerfile.ubi8.mysql80
+++ b/docker/lite/Dockerfile.ubi8.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install
FROM registry.access.redhat.com/ubi8/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
@@ -51,10 +52,12 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \
&& rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm
+RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN yum install -y zstd
RUN echo H4sICIDAHmICA2ZvbwDVkDFLxEAQhfv9FVfY7o4RhCBsoXJcIXKHwUIOi7m5MVk2yS6zG0//vYlRULTU4rrHvOHN+2ZL5Q4TP6oeO7bX3Od1pcuFXlyNUzVZg7S2yTmmCwDsgzjuDSUyB5SDI2+QzOChcyJBEnwkPOPQZijNuTkrigKmsHUFJ1MeCjUQEqg61tQweVtM0vOrfXItj1eAM0H0DiR2erTgbnOrV5uVvlk+6M+Kinvctby3p0ptqRziHjOnnxz3s/FnKJcxVlkYu/+k4Zcs+AvM8n3+jWW8MBc2NO6FZILUMEsoYQ76UvWI/vAGB/SOZZsCAAA= | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c8base --enablerepo c8updates --enablerepo c8extras libev numactl-libs sysstat strace \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://download-ib01.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/g/gperftools-libs-2.7-9.el8.x86_64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/j/jemalloc-5.2.1-2.el8.x86_64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/l/libunwind-1.3.1-3.el8.x86_64.rpm
-RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
+RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --nobest \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
procps-ng rsync wget openssl hostname curl tzdata make \
# Can't use alwaysprompt=no here, since we need to pick up deps
diff --git a/docker/lite/install_dependencies.sh b/docker/lite/install_dependencies.sh
index fce8f8001b2..92f7ab67397 100755
--- a/docker/lite/install_dependencies.sh
+++ b/docker/lite/install_dependencies.sh
@@ -58,6 +58,7 @@ BASE_PACKAGES=(
wget
curl
percona-toolkit
+ zstd
)
apt-get update
@@ -82,7 +83,7 @@ mysql57)
)
;;
mysql80)
- mysql8_version=8.0.23
+ mysql8_version=8.0.30
do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb
do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb
do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb
@@ -125,16 +126,6 @@ percona80)
percona-xtrabackup-80
)
;;
-mariadb)
- PACKAGES=(
- mariadb-server-10.2
- )
- ;;
-mariadb103)
- PACKAGES=(
- mariadb-server
- )
- ;;
*)
echo "Unknown flavor ${FLAVOR}"
exit 1
@@ -142,19 +133,11 @@ mariadb103)
esac
# Get GPG keys for extra apt repositories.
-case "${FLAVOR}" in
-mysql57|mysql80)
- # repo.mysql.com
- add_apt_key 8C718D3B5072E1F5
- add_apt_key 467B942D3A79BD29
- ;;
-mariadb|mariadb103)
- # digitalocean.com
- add_apt_key F1656F24C74CD1D8
- ;;
-esac
+# repo.mysql.com
+add_apt_key 8C718D3B5072E1F5
+add_apt_key 467B942D3A79BD29
-# All flavors (except mariadb*) include Percona XtraBackup (from repo.percona.com).
+# All flavors include Percona XtraBackup (from repo.percona.com).
add_apt_key 9334A25F8507EFA5
# Add extra apt repositories for MySQL.
@@ -165,12 +148,6 @@ mysql57)
mysql80)
echo 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' > /etc/apt/sources.list.d/mysql.list
;;
-mariadb)
- echo 'deb http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.2/debian stretch main' > /etc/apt/sources.list.d/mariadb.list
- ;;
-mariadb103)
- echo 'deb http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.3/debian buster main' > /etc/apt/sources.list.d/mariadb.list
- ;;
esac
# Add extra apt repositories for Percona Server and/or Percona XtraBackup.
diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile
index 77af8fb2b2a..d520780387d 100644
--- a/docker/local/Dockerfile
+++ b/docker/local/Dockerfile
@@ -1,4 +1,4 @@
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-common"
FROM "${image}"
@@ -12,7 +12,7 @@ RUN /vt/dist/install_dependencies.sh mysql57
COPY docker/local/install_local_dependencies.sh /vt/dist/install_local_dependencies.sh
RUN /vt/dist/install_local_dependencies.sh
-RUN echo "source /vt/local/env.sh" >> /etc/bash.bashrc
+RUN echo "source /vt/common/env.sh" >> /etc/bash.bashrc
# Allows some docker builds to disable CGO
ARG CGO_ENABLED=0
diff --git a/docker/local/run.sh b/docker/local/run.sh
index fb847be80c8..9ba5aa07906 100755
--- a/docker/local/run.sh
+++ b/docker/local/run.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-docker run -p 15000:15000 -p 15001:15001 -p 15991:15991 -p 15999:15999 --rm -it vitess/local
+docker run -p 14200:14200 -p 14201:14201 -p 15000:15000 -p 15001:15001 -p 15991:15991 -p 15999:15999 -p 16000:16000 --rm -it vitess/local
diff --git a/docker/mini/Dockerfile b/docker/mini/Dockerfile
index df299ce8634..f9c14932eb0 100644
--- a/docker/mini/Dockerfile
+++ b/docker/mini/Dockerfile
@@ -35,8 +35,9 @@ COPY docker/mini/orchestrator-vitess-mini.conf.json /etc/orchestrator.conf.json
RUN chown vitess:vitess /etc/orchestrator.conf.json
COPY docker/mini/docker-entry /vt/dist/docker/mini/docker-entry
-COPY examples/local/scripts /vt/dist/scripts
-COPY examples/local/env.sh /vt/dist/scripts/env.sh
+COPY examples/common/scripts /vt/dist/scripts
+COPY examples/common/env.sh /vt/dist/scripts/env.sh
+COPY examples/common/lib/utils.sh /vt/dist/scripts/lib/utils.sh
COPY docker/mini/vtctld-mini-up.sh /vt/dist/scripts/vtctld-mini-up.sh
COPY docker/mini/vttablet-mini-up.sh /vt/dist/scripts/vttablet-mini-up.sh
COPY docker/mini/orchestrator-up.sh /vt/dist/scripts/orchestrator-up.sh
diff --git a/docker/mini/vtctld-mini-up.sh b/docker/mini/vtctld-mini-up.sh
index 9d318949970..641763ab016 100755
--- a/docker/mini/vtctld-mini-up.sh
+++ b/docker/mini/vtctld-mini-up.sh
@@ -27,8 +27,6 @@ vtctld \
$TOPOLOGY_FLAGS \
--disable_active_reparents \
-cell $cell \
- -workflow_manager_init \
- -workflow_manager_use_election \
-service_map 'grpc-vtctl' \
-backup_storage_implementation file \
-file_backup_storage_root $VTDATAROOT/backups \
diff --git a/docker/mini/vttablet-mini-up.sh b/docker/mini/vttablet-mini-up.sh
index 586525d5ae4..4cc86156076 100755
--- a/docker/mini/vttablet-mini-up.sh
+++ b/docker/mini/vttablet-mini-up.sh
@@ -50,7 +50,6 @@ vttablet \
-init_shard $shard \
-init_tablet_type $tablet_type \
-health_check_interval 5s \
- -enable_semi_sync \
-enable_replication_reporter \
-backup_storage_implementation file \
-file_backup_storage_root $VTDATAROOT/backups \
diff --git a/docker/release.sh b/docker/release.sh
index 3ca6569387a..1fa6c57326c 100755
--- a/docker/release.sh
+++ b/docker/release.sh
@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
-vt_base_version='v13.0.0'
+vt_base_version='v16.0.5-SNAPSHOT'
debian_versions='buster bullseye'
default_debian_version='bullseye'
diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57
index bc93a4b8e0a..2aefd753c02 100644
--- a/docker/vttestserver/Dockerfile.mysql57
+++ b/docker/vttestserver/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
@@ -33,7 +33,7 @@ USER vitess
RUN make install-testing PREFIX=/vt/install
# Start over and build the final image.
-FROM debian:buster-slim
+FROM debian:bullseye-slim
# Install dependencies
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80
index 158187f4a1c..ab1e6efd104 100644
--- a/docker/vttestserver/Dockerfile.mysql80
+++ b/docker/vttestserver/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=14.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -33,7 +33,7 @@ USER vitess
RUN make install-testing PREFIX=/vt/install
# Start over and build the final image.
-FROM debian:buster-slim
+FROM debian:bullseye-slim
# Install dependencies
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
diff --git a/docker/vttestserver/run.sh b/docker/vttestserver/run.sh
index 18e40bb5ede..1ff79153af5 100755
--- a/docker/vttestserver/run.sh
+++ b/docker/vttestserver/run.sh
@@ -40,7 +40,8 @@ rm -vf "$VTDATAROOT"/"$tablet_dir"/{mysql.sock,mysql.sock.lock}
--foreign_key_mode "${FOREIGN_KEY_MODE:-allow}" \
--enable_online_ddl="${ENABLE_ONLINE_DDL:-true}" \
--enable_direct_ddl="${ENABLE_DIRECT_DDL:-true}" \
- --planner-version="${PLANNER_VERSION:-v3}" \
+ --planner-version="${PLANNER_VERSION:-gen4}" \
--vschema_ddl_authorized_users=% \
+ --tablet_refresh_interval "${TABLET_REFRESH_INTERVAL:-10s}" \
--schema_dir="/vt/schema/"
diff --git a/examples/are-you-alive/go.mod b/examples/are-you-alive/go.mod
index 83e51ec7293..e3069908616 100644
--- a/examples/are-you-alive/go.mod
+++ b/examples/are-you-alive/go.mod
@@ -3,11 +3,11 @@ module vitess.io/vitess/examples/are-you-alive
go 1.14
require (
- github.com/go-sql-driver/mysql v1.5.0
- github.com/prometheus/client_golang v1.6.0
- github.com/sirupsen/logrus v1.6.0
+ github.com/go-sql-driver/mysql v1.7.0
+ github.com/prometheus/client_golang v1.14.0
+ github.com/sirupsen/logrus v1.9.0
go.uber.org/atomic v1.9.0 // indirect
- go.uber.org/ratelimit v0.1.0
- golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 // indirect
- gopkg.in/yaml.v2 v2.2.8
+ go.uber.org/ratelimit v0.2.0
+ golang.org/x/sys v0.5.0 // indirect
+ gopkg.in/yaml.v2 v2.4.0
)
diff --git a/examples/are-you-alive/go.sum b/examples/are-you-alive/go.sum
index ed267a8aa62..9d5e2d8915f 100644
--- a/examples/are-you-alive/go.sum
+++ b/examples/are-you-alive/go.sum
@@ -1,43 +1,148 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI=
+github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -51,71 +156,348 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A=
-github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
-github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/ratelimit v0.1.0 h1:U2AruXqeTb4Eh9sYQSTrMhH8Cb7M0Ian2ibBOnBcnAw=
-go.uber.org/ratelimit v0.1.0/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
+go.uber.org/ratelimit v0.2.0 h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA=
+go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/examples/local/backups/create_commerce_schema.sql b/examples/backups/create_commerce_schema.sql
similarity index 100%
rename from examples/local/backups/create_commerce_schema.sql
rename to examples/backups/create_commerce_schema.sql
diff --git a/examples/local/backups/create_customer_schema.sql b/examples/backups/create_customer_schema.sql
similarity index 100%
rename from examples/local/backups/create_customer_schema.sql
rename to examples/backups/create_customer_schema.sql
diff --git a/examples/local/backups/restart_tablets.sh b/examples/backups/restart_tablets.sh
similarity index 71%
rename from examples/local/backups/restart_tablets.sh
rename to examples/backups/restart_tablets.sh
index 038fafc5dbf..bfafcf26d4f 100755
--- a/examples/local/backups/restart_tablets.sh
+++ b/examples/backups/restart_tablets.sh
@@ -17,21 +17,21 @@
# this script brings up new tablets for the two new shards that we will
# be creating in the customer keyspace and copies the schema
-source ./env.sh
+source ../common/env.sh
for i in 100 101 102; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ./scripts/vttablet-up.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
for i in 200 201 202; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
for i in 300 301 302; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
sleep 5
@@ -40,7 +40,7 @@ sleep 5
# complete before we start InitShardPrimary, otherwise we end up reading the
# tablet type to RESTORE and do not set semi-sync, which leads to the primary
# hanging on writes.
-totalTime=300
+totalTime=600
for i in 101 201 301; do
while [ $totalTime -gt 0 ]; do
status=$(curl "http://$hostname:15$i/debug/status_details")
@@ -58,6 +58,6 @@ for i in 101 201 301; do
exit 1
done
-vtctldclient InitShardPrimary --force commerce/0 zone1-100
-vtctldclient InitShardPrimary --force customer/-80 zone1-200
-vtctldclient InitShardPrimary --force customer/80- zone1-300
+vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100"
+vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-200"
+vtctldclient PlannedReparentShard customer/80- --new-primary "zone1-300"
diff --git a/examples/backups/start_cluster.sh b/examples/backups/start_cluster.sh
new file mode 100755
index 00000000000..9855171ea4d
--- /dev/null
+++ b/examples/backups/start_cluster.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+# Copyright 2022 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# this script brings up new tablets for the two new shards that we will
+# be creating in the customer keyspace and copies the schema
+
+source ../common/env.sh
+
+# start topo server
+if [ "${TOPO}" = "zk2" ]; then
+ CELL=zone1 ../common/scripts/zk-up.sh
+elif [ "${TOPO}" = "k8s" ]; then
+ CELL=zone1 ../common/scripts/k3s-up.sh
+else
+ CELL=zone1 ../common/scripts/etcd-up.sh
+fi
+
+# start vtctld
+CELL=zone1 ../common/scripts/vtctld-up.sh
+
+# Create keyspace and set the semi_sync durability policy.
+vtctldclient CreateKeyspace --durability-policy=semi_sync commerce || fail "Failed to create and configure the commerce keyspace"
+
+# start vttablets for keyspace commerce
+for i in 100 101 102; do
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh
+done
+
+# set one of the replicas to primary
+vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100"
+
+# create the schema for commerce
+vtctlclient ApplySchema -- --sql-file ./create_commerce_schema.sql commerce || fail "Could not apply schema for the commerce keyspace"
+vtctlclient ApplyVSchema -- --vschema_file ../local/vschema_commerce_seq.json commerce || fail "Could not apply vschema for the commerce keyspace"
+
+# Create keyspace and set the semi_sync durability policy.
+vtctldclient CreateKeyspace --durability-policy=semi_sync customer || fail "Failed to create and configure the customer keyspace"
+
+# start vttablets for keyspace customer
+for i in 200 201 202; do
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
+done
+for i in 300 301 302; do
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
+done
+
+# set one of the replicas to primary
+vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-200"
+vtctldclient PlannedReparentShard customer/80- --new-primary "zone1-300"
+
+for shard in "-80" "80-"; do
+ wait_for_healthy_shard customer "${shard}" || exit 1
+done
+
+# create the schema for customer
+vtctlclient ApplySchema -- --sql-file ./create_customer_schema.sql customer || fail "Could not apply schema for the customer keyspace"
+vtctlclient ApplyVSchema -- --vschema_file ../local/vschema_customer_sharded.json customer || fail "Could not apply vschema for the customer keyspace"
+
+
+# start vtgate
+CELL=zone1 ../common/scripts/vtgate-up.sh
+
+sleep 5
+
+mysql < ../common/insert_commerce_data.sql
diff --git a/examples/local/backups/stop_tablets.sh b/examples/backups/stop_tablets.sh
similarity index 88%
rename from examples/local/backups/stop_tablets.sh
rename to examples/backups/stop_tablets.sh
index 25470ac5491..2a45e9e68d2 100755
--- a/examples/local/backups/stop_tablets.sh
+++ b/examples/backups/stop_tablets.sh
@@ -17,7 +17,7 @@
# this script brings up new tablets for the two new shards that we will
# be creating in the customer keyspace and copies the schema
-source ./env.sh
+source ../common/env.sh
for tablet in 100 200 300; do
if vtctlclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
@@ -25,9 +25,9 @@ for tablet in 100 200 300; do
for i in 0 1 2; do
uid=$(($tablet + $i))
echo "Shutting down tablet zone1-$uid"
- CELL=zone1 TABLET_UID=$uid ./scripts/vttablet-down.sh
+ CELL=zone1 TABLET_UID=$uid ../common/scripts/vttablet-down.sh
echo "Shutting down mysql zone1-$uid"
- CELL=zone1 TABLET_UID=$uid ./scripts/mysqlctl-down.sh
+ CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh
echo "Removing tablet directory zone1-$uid"
vtctlclient DeleteTablet -- --allow_primary=true zone1-$uid
rm -Rf $VTDATAROOT/vt_0000000$uid
diff --git a/examples/local/backups/take_backups.sh b/examples/backups/take_backups.sh
similarity index 81%
rename from examples/local/backups/take_backups.sh
rename to examples/backups/take_backups.sh
index f8544a95b69..dc1b049c9c3 100755
--- a/examples/local/backups/take_backups.sh
+++ b/examples/backups/take_backups.sh
@@ -17,8 +17,8 @@
# this script brings up new tablets for the two new shards that we will
# be creating in the customer keyspace and copies the schema
-source ./env.sh
+source ../common/env.sh
-vtctlclient BackupShard customer/-80
-vtctlclient BackupShard customer/80-
-vtctlclient BackupShard commerce/0
+for shard in "customer/-80" "customer/80-" "commerce/0"; do
+ vtctlclient BackupShard "${shard}" || fail "Failed to backup shard: ${shard}"
+done
diff --git a/examples/backups/upgrade_cluster.sh b/examples/backups/upgrade_cluster.sh
new file mode 100755
index 00000000000..0144dc94579
--- /dev/null
+++ b/examples/backups/upgrade_cluster.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+# Copyright 2023 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# this script brings up new tablets for the two new shards that we will
+# be creating in the customer keyspace and copies the schema
+
+source ../common/env.sh
+
+# Restart the replica tablets so that they come up with new vttablet versions
+for i in 101 102; do
+ echo "Shutting down tablet zone1-$i"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh
+ echo "Shutting down mysql zone1-$i"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh
+ echo "Removing tablet directory zone1-$i"
+ vtctlclient DeleteTablet -- --allow_primary=true zone1-$i
+ rm -Rf $VTDATAROOT/vt_0000000$i
+ echo "Starting tablet zone1-$i again"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh
+done
+
+for i in 201 202; do
+ echo "Shutting down tablet zone1-$i"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh
+ echo "Shutting down mysql zone1-$i"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh
+ echo "Removing tablet directory zone1-$i"
+ vtctlclient DeleteTablet -- --allow_primary=true zone1-$i
+ rm -Rf $VTDATAROOT/vt_0000000$i
+ echo "Starting tablet zone1-$i again"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
+done
+
+for i in 301 302; do
+ echo "Shutting down tablet zone1-$i"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh
+ echo "Shutting down mysql zone1-$i"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh
+ echo "Removing tablet directory zone1-$i"
+ vtctlclient DeleteTablet -- --allow_primary=true zone1-$i
+ rm -Rf $VTDATAROOT/vt_0000000$i
+ echo "Starting tablet zone1-$i again"
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
+done
+
+# Wait for all the replica tablets to be in the serving state before reparenting to them.
+totalTime=600
+for i in 101 201 301; do
+ while [ $totalTime -gt 0 ]; do
+ status=$(curl "http://$hostname:15$i/debug/status_details")
+ echo "$status" | grep "REPLICA: Serving" && break
+ totalTime=$((totalTime-1))
+ sleep 0.1
+ done
+done
+
+# Check that all the replica tablets have reached REPLICA: Serving state
+for i in 101 201 301; do
+ status=$(curl "http://$hostname:15$i/debug/status_details")
+ echo "$status" | grep "REPLICA: Serving" && continue
+ echo "tablet-$i did not reach REPLICA: Serving state. Exiting due to failure."
+ exit 1
+done
+
+# Promote the replica tablets to primary
+vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-101"
+vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-201"
+vtctldclient PlannedReparentShard customer/80- --new-primary "zone1-301"
+
+# Restart the old primary tablets so that they are on the latest version of vttablet too.
+echo "Restarting tablet zone1-100"
+CELL=zone1 TABLET_UID=100 ../common/scripts/vttablet-down.sh
+CELL=zone1 KEYSPACE=commerce TABLET_UID=100 ../common/scripts/vttablet-up.sh
+
+echo "Restarting tablet zone1-200"
+CELL=zone1 TABLET_UID=200 ../common/scripts/vttablet-down.sh
+SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=200 ../common/scripts/vttablet-up.sh
+
+echo "Restarting tablet zone1-300"
+CELL=zone1 TABLET_UID=300 ../common/scripts/vttablet-down.sh
+SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=300 ../common/scripts/vttablet-up.sh
\ No newline at end of file
diff --git a/examples/local/env.sh b/examples/common/env.sh
similarity index 96%
rename from examples/local/env.sh
rename to examples/common/env.sh
index b6db50a27b9..adee0f34d3f 100644
--- a/examples/local/env.sh
+++ b/examples/common/env.sh
@@ -14,15 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/lib/utils.sh"
+
hostname=$(hostname -f)
vtctld_web_port=15000
export VTDATAROOT="${VTDATAROOT:-${PWD}/vtdataroot}"
-function fail() {
- echo "ERROR: $1"
- exit 1
-}
-
if [[ $EUID -eq 0 ]]; then
fail "This script refuses to be run as root. Please switch to a regular user."
fi
@@ -81,7 +78,7 @@ mkdir -p "${VTDATAROOT}/tmp"
# In your own environment you may prefer to use config files,
# such as ~/.my.cnf
-alias mysql="command mysql -h 127.0.0.1 -P 15306"
+alias mysql="command mysql --no-defaults -h 127.0.0.1 -P 15306"
alias vtctlclient="command vtctlclient --server localhost:15999 --log_dir ${VTDATAROOT}/tmp --alsologtostderr"
alias vtctldclient="command vtctldclient --server localhost:15999"
diff --git a/examples/common/lib/utils.sh b/examples/common/lib/utils.sh
new file mode 100644
index 00000000000..24e776c84a9
--- /dev/null
+++ b/examples/common/lib/utils.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+# Copyright 2023 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file contains utility functions that can be used throughout the
+# various examples.
+
+# Wait for the given number of tablets to show up in the topology server
+# for the keyspace/shard. Example (wait for 2 tablets in commerce/0):
+# wait_for_shard_tablets commerce 0 2
+function wait_for_shard_tablets() {
+ if [[ -z ${1} || -z ${2} || -z ${3} ]]; then
+ fail "A keyspace, shard, and number of tablets must be specified when waiting for tablets to come up"
+ fi
+ local keyspace=${1}
+ local shard=${2}
+ local num_tablets=${3}
+ local wait_secs=180
+
+ for _ in $(seq 1 ${wait_secs}); do
+ cur_tablets=$(vtctldclient GetTablets --keyspace "${keyspace}" --shard "${shard}" | wc -l)
+ if [[ ${cur_tablets} -eq ${num_tablets} ]]; then
+ break
+ fi
+ sleep 1
+ done;
+
+ cur_tablets=$(vtctldclient GetTablets --keyspace "${keyspace}" --shard "${shard}" | wc -l)
+ if [[ ${cur_tablets} -lt ${num_tablets} ]]; then
+ fail "Timed out after ${wait_secs} seconds waiting for tablets to come up in ${keyspace}/${shard}"
+ fi
+}
+
+# Wait for a primary tablet to be elected and become healthy and serving
+# in the given keyspace/shard. Example:
+# wait_for_healthy_shard commerce 0
+function wait_for_healthy_shard_primary() {
+ if [[ -z ${1} || -z ${2} ]]; then
+ fail "A keyspace and shard must be specified when waiting for the shard's primary to be healthy"
+ fi
+ local keyspace=${1}
+ local shard=${2}
+ local unhealthy_indicator='"primary_alias": null'
+ local wait_secs=180
+
+ for _ in $(seq 1 ${wait_secs}); do
+ if ! vtctldclient --server=localhost:15999 GetShard "${keyspace}/${shard}" | grep -qi "${unhealthy_indicator}"; then
+ break
+ fi
+ sleep 1
+ done;
+
+ if vtctldclient --server=localhost:15999 GetShard "${keyspace}/${shard}" | grep -qi "${unhealthy_indicator}"; then
+ fail "Timed out after ${wait_secs} seconds waiting for a primary tablet to be elected and become healthy in ${keyspace}/${shard}"
+ fi
+}
+
+# Wait for the shard primary tablet's VReplication engine to open.
+# There is currently no API call or client command that can be specifically used
+# to check the VReplication engine's status (no vars in /debug/vars etc. either).
+# So we use the Workflow listall client command as the method to check for that
+# as it will return an error when the engine is closed -- even when there are
+# no workflows.
+function wait_for_shard_vreplication_engine() {
+ if [[ -z ${1} || -z ${2} ]]; then
+ fail "A keyspace and shard must be specified when waiting for the shard primary tablet's VReplication engine to open"
+ fi
+ local keyspace=${1}
+ local shard=${2}
+ local wait_secs=90
+
+ for _ in $(seq 1 ${wait_secs}); do
+ if vtctlclient --server=localhost:15999 Workflow -- "${keyspace}" listall &>/dev/null; then
+ break
+ fi
+ sleep 1
+ done;
+
+ if ! vtctlclient --server=localhost:15999 Workflow -- "${keyspace}" listall &>/dev/null; then
+ fail "Timed out after ${wait_secs} seconds waiting for the primary tablet's VReplication engine to open in ${keyspace}/${shard}"
+ fi
+}
+
+# Wait for a specified number of the keyspace/shard's tablets to show up
+# in the topology server (3 is the default if no value is specified) and
+# then wait for one of the tablets to be promoted to primary and become
+# healthy and serving. Lastly, wait for the new primary tablet's
+# VReplication engine to fully open. Example:
+# wait_for_healthy_shard commerce 0
+function wait_for_healthy_shard() {
+ if [[ -z ${1} || -z ${2} ]]; then
+ fail "A keyspace and shard must be specified when waiting for tablets to come up"
+ fi
+ local keyspace=${1}
+ local shard=${2}
+ local num_tablets=${3:-3}
+
+ wait_for_shard_tablets "${keyspace}" "${shard}" "${num_tablets}"
+ wait_for_healthy_shard_primary "${keyspace}" "${shard}"
+ wait_for_shard_vreplication_engine "${keyspace}" "${shard}"
+}
+
+# Print error message and exit with error code.
+function fail() {
+ echo "ERROR: ${1}"
+ exit 1
+}
diff --git a/examples/local/scripts/consul-down.sh b/examples/common/scripts/consul-down.sh
similarity index 93%
rename from examples/local/scripts/consul-down.sh
rename to examples/common/scripts/consul-down.sh
index 6bebb8c72ec..4da5694525a 100755
--- a/examples/local/scripts/consul-down.sh
+++ b/examples/common/scripts/consul-down.sh
@@ -16,7 +16,7 @@
# This is an example script that stops the consul server started by consul-up.sh.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
echo "Stopping consul..."
kill -9 `cat $VTDATAROOT/tmp/consul.pid`
diff --git a/examples/local/scripts/consul-up.sh b/examples/common/scripts/consul-up.sh
similarity index 96%
rename from examples/local/scripts/consul-up.sh
rename to examples/common/scripts/consul-up.sh
index 4a69d1e1902..584a25f437a 100755
--- a/examples/local/scripts/consul-up.sh
+++ b/examples/common/scripts/consul-up.sh
@@ -16,7 +16,7 @@
# This is an example script that creates a single-node consul datacenter.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
consul_http_port=${CONSUL_HTTP_PORT:-'8500'}
diff --git a/examples/local/scripts/etcd-down.sh b/examples/common/scripts/etcd-down.sh
similarity index 93%
rename from examples/local/scripts/etcd-down.sh
rename to examples/common/scripts/etcd-down.sh
index 018af7432a3..f9894f8659c 100755
--- a/examples/local/scripts/etcd-down.sh
+++ b/examples/common/scripts/etcd-down.sh
@@ -16,7 +16,7 @@
# This is an example script that stops the etcd servers started by etcd-up.sh.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
echo "Stopping etcd..."
kill -9 `cat $VTDATAROOT/tmp/etcd.pid`
diff --git a/examples/local/scripts/etcd-up.sh b/examples/common/scripts/etcd-up.sh
similarity index 96%
rename from examples/local/scripts/etcd-up.sh
rename to examples/common/scripts/etcd-up.sh
index 5d262217c88..20a16a42260 100755
--- a/examples/local/scripts/etcd-up.sh
+++ b/examples/common/scripts/etcd-up.sh
@@ -16,7 +16,7 @@
# This is an example script that creates a quorum of Etcd servers.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
export ETCDCTL_API=2
diff --git a/examples/region_sharding/scripts/k3s-down.sh b/examples/common/scripts/k3s-down.sh
similarity index 91%
rename from examples/region_sharding/scripts/k3s-down.sh
rename to examples/common/scripts/k3s-down.sh
index 590dc604e3e..195b024bf91 100755
--- a/examples/region_sharding/scripts/k3s-down.sh
+++ b/examples/common/scripts/k3s-down.sh
@@ -18,9 +18,7 @@
set -e
-# shellcheck source=./env.sh
-# shellcheck disable=SC1091
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
# Stop K3s server.
echo "Stopping k3s server..."
diff --git a/examples/local/scripts/k3s-up.sh b/examples/common/scripts/k3s-up.sh
similarity index 89%
rename from examples/local/scripts/k3s-up.sh
rename to examples/common/scripts/k3s-up.sh
index b2ead1a86d6..7c85cb0ac07 100755
--- a/examples/local/scripts/k3s-up.sh
+++ b/examples/common/scripts/k3s-up.sh
@@ -19,11 +19,8 @@
set -e
cell=${CELL:-'test'}
-script_root=$(dirname "${BASH_SOURCE[0]}")
-
-# shellcheck source=./env.sh
-# shellcheck disable=SC1091
-source ./env.sh
+script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
+source "${script_dir}/../env.sh"
case $(uname) in
Linux) ;;
@@ -47,7 +44,7 @@ sleep 15
KUBECTL="k3s kubectl --kubeconfig=${K8S_KUBECONFIG}"
# Create the CRD for vitesstopologynodes
-$KUBECTL create -f ../../go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml
+$KUBECTL create -f "${script_dir}/../../../go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml"
# Add the CellInfo description for the cell
set +e
diff --git a/examples/local/scripts/mysqlctl-down.sh b/examples/common/scripts/mysqlctl-down.sh
similarity index 94%
rename from examples/local/scripts/mysqlctl-down.sh
rename to examples/common/scripts/mysqlctl-down.sh
index 812558d6ca8..b2dee4e2119 100755
--- a/examples/local/scripts/mysqlctl-down.sh
+++ b/examples/common/scripts/mysqlctl-down.sh
@@ -17,7 +17,7 @@
# This is an example script that stops the mysqld and vttablet instances
# created by vttablet-up.sh
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
uid=$TABLET_UID
diff --git a/examples/region_sharding/scripts/mysqlctl-up.sh b/examples/common/scripts/mysqlctl-up.sh
similarity index 95%
rename from examples/region_sharding/scripts/mysqlctl-up.sh
rename to examples/common/scripts/mysqlctl-up.sh
index ae041cf951d..d9df27ccdc0 100755
--- a/examples/region_sharding/scripts/mysqlctl-up.sh
+++ b/examples/common/scripts/mysqlctl-up.sh
@@ -16,7 +16,7 @@
# This is an example script that creates a single shard vttablet deployment.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
uid=$TABLET_UID
diff --git a/examples/local/scripts/vtadmin-down.sh b/examples/common/scripts/vtadmin-down.sh
similarity index 76%
rename from examples/local/scripts/vtadmin-down.sh
rename to examples/common/scripts/vtadmin-down.sh
index 2a7944d9d5a..011e6da7f49 100755
--- a/examples/local/scripts/vtadmin-down.sh
+++ b/examples/common/scripts/vtadmin-down.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
echo "Stopping vtadmin-web..."
kill -9 "$(cat "$VTDATAROOT/tmp/vtadmin-web.pid")"
diff --git a/examples/region_sharding/scripts/vtadmin-up.sh b/examples/common/scripts/vtadmin-up.sh
similarity index 55%
rename from examples/region_sharding/scripts/vtadmin-up.sh
rename to examples/common/scripts/vtadmin-up.sh
index 5ee04c9a959..179e5089b90 100755
--- a/examples/region_sharding/scripts/vtadmin-up.sh
+++ b/examples/common/scripts/vtadmin-up.sh
@@ -1,9 +1,11 @@
#!/bin/bash
-source ./env.sh
+script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
+source "${script_dir}/../env.sh"
+cluster_name="local"
log_dir="${VTDATAROOT}/tmp"
-web_dir="../../web/vtadmin"
+web_dir="${script_dir}/../../../web/vtadmin"
vtadmin_api_port=14200
vtadmin_web_port=14201
@@ -18,8 +20,8 @@ vtadmin \
--logtostderr \
--alsologtostderr \
--rbac \
- --rbac-config="./vtadmin/rbac.yaml" \
- --cluster "id=local,name=local,discovery=staticfile,discovery-staticfile-path=./vtadmin/discovery.json,tablet-fqdn-tmpl={{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \
+ --rbac-config="${script_dir}/../vtadmin/rbac.yaml" \
+ --cluster "id=${cluster_name},name=${cluster_name},discovery=staticfile,discovery-staticfile-path=${script_dir}/../vtadmin/discovery.json,tablet-fqdn-tmpl={{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \
> "${log_dir}/vtadmin-api.out" 2>&1 &
vtadmin_api_pid=$!
@@ -32,14 +34,27 @@ vtadmin-api is running!
- PID: ${vtadmin_api_pid}
"
+# Wait for vtadmin to successfully discover the cluster
+expected_cluster_result="{\"result\":{\"clusters\":[{\"id\":\"${cluster_name}\",\"name\":\"${cluster_name}\"}]},\"ok\":true}"
+for _ in {0..300}; do
+ result=$(curl -s "http://localhost:${vtadmin_api_port}/api/clusters")
+ if [[ ${result} == "${expected_cluster_result}" ]]; then
+ break
+ fi
+ sleep 0.1
+done
+
+# Check one last time
+[[ $(curl -s "http://localhost:${vtadmin_api_port}/api/clusters") == "${expected_cluster_result}" ]] || fail "vtadmin failed to discover the running example Vitess cluster."
+
# As a TODO, it'd be nice to make the assumption that vtadmin-web is already
# installed and built (since we assume that `make` has already been run for
# other Vitess components.)
-npm --prefix $web_dir --silent install
+npm --prefix "$web_dir" --silent install
REACT_APP_VTADMIN_API_ADDRESS="http://localhost:${vtadmin_api_port}" \
REACT_APP_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \
- npm run --prefix $web_dir build
+ npm run --prefix "$web_dir" build
"${web_dir}/node_modules/.bin/serve" --no-clipboard -l $vtadmin_web_port -s "${web_dir}/build" \
> "${log_dir}/vtadmin-web.out" 2>&1 &
diff --git a/examples/region_sharding/scripts/vtctld-down.sh b/examples/common/scripts/vtctld-down.sh
similarity index 92%
rename from examples/region_sharding/scripts/vtctld-down.sh
rename to examples/common/scripts/vtctld-down.sh
index d96fa3b927f..a56d59b97e5 100755
--- a/examples/region_sharding/scripts/vtctld-down.sh
+++ b/examples/common/scripts/vtctld-down.sh
@@ -16,7 +16,7 @@
# This is an example script that stops vtctld.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
echo "Stopping vtctld..."
kill -9 `cat $VTDATAROOT/tmp/vtctld.pid`
diff --git a/examples/local/scripts/vtctld-up.sh b/examples/common/scripts/vtctld-up.sh
similarity index 93%
rename from examples/local/scripts/vtctld-up.sh
rename to examples/common/scripts/vtctld-up.sh
index 7957bdec8ba..e49b346ca1e 100755
--- a/examples/local/scripts/vtctld-up.sh
+++ b/examples/common/scripts/vtctld-up.sh
@@ -16,7 +16,7 @@
# This is an example script that starts vtctld.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
grpc_port=15999
@@ -26,8 +26,6 @@ echo "Starting vtctld..."
vtctld \
$TOPOLOGY_FLAGS \
--cell $cell \
- --workflow_manager_init \
- --workflow_manager_use_election \
--service_map 'grpc-vtctl,grpc-vtctld' \
--backup_storage_implementation file \
--file_backup_storage_root $VTDATAROOT/backups \
diff --git a/examples/region_sharding/scripts/vtgate-down.sh b/examples/common/scripts/vtgate-down.sh
similarity index 93%
rename from examples/region_sharding/scripts/vtgate-down.sh
rename to examples/common/scripts/vtgate-down.sh
index 9da0a7179df..3eea5fdf94d 100755
--- a/examples/region_sharding/scripts/vtgate-down.sh
+++ b/examples/common/scripts/vtgate-down.sh
@@ -16,7 +16,7 @@
# This is an example script that stops the instance started by vtgate-up.sh.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
# Stop vtgate.
echo "Stopping vtgate..."
diff --git a/examples/region_sharding/scripts/vtgate-up.sh b/examples/common/scripts/vtgate-up.sh
similarity index 96%
rename from examples/region_sharding/scripts/vtgate-up.sh
rename to examples/common/scripts/vtgate-up.sh
index cb33e27839b..03b85869e5d 100755
--- a/examples/region_sharding/scripts/vtgate-up.sh
+++ b/examples/common/scripts/vtgate-up.sh
@@ -16,7 +16,7 @@
# This is an example script that starts a single vtgate.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
web_port=15001
diff --git a/examples/local/scripts/vtorc-down.sh b/examples/common/scripts/vtorc-down.sh
similarity index 60%
rename from examples/local/scripts/vtorc-down.sh
rename to examples/common/scripts/vtorc-down.sh
index 2fdfc1491ce..f4d2e4cb8a0 100755
--- a/examples/local/scripts/vtorc-down.sh
+++ b/examples/common/scripts/vtorc-down.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
echo "Stopping vtorc."
kill -9 "$(cat "$VTDATAROOT/tmp/vtorc.pid")"
diff --git a/examples/local/scripts/vtorc-up.sh b/examples/common/scripts/vtorc-up.sh
similarity index 72%
rename from examples/local/scripts/vtorc-up.sh
rename to examples/common/scripts/vtorc-up.sh
index f5a0d75dbba..66a826da288 100755
--- a/examples/local/scripts/vtorc-up.sh
+++ b/examples/common/scripts/vtorc-up.sh
@@ -1,6 +1,7 @@
#!/bin/bash
-source ./env.sh
+script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
+source "${script_dir}/../env.sh"
log_dir="${VTDATAROOT}/tmp"
port=16000
@@ -9,7 +10,7 @@ vtorc \
$TOPOLOGY_FLAGS \
--logtostderr \
--alsologtostderr \
- --config="./vtorc/config.json" \
+ --config="${script_dir}/../vtorc/config.json" \
--port $port \
> "${log_dir}/vtorc.out" 2>&1 &
diff --git a/examples/local/scripts/vttablet-down.sh b/examples/common/scripts/vttablet-down.sh
similarity index 94%
rename from examples/local/scripts/vttablet-down.sh
rename to examples/common/scripts/vttablet-down.sh
index 47b881b9793..3de266def76 100755
--- a/examples/local/scripts/vttablet-down.sh
+++ b/examples/common/scripts/vttablet-down.sh
@@ -17,7 +17,7 @@
# This is an example script that stops the mysqld and vttablet instances
# created by vttablet-up.sh
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
printf -v tablet_dir 'vt_%010d' $TABLET_UID
pid=`cat $VTDATAROOT/$tablet_dir/vttablet.pid`
diff --git a/examples/local/scripts/vttablet-up.sh b/examples/common/scripts/vttablet-up.sh
similarity index 97%
rename from examples/local/scripts/vttablet-up.sh
rename to examples/common/scripts/vttablet-up.sh
index 43d0849ce76..21c15b8d547 100755
--- a/examples/local/scripts/vttablet-up.sh
+++ b/examples/common/scripts/vttablet-up.sh
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
keyspace=${KEYSPACE:-'test_keyspace'}
@@ -55,7 +55,6 @@ vttablet \
--service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
--pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \
--vtctld_addr http://$hostname:$vtctld_web_port/ \
- --disable_active_reparents \
> $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 &
# Block waiting for the tablet to be listening
diff --git a/examples/local/scripts/zk-down.sh b/examples/common/scripts/zk-down.sh
similarity index 93%
rename from examples/local/scripts/zk-down.sh
rename to examples/common/scripts/zk-down.sh
index 18dd7933bc9..a9fa1e80a30 100755
--- a/examples/local/scripts/zk-down.sh
+++ b/examples/common/scripts/zk-down.sh
@@ -16,7 +16,7 @@
# This is an example script that stops the ZooKeeper servers started by zk-up.sh.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
# Stop ZooKeeper servers.
echo "Stopping zk servers..."
diff --git a/examples/local/scripts/zk-up.sh b/examples/common/scripts/zk-up.sh
similarity index 97%
rename from examples/local/scripts/zk-up.sh
rename to examples/common/scripts/zk-up.sh
index d689e167988..519d5305b25 100755
--- a/examples/local/scripts/zk-up.sh
+++ b/examples/common/scripts/zk-up.sh
@@ -16,7 +16,7 @@
# This is an example script that creates a quorum of ZooKeeper servers.
-source ./env.sh
+source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
cell=${CELL:-'test'}
diff --git a/examples/local/topo-etcd2.sh b/examples/common/topo-etcd2.sh
similarity index 100%
rename from examples/local/topo-etcd2.sh
rename to examples/common/topo-etcd2.sh
diff --git a/examples/local/topo-k8s.sh b/examples/common/topo-k8s.sh
similarity index 100%
rename from examples/local/topo-k8s.sh
rename to examples/common/topo-k8s.sh
diff --git a/examples/local/topo-zk2.sh b/examples/common/topo-zk2.sh
similarity index 100%
rename from examples/local/topo-zk2.sh
rename to examples/common/topo-zk2.sh
diff --git a/examples/local/vtadmin/discovery.json b/examples/common/vtadmin/discovery.json
similarity index 87%
rename from examples/local/vtadmin/discovery.json
rename to examples/common/vtadmin/discovery.json
index def7dd50f85..92e20fb40d8 100644
--- a/examples/local/vtadmin/discovery.json
+++ b/examples/common/vtadmin/discovery.json
@@ -10,6 +10,7 @@
"vtgates": [
{
"host": {
+ "fqdn": "localhost:15001",
"hostname": "localhost:15991"
}
}
diff --git a/examples/common/vtadmin/rbac.yaml b/examples/common/vtadmin/rbac.yaml
new file mode 100644
index 00000000000..a2e665e4d8d
--- /dev/null
+++ b/examples/common/vtadmin/rbac.yaml
@@ -0,0 +1,5 @@
+rules:
+ - resource: "*"
+ actions: ["*"]
+ subjects: ["*"]
+ clusters: ["*"]
diff --git a/examples/local/vtorc/config.json b/examples/common/vtorc/config.json
similarity index 100%
rename from examples/local/vtorc/config.json
rename to examples/common/vtorc/config.json
diff --git a/examples/compose/config/init_db.sql b/examples/compose/config/init_db.sql
index 75dae7cd89d..d29f16073cd 100644
--- a/examples/compose/config/init_db.sql
+++ b/examples/compose/config/init_db.sql
@@ -64,13 +64,6 @@ GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE,
LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW,
SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER
ON *.* TO 'vt_filtered'@'localhost';
-# User for Orchestrator (https://github.com/openark/orchestrator).
-# TODO: Reenable when the password is randomly generated.
-CREATE USER 'orc_client_user'@'%' IDENTIFIED BY 'orc_client_user_password';
-GRANT SUPER, PROCESS, REPLICATION SLAVE, RELOAD
- ON *.* TO 'orc_client_user'@'%';
-GRANT SELECT
- ON _vt.* TO 'orc_client_user'@'%';
FLUSH PRIVILEGES;
RESET SLAVE ALL;
RESET MASTER;
diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml
index d982f36c331..108e7cc0d69 100644
--- a/examples/compose/docker-compose.beginners.yml
+++ b/examples/compose/docker-compose.beginners.yml
@@ -1,7 +1,7 @@
version: "2.1"
services:
consul1:
- image: consul:latest
+ image: hashicorp/consul:latest
hostname: "consul1"
ports:
- "8400:8400"
@@ -9,7 +9,7 @@ services:
- "8600:8600"
command: "agent -server -bootstrap-expect 3 -ui -disable-host-node-id -client 0.0.0.0"
consul2:
- image: consul:latest
+ image: hashicorp/consul:latest
hostname: "consul2"
expose:
- "8400"
@@ -19,7 +19,7 @@ services:
depends_on:
- consul1
consul3:
- image: consul:latest
+ image: hashicorp/consul:latest
hostname: "consul3"
expose:
- "8400"
@@ -58,15 +58,13 @@ services:
- "3306"
vtctld:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15000:$WEB_PORT"
- "$GRPC_PORT"
command: ["sh", "-c", " /vt/bin/vtctld \
$TOPOLOGY_FLAGS \
--cell $CELL \
- --workflow_manager_init \
- --workflow_manager_use_election \
--service_map 'grpc-vtctl,grpc-vtctld' \
--backup_storage_implementation file \
--file_backup_storage_root /vt/vtdataroot/backups \
@@ -83,7 +81,7 @@ services:
condition: service_healthy
vtgate:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15099:$WEB_PORT"
- "$GRPC_PORT"
@@ -113,7 +111,7 @@ services:
condition: service_healthy
schemaload:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
command:
- sh
- -c
@@ -146,12 +144,12 @@ services:
environment:
- KEYSPACES=$KEYSPACE
- GRPC_PORT=15999
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
vttablet100:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15100:$WEB_PORT"
- "$GRPC_PORT"
@@ -183,7 +181,7 @@ services:
retries: 15
vttablet101:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15101:$WEB_PORT"
- "$GRPC_PORT"
@@ -215,7 +213,7 @@ services:
retries: 15
vttablet102:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15102:$WEB_PORT"
- "$GRPC_PORT"
@@ -247,7 +245,7 @@ services:
retries: 15
vttablet103:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15103:$WEB_PORT"
- "$GRPC_PORT"
@@ -279,7 +277,7 @@ services:
retries: 15
vtorc:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
command: ["sh", "-c", "/script/vtorc-up.sh"]
depends_on:
- vtctld
@@ -309,7 +307,7 @@ services:
retries: 15
vreplication:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- ".:/script"
environment:
diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml
index c6de4af3ce7..37553ac10f8 100644
--- a/examples/compose/docker-compose.yml
+++ b/examples/compose/docker-compose.yml
@@ -2,7 +2,7 @@ services:
consul1:
command: agent -server -bootstrap-expect 3 -ui -disable-host-node-id -client 0.0.0.0
hostname: consul1
- image: consul:latest
+ image: hashicorp/consul:latest
ports:
- 8400:8400
- 8500:8500
@@ -16,7 +16,7 @@ services:
- "8500"
- "8600"
hostname: consul2
- image: consul:latest
+ image: hashicorp/consul:latest
consul3:
command: agent -server -retry-join consul1 -disable-host-node-id
depends_on:
@@ -26,7 +26,7 @@ services:
- "8500"
- "8600"
hostname: consul3
- image: consul:latest
+ image: hashicorp/consul:latest
external_db_host:
build:
context: ./external_db/mysql
@@ -75,7 +75,7 @@ services:
- SCHEMA_FILES=lookup_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
schemaload_test_keyspace:
@@ -101,7 +101,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
set_keyspace_durability_policy:
@@ -115,7 +115,7 @@ services:
environment:
- KEYSPACES=test_keyspace lookup_keyspace
- GRPC_PORT=15999
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
vreplication:
@@ -129,7 +129,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
vtctld:
@@ -137,13 +137,13 @@ services:
- sh
- -c
- ' /vt/bin/vtctld --topo_implementation consul --topo_global_server_address consul1:8500
- --topo_global_root vitess/global --cell test --workflow_manager_init --workflow_manager_use_election
+ --topo_global_root vitess/global --cell test
--service_map ''grpc-vtctl,grpc-vtctld'' --backup_storage_implementation file --file_backup_storage_root
/vt/vtdataroot/backups --logtostderr=true --port 8080 --grpc_port 15999 '
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15000:8080
- "15999"
@@ -160,7 +160,7 @@ services:
--normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15099:8080
- "15999"
@@ -182,7 +182,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 13000:8080
volumes:
@@ -217,7 +217,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15101:8080
- "15999"
@@ -254,7 +254,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15102:8080
- "15999"
@@ -291,7 +291,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15201:8080
- "15999"
@@ -328,7 +328,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15202:8080
- "15999"
@@ -365,7 +365,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15301:8080
- "15999"
@@ -402,7 +402,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15302:8080
- "15999"
diff --git a/examples/compose/vtcompose/docker-compose.test.yml b/examples/compose/vtcompose/docker-compose.test.yml
index 69365d4fb46..40eb9d39bba 100644
--- a/examples/compose/vtcompose/docker-compose.test.yml
+++ b/examples/compose/vtcompose/docker-compose.test.yml
@@ -79,7 +79,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
schemaload_unsharded_keyspace:
@@ -103,7 +103,7 @@ services:
- SCHEMA_FILES=unsharded_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
set_keyspace_durability_policy_test_keyspace:
@@ -117,7 +117,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=test_keyspace
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
set_keyspace_durability_policy_unsharded_keyspace:
@@ -130,7 +130,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=unsharded_keyspace
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
vreplication:
@@ -144,7 +144,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- .:/script
vtctld:
@@ -152,14 +152,14 @@ services:
- sh
- -c
- ' /vt/bin/vtctld --topo_implementation consul --topo_global_server_address consul1:8500
- --topo_global_root vitess/global --cell test --workflow_manager_init --workflow_manager_use_election
+ --topo_global_root vitess/global --cell test
--service_map ''grpc-vtctl,grpc-vtctld'' --backup_storage_implementation file
--file_backup_storage_root /vt/vtdataroot/backups --logtostderr=true --port
8080 --grpc_port 15999 '
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15000:8080
- "15999"
@@ -176,7 +176,7 @@ services:
''grpc-vtgateservice'' --normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15099:8080
- "15999"
@@ -199,7 +199,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 13000:8080
volumes:
@@ -234,7 +234,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15101:8080
- "15999"
@@ -271,7 +271,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15102:8080
- "15999"
@@ -308,7 +308,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15201:8080
- "15999"
@@ -345,7 +345,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15202:8080
- "15999"
@@ -382,7 +382,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- 15301:8080
- "15999"
diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go
index 37d9f21c191..8ef6dcec387 100644
--- a/examples/compose/vtcompose/vtcompose.go
+++ b/examples/compose/vtcompose/vtcompose.go
@@ -196,9 +196,9 @@ func main() {
// Check if it is an external_db
if _, ok := externalDbInfoMap[k]; ok {
- //This is no longer necessary, but we'll keep it for reference
- //https://github.com/vitessio/vitess/pull/4868, https://github.com/vitessio/vitess/pull/5010
- //vSchemaFile = applyJsonInMemoryPatch(vSchemaFile,`[{"op": "add","path": "/tables/*", "value": {}}]`)
+ // This is no longer necessary, but we'll keep it for reference
+ // https://github.com/vitessio/vitess/pull/4868, https://github.com/vitessio/vitess/pull/5010
+ // vSchemaFile = applyJsonInMemoryPatch(vSchemaFile,`[{"op": "add","path": "/tables/*", "value": {}}]`)
} else {
var primaryTableColumns map[string]string
vSchemaFile, primaryTableColumns = addTablesVschemaPatch(vSchemaFile, keyspaceData.schemaFileNames)
@@ -533,7 +533,7 @@ func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo,
- op: add
path: /services/init_shard_primary%[2]d
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
command: ["sh", "-c", "/vt/bin/vtctlclient %[5]s InitShardPrimary -force %[4]s/%[3]s %[6]s-%[2]d "]
%[1]s
`, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell)
@@ -565,7 +565,7 @@ func generateExternalPrimary(
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15%[1]d:%[3]d"
- "%[4]d"
@@ -627,7 +627,7 @@ func generateDefaultTablet(tabAlias int, shard, role, keyspace string, dbInfo ex
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15%[1]d:%[4]d"
- "%[5]d"
@@ -665,15 +665,13 @@ func generateVtctld(opts vtOptions) string {
- op: add
path: /services/vtctld
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15000:%[1]d"
- "%[2]d"
command: ["sh", "-c", " /vt/bin/vtctld \
%[3]s \
--cell %[4]s \
- --workflow_manager_init \
- --workflow_manager_use_election \
--service_map 'grpc-vtctl,grpc-vtctld' \
--backup_storage_implementation file \
--file_backup_storage_root /vt/vtdataroot/backups \
@@ -698,7 +696,7 @@ func generateVtgate(opts vtOptions) string {
- op: add
path: /services/vtgate
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
ports:
- "15099:%[1]d"
- "%[2]d"
@@ -740,7 +738,7 @@ func generateVTOrc(dbInfo externalDbInfo, keyspaceInfoMap map[string]keyspaceInf
- op: add
path: /services/vtorc
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- ".:/script"
environment:
@@ -765,7 +763,7 @@ func generateVreplication(dbInfo externalDbInfo, opts vtOptions) string {
- op: add
path: /services/vreplication
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- ".:/script"
environment:
@@ -793,7 +791,7 @@ func generateSetKeyspaceDurabilityPolicy(
- op: add
path: /services/set_keyspace_durability_policy_%[3]s
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- ".:/script"
environment:
@@ -830,7 +828,7 @@ func generateSchemaload(
- op: add
path: /services/schemaload_%[7]s
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v16.0.4
volumes:
- ".:/script"
environment:
diff --git a/examples/local/101_initial_cluster.sh b/examples/local/101_initial_cluster.sh
index a14fa142e4b..ffa24d7f98e 100755
--- a/examples/local/101_initial_cluster.sh
+++ b/examples/local/101_initial_cluster.sh
@@ -17,57 +17,47 @@
# this script brings up zookeeper and all the vitess components
# required for a single shard deployment.
-source ./env.sh
+source ../common/env.sh
# start topo server
if [ "${TOPO}" = "zk2" ]; then
- CELL=zone1 ./scripts/zk-up.sh
+ CELL=zone1 ../common/scripts/zk-up.sh
elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ./scripts/k3s-up.sh
+ CELL=zone1 ../common/scripts/k3s-up.sh
elif [ "${TOPO}" = "consul" ]; then
- CELL=zone1 ./scripts/consul-up.sh
+ CELL=zone1 ../common/scripts/consul-up.sh
else
- CELL=zone1 ./scripts/etcd-up.sh
+ CELL=zone1 ../common/scripts/etcd-up.sh
fi
# start vtctld
-CELL=zone1 ./scripts/vtctld-up.sh
+CELL=zone1 ../common/scripts/vtctld-up.sh
# start vttablets for keyspace commerce
for i in 100 101 102; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ./scripts/vttablet-up.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
# set the correct durability policy for the keyspace
-vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce
+vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce || fail "Failed to set keyspace durability policy on the commerce keyspace"
# start vtorc
-./scripts/vtorc-up.sh
+../common/scripts/vtorc-up.sh
# Wait for all the tablets to be up and registered in the topology server
-for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace commerce --shard 0 | wc -l | grep -q "3" && break
- sleep 1
-done;
-vtctldclient GetTablets --keyspace commerce --shard 0 | wc -l | grep -q "3" || (echo "Timed out waiting for tablets to be up in commerce/0" && exit 1)
-
-# Wait for a primary tablet to be elected in the shard
-for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace commerce --shard 0 | grep -q "primary" && break
- sleep 1
-done;
-vtctldclient GetTablets --keyspace commerce --shard 0 | grep "primary" || (echo "Timed out waiting for primary to be elected in commerce/0" && exit 1)
+# and for a primary tablet to be elected in the shard and become healthy/serving.
+wait_for_healthy_shard commerce 0 || exit 1
# create the schema
-vtctldclient ApplySchema --sql-file create_commerce_schema.sql commerce
+vtctldclient ApplySchema --sql-file create_commerce_schema.sql commerce || fail "Failed to apply schema for the commerce keyspace"
# create the vschema
-vtctldclient ApplyVSchema --vschema-file vschema_commerce_initial.json commerce
+vtctldclient ApplyVSchema --vschema-file vschema_commerce_initial.json commerce || fail "Failed to apply vschema for the commerce keyspace"
# start vtgate
-CELL=zone1 ./scripts/vtgate-up.sh
+CELL=zone1 ../common/scripts/vtgate-up.sh
# start vtadmin
-./scripts/vtadmin-up.sh
+../common/scripts/vtadmin-up.sh
diff --git a/examples/local/201_customer_tablets.sh b/examples/local/201_customer_tablets.sh
index 6a27bef6cfc..1d5bf3585ce 100755
--- a/examples/local/201_customer_tablets.sh
+++ b/examples/local/201_customer_tablets.sh
@@ -18,26 +18,16 @@
# resharding it also splits the vschema between the two keyspaces
# old (commerce) and new (customer)
-source ./env.sh
+source ../common/env.sh
for i in 200 201 202; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
# set the correct durability policy for the keyspace
-vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=semi_sync customer
+vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=semi_sync customer || fail "Failed to set keyspace durability policy on the customer keyspace"
# Wait for all the tablets to be up and registered in the topology server
-for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace customer --shard 0 | wc -l | grep -q "3" && break
- sleep 1
-done;
-vtctldclient GetTablets --keyspace customer --shard 0 | wc -l | grep -q "3" || (echo "Timed out waiting for tablets to be up in customer/0" && exit 1)
-
-# Wait for a primary tablet to be elected in the shard
-for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace customer --shard 0 | grep -q "primary" && break
- sleep 1
-done;
-vtctldclient GetTablets --keyspace customer --shard 0 | grep "primary" || (echo "Timed out waiting for primary to be elected in customer/0" && exit 1)
+# and for a primary tablet to be elected in the shard and become healthy/serving.
+wait_for_healthy_shard customer 0 || exit 1
diff --git a/examples/local/201_newkeyspace_tablets.sh b/examples/local/201_newkeyspace_tablets.sh
deleted file mode 100755
index 6a77605fe29..00000000000
--- a/examples/local/201_newkeyspace_tablets.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-set -eEo pipefail
-
-# Copyright 2020 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script creates a primary, replica, and rdonly tablet in the given
-# keyspace and initializes them.
-
-# Let's allow this to be run from anywhere
-pushd "$(dirname "${0}")" >/dev/null
-
-source ./env.sh
-
-KEYSPACE=${1:-test}
-BASETABLETNUM=${2:-2}
-SHARD=${3:-"-"}
-
-for i in ${BASETABLETNUM}00 ${BASETABLETNUM}01 ${BASETABLETNUM}02; do
- CELL=zone1 TABLET_UID="${i}" ./scripts/mysqlctl-up.sh
- SHARD=${SHARD} CELL=zone1 KEYSPACE=${KEYSPACE} TABLET_UID=$i ./scripts/vttablet-up.sh
-done
-
-# Wait for all the tablets to be up and registered in the topology server
-for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace "${KEYSPACE}" --shard "${SHARD}" | wc -l | grep -q "3" && break
- sleep 1
-done;
-vtctldclient GetTablets --keyspace "${KEYSPACE}" --shard "${SHARD}" | wc -l | grep -q "3" || (echo "Timed out waiting for tablets to be up in ${KEYSPACE}/${SHARD}" && exit 1)
-
-# Wait for a primary tablet to be elected in the shard
-for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace "${KEYSPACE}" --shard "${SHARD}" | grep -q "primary" && break
- sleep 1
-done;
-vtctldclient GetTablets --keyspace "${KEYSPACE}" --shard "${SHARD}" | grep "primary" || (echo "Timed out waiting for primary to be elected in ${KEYSPACE}/${SHARD}" && exit 1)
-
-# Go back to the original ${PWD} in the parent shell
-popd >/dev/null
diff --git a/examples/local/202_move_tables.sh b/examples/local/202_move_tables.sh
index 2b69817445e..f385acb12a3 100755
--- a/examples/local/202_move_tables.sh
+++ b/examples/local/202_move_tables.sh
@@ -17,6 +17,6 @@
# this script copies over all the data from commerce keyspace to
# customer keyspace for the customer and corder tables
-source ./env.sh
+source ../common/env.sh
vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer
diff --git a/examples/local/203_switch_reads.sh b/examples/local/203_switch_reads.sh
index 1c1abbe7ff0..4bca7e4e257 100755
--- a/examples/local/203_switch_reads.sh
+++ b/examples/local/203_switch_reads.sh
@@ -17,6 +17,6 @@
# this script migrates traffic for the new customer keyspace to the new
# tablets of types rdonly and replica
-source ./env.sh
+source ../common/env.sh
vtctlclient MoveTables -- --tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer
diff --git a/examples/local/204_switch_writes.sh b/examples/local/204_switch_writes.sh
index a04f6ffc947..743ca1e2512 100755
--- a/examples/local/204_switch_writes.sh
+++ b/examples/local/204_switch_writes.sh
@@ -17,6 +17,6 @@
# this script migrates primary traffic for the customer keyspace to the
# new primary tablet
-source ./env.sh
+source ../common/env.sh
vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer
diff --git a/examples/local/205_clean_commerce.sh b/examples/local/205_clean_commerce.sh
index 37824052d27..5d307a231d3 100755
--- a/examples/local/205_clean_commerce.sh
+++ b/examples/local/205_clean_commerce.sh
@@ -17,7 +17,7 @@
# this script removes the customer and corder tables from the commerce
# keyspace
-source ./env.sh
+source ../common/env.sh
vtctlclient MoveTables Complete customer.commerce2customer
diff --git a/examples/local/301_customer_sharded.sh b/examples/local/301_customer_sharded.sh
index 8f008d0638b..ad80cdd98dd 100755
--- a/examples/local/301_customer_sharded.sh
+++ b/examples/local/301_customer_sharded.sh
@@ -20,9 +20,9 @@
# it also changes the customer vschema from unsharded to sharded and
# sets up the necessary vindexes
-source ./env.sh
+source ../common/env.sh
-vtctldclient ApplySchema --sql-file create_commerce_seq.sql commerce
-vtctldclient ApplyVSchema --vschema-file vschema_commerce_seq.json commerce
-vtctldclient ApplyVSchema --vschema-file vschema_customer_sharded.json customer
-vtctldclient ApplySchema --sql-file create_customer_sharded.sql customer
+vtctldclient ApplySchema --sql-file create_commerce_seq.sql commerce || fail "Failed to create sequence tables in the commerce keyspace"
+vtctldclient ApplyVSchema --vschema-file vschema_commerce_seq.json commerce || fail "Failed to create vschema sequences in the commerce keyspace"
+vtctldclient ApplyVSchema --vschema-file vschema_customer_sharded.json customer || fail "Failed to create vschema in sharded customer keyspace"
+vtctldclient ApplySchema --sql-file create_customer_sharded.sql customer || fail "Failed to create schema in sharded customer keyspace"
diff --git a/examples/local/302_new_shards.sh b/examples/local/302_new_shards.sh
index 40569e2c20d..b2ac94f5c7f 100755
--- a/examples/local/302_new_shards.sh
+++ b/examples/local/302_new_shards.sh
@@ -17,30 +17,20 @@
# this script brings up new tablets for the two new shards that we will
# be creating in the customer keyspace and copies the schema
-source ./env.sh
+source ../common/env.sh
for i in 300 301 302; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
for i in 400 401 402; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
+ SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
for shard in "-80" "80-"; do
- # Wait for all the tablets to be up and registered in the topology server
- for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace customer --shard $shard | wc -l | grep -q "3" && break
- sleep 1
- done;
- vtctldclient GetTablets --keyspace customer --shard $shard | wc -l | grep -q "3" || (echo "Timed out waiting for tablets to be up in customer/$shard" && exit 1)
-
- # Wait for a primary tablet to be elected in the shard
- for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace customer --shard $shard | grep -q "primary" && break
- sleep 1
- done;
- vtctldclient GetTablets --keyspace customer --shard $shard | grep "primary" || (echo "Timed out waiting for primary to be elected in customer/$shard" && exit 1)
+ # Wait for all the tablets to be up and registered in the topology server
+ # and for a primary tablet to be elected in the shard and become healthy/serving.
+ wait_for_healthy_shard customer "${shard}" || exit 1
done;
diff --git a/examples/local/303_reshard.sh b/examples/local/303_reshard.sh
index 510ba68b815..ea12987e9ed 100755
--- a/examples/local/303_reshard.sh
+++ b/examples/local/303_reshard.sh
@@ -17,6 +17,6 @@
# this script copies the data from customer/0 to customer/-80 and customer/80-
# each row will be copied to exactly one shard based on the vindex value
-source ./env.sh
+source ../common/env.sh
vtctlclient Reshard -- --source_shards '0' --target_shards '-80,80-' Create customer.cust2cust
diff --git a/examples/local/304_switch_reads.sh b/examples/local/304_switch_reads.sh
index 67d0fb45e85..52d6093f4ff 100755
--- a/examples/local/304_switch_reads.sh
+++ b/examples/local/304_switch_reads.sh
@@ -16,6 +16,6 @@
# this script migrates traffic for the rdonly and replica tablets
-source ./env.sh
+source ../common/env.sh
vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust
diff --git a/examples/local/305_switch_writes.sh b/examples/local/305_switch_writes.sh
index 289019b1363..9bbc7ed9ea5 100755
--- a/examples/local/305_switch_writes.sh
+++ b/examples/local/305_switch_writes.sh
@@ -16,6 +16,6 @@
# this script migrates traffic for the primary tablet
-source ./env.sh
+source ../common/env.sh
vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust
diff --git a/examples/local/306_down_shard_0.sh b/examples/local/306_down_shard_0.sh
index 0d956553cd5..db860b3e23c 100755
--- a/examples/local/306_down_shard_0.sh
+++ b/examples/local/306_down_shard_0.sh
@@ -15,11 +15,11 @@
# limitations under the License.
# this script brings down the tablets for customer/0 keyspace
-source ./env.sh
+source ../common/env.sh
vtctlclient Reshard Complete customer.cust2cust
for i in 200 201 202; do
- CELL=zone1 TABLET_UID=$i ./scripts/vttablet-down.sh
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-down.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh
done
diff --git a/examples/local/307_delete_shard_0.sh b/examples/local/307_delete_shard_0.sh
index a09e8f15c00..e0781990f56 100755
--- a/examples/local/307_delete_shard_0.sh
+++ b/examples/local/307_delete_shard_0.sh
@@ -16,6 +16,6 @@
# this script deletes the old shard 0 which has been replaced by 2 shards
-source ./env.sh
+source ../common/env.sh
-vtctldclient DeleteShards --recursive customer/0
+vtctldclient DeleteShards --force --recursive customer/0
diff --git a/examples/local/401_teardown.sh b/examples/local/401_teardown.sh
index ae78d262486..08dcbf3cd29 100755
--- a/examples/local/401_teardown.sh
+++ b/examples/local/401_teardown.sh
@@ -17,43 +17,42 @@
# We should not assume that any of the steps have been executed.
# This makes it possible for a user to cleanup at any point.
-source ./env.sh
+source ../common/env.sh
-./scripts/vtadmin-down.sh
+../common/scripts/vtadmin-down.sh
-./scripts/vtorc-down.sh
+../common/scripts/vtorc-down.sh
-./scripts/vtgate-down.sh
+../common/scripts/vtgate-down.sh
for tablet in 100 200 300 400; do
if vtctlclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
# The zero tablet is up. Try to shutdown 0-2 tablet + mysqlctl
for i in 0 1 2; do
- uid=$(($tablet + $i))
+ uid=$((tablet + i))
printf -v alias '%s-%010d' 'zone1' $uid
echo "Shutting down tablet $alias"
- CELL=zone1 TABLET_UID=$uid ./scripts/vttablet-down.sh
- CELL=zone1 TABLET_UID=$uid ./scripts/mysqlctl-down.sh
+ CELL=zone1 TABLET_UID=$uid ../common/scripts/vttablet-down.sh
+ CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh
done
fi
done
-./scripts/vtctld-down.sh
+../common/scripts/vtctld-down.sh
if [ "${TOPO}" = "zk2" ]; then
- CELL=zone1 ./scripts/zk-down.sh
+ CELL=zone1 ../common/scripts/zk-down.sh
elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ./scripts/k3s-down.sh
+ CELL=zone1 ../common/scripts/k3s-down.sh
elif [ "${TOPO}" = "consul" ]; then
- CELL=zone1 ./scripts/consul-down.sh
+ CELL=zone1 ../common/scripts/consul-down.sh
else
- CELL=zone1 ./scripts/etcd-down.sh
+ CELL=zone1 ../common/scripts/etcd-down.sh
fi
# pedantic check: grep for any remaining processes
-if [ ! -z "$VTDATAROOT" ]; then
-
+if [ -n "$VTDATAROOT" ]; then
if pgrep -f -l "$VTDATAROOT" >/dev/null; then
echo "ERROR: Stale processes detected! It is recommended to manuallly kill them:"
pgrep -f -l "$VTDATAROOT"
@@ -63,7 +62,6 @@ if [ ! -z "$VTDATAROOT" ]; then
# shellcheck disable=SC2086
rm -r ${VTDATAROOT:?}/*
-
fi
disown -a
diff --git a/examples/local/README.md b/examples/local/README.md
index 220ffccc1fc..cb846b7c8b1 100644
--- a/examples/local/README.md
+++ b/examples/local/README.md
@@ -6,7 +6,7 @@ This document contains the summary of the commands to be run.
```
# Setup environment and aliases
-source env.sh
+source ../common/env.sh
# Bring up initial cluster and commerce keyspace
./101_initial_cluster.sh
@@ -47,7 +47,7 @@ vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust
# Down shard 0
./306_down_shard_0.sh
-vtctlclient DeleteShard -- --recursive customer/0
+vtctlclient DeleteShard -- --force --recursive customer/0
# Down cluster
./401_teardown.sh
diff --git a/examples/local/backups/start_cluster.sh b/examples/local/backups/start_cluster.sh
deleted file mode 100755
index ff4fdd23342..00000000000
--- a/examples/local/backups/start_cluster.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-# Copyright 2022 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# this script brings up new tablets for the two new shards that we will
-# be creating in the customer keyspace and copies the schema
-
-source ./env.sh
-
-# start topo server
-if [ "${TOPO}" = "zk2" ]; then
- CELL=zone1 ./scripts/zk-up.sh
-elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ./scripts/k3s-up.sh
-else
- CELL=zone1 ./scripts/etcd-up.sh
-fi
-
-# start vtctld
-CELL=zone1 ./scripts/vtctld-up.sh
-
-
-# start vttablets for keyspace commerce
-for i in 100 101 102; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ./scripts/vttablet-up.sh
-done
-
-# set one of the replicas to primary
-vtctldclient InitShardPrimary --force commerce/0 zone1-100
-
-# create the schema for commerce
-vtctlclient ApplySchema -- --sql-file backups/create_commerce_schema.sql commerce
-vtctlclient ApplyVSchema -- --vschema_file ./vschema_commerce_seq.json commerce
-
-
-# start vttablets for keyspace customer
-for i in 200 201 202; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh
-done
-for i in 300 301 302; do
- CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-up.sh
- SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ./scripts/vttablet-up.sh
-done
-
-# set one of the replicas to primary
-vtctldclient InitShardPrimary --force customer/-80 zone1-200
-vtctldclient InitShardPrimary --force customer/80- zone1-300
-
-# create the schema for customer
-vtctlclient ApplySchema -- --sql-file backups/create_customer_schema.sql customer
-vtctlclient ApplyVSchema -- --vschema_file ./vschema_customer_sharded.json customer
-
-
-# start vtgate
-CELL=zone1 ./scripts/vtgate-up.sh
-
-sleep 5
-
-mysql < ../common/insert_commerce_data.sql
diff --git a/examples/local/scripts/k3s-down.sh b/examples/local/scripts/k3s-down.sh
deleted file mode 100755
index 590dc604e3e..00000000000
--- a/examples/local/scripts/k3s-down.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that stops the k3s server started by k3s-up.sh.
-
-set -e
-
-# shellcheck source=./env.sh
-# shellcheck disable=SC1091
-source ./env.sh
-
-# Stop K3s server.
-echo "Stopping k3s server..."
-
-pid=`cat $VTDATAROOT/tmp/k3s.pid`
-echo "Stopping k3s..."
-kill -9 $pid
diff --git a/examples/local/scripts/mysqlctl-up.sh b/examples/local/scripts/mysqlctl-up.sh
deleted file mode 100755
index ae041cf951d..00000000000
--- a/examples/local/scripts/mysqlctl-up.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a single shard vttablet deployment.
-
-source ./env.sh
-
-cell=${CELL:-'test'}
-uid=$TABLET_UID
-mysql_port=$[17000 + $uid]
-printf -v alias '%s-%010d' $cell $uid
-printf -v tablet_dir 'vt_%010d' $uid
-
-mkdir -p $VTDATAROOT/backups
-
-echo "Starting MySQL for tablet $alias..."
-action="init"
-
-if [ -d $VTDATAROOT/$tablet_dir ]; then
- echo "Resuming from existing vttablet dir:"
- echo " $VTDATAROOT/$tablet_dir"
- action='start'
-fi
-
-mysqlctl \
- --log_dir $VTDATAROOT/tmp \
- --tablet_uid $uid \
- --mysql_port $mysql_port \
- $action
diff --git a/examples/local/scripts/vtadmin-up.sh b/examples/local/scripts/vtadmin-up.sh
deleted file mode 100755
index 5ee04c9a959..00000000000
--- a/examples/local/scripts/vtadmin-up.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-source ./env.sh
-
-log_dir="${VTDATAROOT}/tmp"
-web_dir="../../web/vtadmin"
-
-vtadmin_api_port=14200
-vtadmin_web_port=14201
-
-vtadmin \
- --addr ":${vtadmin_api_port}" \
- --http-origin "http://localhost:${vtadmin_web_port}" \
- --http-tablet-url-tmpl "http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \
- --tracer "opentracing-jaeger" \
- --grpc-tracing \
- --http-tracing \
- --logtostderr \
- --alsologtostderr \
- --rbac \
- --rbac-config="./vtadmin/rbac.yaml" \
- --cluster "id=local,name=local,discovery=staticfile,discovery-staticfile-path=./vtadmin/discovery.json,tablet-fqdn-tmpl={{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \
- > "${log_dir}/vtadmin-api.out" 2>&1 &
-
-vtadmin_api_pid=$!
-echo ${vtadmin_api_pid} > "${log_dir}/vtadmin-api.pid"
-
-echo "\
-vtadmin-api is running!
- - API: http://localhost:${vtadmin_api_port}
- - Logs: ${log_dir}/vtadmin-api.out
- - PID: ${vtadmin_api_pid}
-"
-
-# As a TODO, it'd be nice to make the assumption that vtadmin-web is already
-# installed and built (since we assume that `make` has already been run for
-# other Vitess components.)
-npm --prefix $web_dir --silent install
-
-REACT_APP_VTADMIN_API_ADDRESS="http://localhost:${vtadmin_api_port}" \
- REACT_APP_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \
- npm run --prefix $web_dir build
-
-"${web_dir}/node_modules/.bin/serve" --no-clipboard -l $vtadmin_web_port -s "${web_dir}/build" \
- > "${log_dir}/vtadmin-web.out" 2>&1 &
-
-vtadmin_web_pid=$!
-echo ${vtadmin_web_pid} > "${log_dir}/vtadmin-web.pid"
-
-echo "\
-vtadmin-web is running!
- - Browser: http://localhost:${vtadmin_web_port}
- - Logs: ${log_dir}/vtadmin-web.out
- - PID: ${vtadmin_web_pid}
-"
diff --git a/examples/local/scripts/vtctld-down.sh b/examples/local/scripts/vtctld-down.sh
deleted file mode 100755
index d96fa3b927f..00000000000
--- a/examples/local/scripts/vtctld-down.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that stops vtctld.
-
-source ./env.sh
-
-echo "Stopping vtctld..."
-kill -9 `cat $VTDATAROOT/tmp/vtctld.pid`
diff --git a/examples/local/scripts/vtgate-down.sh b/examples/local/scripts/vtgate-down.sh
deleted file mode 100755
index 9da0a7179df..00000000000
--- a/examples/local/scripts/vtgate-down.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that stops the instance started by vtgate-up.sh.
-
-source ./env.sh
-
-# Stop vtgate.
-echo "Stopping vtgate..."
-kill `cat $VTDATAROOT/tmp/vtgate.pid`
diff --git a/examples/local/scripts/vtgate-up.sh b/examples/local/scripts/vtgate-up.sh
deleted file mode 100755
index cb33e27839b..00000000000
--- a/examples/local/scripts/vtgate-up.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that starts a single vtgate.
-
-source ./env.sh
-
-cell=${CELL:-'test'}
-web_port=15001
-grpc_port=15991
-mysql_server_port=15306
-mysql_server_socket_path="/tmp/mysql.sock"
-
-# Start vtgate.
-# shellcheck disable=SC2086
-vtgate \
- $TOPOLOGY_FLAGS \
- --log_dir $VTDATAROOT/tmp \
- --log_queries_to_file $VTDATAROOT/tmp/vtgate_querylog.txt \
- --port $web_port \
- --grpc_port $grpc_port \
- --mysql_server_port $mysql_server_port \
- --mysql_server_socket_path $mysql_server_socket_path \
- --cell $cell \
- --cells_to_watch $cell \
- --tablet_types_to_wait PRIMARY,REPLICA \
- --service_map 'grpc-vtgateservice' \
- --pid_file $VTDATAROOT/tmp/vtgate.pid \
- --mysql_auth_server_impl none \
- > $VTDATAROOT/tmp/vtgate.out 2>&1 &
-
-# Block waiting for vtgate to be listening
-# Not the same as healthy
-
-echo "Waiting for vtgate to be up..."
-while true; do
- curl -I "http://$hostname:$web_port/debug/status" >/dev/null 2>&1 && break
- sleep 0.1
-done;
-echo "vtgate is up!"
-
-echo "Access vtgate at http://$hostname:$web_port/debug/status"
-
-disown -a
diff --git a/examples/local/vtadmin/rbac.yaml b/examples/local/vtadmin/rbac.yaml
deleted file mode 100644
index 1b46933ba39..00000000000
--- a/examples/local/vtadmin/rbac.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-rules:
- - resource: "*"
- actions:
- - "get"
- - "create"
- - "delete"
- - "put"
- - "ping"
- subjects: ["*"]
- clusters: ["*"]
- - resource: "Shard"
- actions:
- - "emergency_reparent_shard"
- - "planned_reparent_shard"
- subjects: ["*"]
- clusters:
- - "local"
diff --git a/examples/local/vtexplain/atomicity_method1.sh b/examples/local/vtexplain/atomicity_method1.sh
deleted file mode 100644
index b946ec7434f..00000000000
--- a/examples/local/vtexplain/atomicity_method1.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-set -x
-vtexplain -vschema-file vschema.json -schema-file schema.sql -shards 4 -sql 'INSERT /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ INTO t1 (c1) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);'
diff --git a/examples/local/vtexplain/atomicity_method2.sh b/examples/local/vtexplain/atomicity_method2.sh
deleted file mode 100644
index 33a09cbfbee..00000000000
--- a/examples/local/vtexplain/atomicity_method2.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-set -x
-vtexplain -vschema-file vschema.json -schema-file schema.sql -shards 4 -sql 'SET transaction_mode="single"; INSERT INTO t1 (c1) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);'
diff --git a/examples/local/vtexplain/atomicity_method2_reads.sh b/examples/local/vtexplain/atomicity_method2_reads.sh
deleted file mode 100644
index f32732ccfc5..00000000000
--- a/examples/local/vtexplain/atomicity_method2_reads.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-set -x
-vtexplain -vschema-file vschema.json -schema-file schema.sql -shards 4 -sql 'SET transaction_mode="single"; BEGIN; SELECT * from t1; COMMIT;'
diff --git a/examples/local/vtexplain/atomicity_method2_working.sh b/examples/local/vtexplain/atomicity_method2_working.sh
deleted file mode 100644
index f9c84e87a72..00000000000
--- a/examples/local/vtexplain/atomicity_method2_working.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-set -x
-vtexplain -vschema-file vschema.json -schema-file schema.sql -shards 4 -sql 'SET transaction_mode="single"; INSERT INTO t1 (c1) values (10),(14),(15),(16);'
diff --git a/examples/local/vtexplain/atomicity_method3.sh b/examples/local/vtexplain/atomicity_method3.sh
deleted file mode 100644
index 772bae21460..00000000000
--- a/examples/local/vtexplain/atomicity_method3.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-set -x
-# SET transaction_mode="multi" is implied by default
-vtexplain -vschema-file vschema.json -schema-file schema.sql -shards 4 -sql 'INSERT INTO t1 (c1) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);'
diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml
index 81efad081ba..51311f1dbfb 100644
--- a/examples/operator/101_initial_cluster.yaml
+++ b/examples/operator/101_initial_cluster.yaml
@@ -8,14 +8,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v16.0.4
+ vtadmin: vitess/vtadmin:v16.0.4
+ vtgate: vitess/lite:v16.0.4
+ vttablet: vitess/lite:v16.0.4
+ vtbackup: vitess/lite:v16.0.4
+ vtorc: vitess/lite:v16.0.4
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql80Compatible: vitess/lite:v16.0.4
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -72,15 +72,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
@@ -88,15 +87,12 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
replicas: 2
vttablet:
extraFlags:
- disable_active_reparents: "true"
db_charset: utf8mb4
resources:
limits:
@@ -107,7 +103,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
@@ -237,17 +233,7 @@ stringData:
clusters: ["*"]
- resource: "Shard"
actions:
- - "emergency_reparent_shard"
- - "planned_reparent_shard"
+ - "emergency_failover_shard"
+ - "planned_failover_shard"
subjects: ["*"]
- clusters:
- - "local"
- orc_config.json: |
- {
- "Debug": true,
- "MySQLTopologyUser": "orc_client_user",
- "MySQLTopologyPassword": "orc_client_user_password",
- "MySQLReplicaUser": "vt_repl",
- "MySQLReplicaPassword": "",
- "RecoveryPeriodBlockSeconds": 5
- }
+ clusters: ["*"]
diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml
index 52f110678a2..0f28bc2ee10 100644
--- a/examples/operator/201_customer_tablets.yaml
+++ b/examples/operator/201_customer_tablets.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v16.0.4
+ vtadmin: vitess/vtadmin:v16.0.4
+ vtgate: vitess/lite:v16.0.4
+ vttablet: vitess/lite:v16.0.4
+ vtbackup: vitess/lite:v16.0.4
+ vtorc: vitess/lite:v16.0.4
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql80Compatible: vitess/lite:v16.0.4
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -68,15 +68,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
@@ -84,15 +83,12 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
replicas: 2
vttablet:
extraFlags:
- disable_active_reparents: "true"
db_charset: utf8mb4
resources:
limits:
@@ -103,7 +99,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
@@ -122,8 +118,6 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
@@ -140,7 +134,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml
index 44a7aeea8ef..83bc869ee25 100644
--- a/examples/operator/302_new_shards.yaml
+++ b/examples/operator/302_new_shards.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v16.0.4
+ vtadmin: vitess/vtadmin:v16.0.4
+ vtgate: vitess/lite:v16.0.4
+ vttablet: vitess/lite:v16.0.4
+ vtbackup: vitess/lite:v16.0.4
+ vtorc: vitess/lite:v16.0.4
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql80Compatible: vitess/lite:v16.0.4
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -68,15 +68,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
@@ -84,15 +83,12 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
replicas: 2
vttablet:
extraFlags:
- disable_active_reparents: "true"
db_charset: utf8mb4
resources:
limits:
@@ -103,7 +99,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
@@ -122,8 +118,6 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
@@ -140,7 +134,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
@@ -155,8 +149,6 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
@@ -173,7 +165,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml
index a80e57c2f2a..68c51aecdc0 100644
--- a/examples/operator/306_down_shard_0.yaml
+++ b/examples/operator/306_down_shard_0.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v16.0.4
+ vtadmin: vitess/vtadmin:v16.0.4
+ vtgate: vitess/lite:v16.0.4
+ vttablet: vitess/lite:v16.0.4
+ vtbackup: vitess/lite:v16.0.4
+ vtorc: vitess/lite:v16.0.4
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql80Compatible: vitess/lite:v16.0.4
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -68,15 +68,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
@@ -84,15 +83,12 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
replicas: 2
vttablet:
extraFlags:
- disable_active_reparents: "true"
db_charset: utf8mb4
resources:
limits:
@@ -103,7 +99,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
@@ -122,8 +118,6 @@ spec:
databaseInitScriptSecret:
name: example-cluster-config
key: init_db.sql
- replication:
- enforceSemiSync: false
tabletPools:
- cell: zone1
type: replica
@@ -140,7 +134,7 @@ spec:
mysqld:
resources:
limits:
- memory: 512Mi
+ memory: 1024Mi
requests:
cpu: 100m
memory: 512Mi
diff --git a/examples/operator/operator.yaml b/examples/operator/operator.yaml
index aa4ce0ef75c..82dbee746c5 100644
--- a/examples/operator/operator.yaml
+++ b/examples/operator/operator.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.6.2
+ controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: etcdlockservers.planetscale.com
spec:
@@ -74,8 +74,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -116,6 +143,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -143,6 +171,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -152,6 +181,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -167,6 +197,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -178,6 +209,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -223,6 +255,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
initContainers:
x-kubernetes-preserve-unknown-fields: true
@@ -242,6 +275,18 @@ spec:
type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -281,19 +326,12 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.6.2
+ controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: vitessbackups.planetscale.com
spec:
@@ -341,19 +379,12 @@ spec:
type: object
served: true
storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.6.2
+ controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: vitessbackupstorages.planetscale.com
spec:
@@ -514,19 +545,12 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.6.2
+ controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: vitesscells.planetscale.com
spec:
@@ -605,6 +629,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -614,6 +639,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -629,6 +655,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -640,6 +667,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -683,6 +711,18 @@ spec:
type: integer
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -795,6 +835,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
images:
properties:
@@ -852,8 +893,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -894,6 +962,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -921,6 +990,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -930,6 +1000,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -945,6 +1016,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -956,6 +1028,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -1001,6 +1074,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
initContainers:
x-kubernetes-preserve-unknown-fields: true
@@ -1020,6 +1094,18 @@ spec:
type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -1124,19 +1210,12 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.6.2
+ controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: vitessclusters.planetscale.com
spec:
@@ -1341,6 +1420,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -1350,6 +1430,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -1365,6 +1446,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -1376,6 +1458,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -1419,6 +1502,18 @@ spec:
type: integer
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -1544,8 +1639,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -1586,6 +1708,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -1613,6 +1736,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -1622,6 +1746,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -1637,6 +1762,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -1648,6 +1774,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -1693,6 +1820,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
initContainers:
x-kubernetes-preserve-unknown-fields: true
@@ -1712,6 +1840,18 @@ spec:
type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -1823,8 +1963,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -1865,6 +2032,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -1892,6 +2060,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -1901,6 +2070,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -1916,6 +2086,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -1927,6 +2098,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -1972,6 +2144,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
initContainers:
x-kubernetes-preserve-unknown-fields: true
@@ -1991,6 +2164,18 @@ spec:
type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2052,6 +2237,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
images:
properties:
@@ -2131,8 +2317,6 @@ spec:
type: object
replication:
properties:
- enforceSemiSync:
- type: boolean
initializeBackup:
type: boolean
initializeMaster:
@@ -2174,8 +2358,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2216,6 +2427,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -2285,6 +2497,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -2294,6 +2507,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -2309,6 +2523,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -2320,6 +2535,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -2359,6 +2575,18 @@ spec:
type: string
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2407,6 +2635,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2450,6 +2690,7 @@ spec:
properties:
parts:
format: int32
+ maximum: 65536
minimum: 1
type: integer
shardTemplate:
@@ -2471,8 +2712,6 @@ spec:
type: object
replication:
properties:
- enforceSemiSync:
- type: boolean
initializeBackup:
type: boolean
initializeMaster:
@@ -2514,8 +2753,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2556,6 +2822,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -2625,6 +2892,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -2634,6 +2902,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -2649,6 +2918,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -2660,6 +2930,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -2699,6 +2970,18 @@ spec:
type: string
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2747,6 +3030,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2801,17 +3096,6 @@ spec:
additionalProperties:
type: string
type: object
- configSecret:
- properties:
- key:
- type: string
- name:
- type: string
- volumeName:
- type: string
- required:
- - key
- type: object
extraEnv:
items:
properties:
@@ -2832,6 +3116,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -2841,6 +3126,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -2856,6 +3142,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -2867,6 +3154,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -2906,6 +3194,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -2936,8 +3236,6 @@ spec:
x-kubernetes-preserve-unknown-fields: true
tolerations:
x-kubernetes-preserve-unknown-fields: true
- required:
- - configSecret
type: object
required:
- name
@@ -3019,6 +3317,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -3028,6 +3327,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -3043,6 +3343,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -3054,6 +3355,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -3096,6 +3398,18 @@ spec:
type: integer
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -3141,6 +3455,18 @@ spec:
type: array
apiResources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -3182,6 +3508,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -3191,6 +3518,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -3206,6 +3534,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -3217,6 +3546,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -3285,6 +3615,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
webResources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -3416,19 +3758,12 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.6.2
+ controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: vitesskeyspaces.planetscale.com
spec:
@@ -3621,6 +3956,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
images:
properties:
@@ -3683,8 +4019,6 @@ spec:
type: object
replication:
properties:
- enforceSemiSync:
- type: boolean
initializeBackup:
type: boolean
initializeMaster:
@@ -3726,8 +4060,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -3768,6 +4129,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -3837,6 +4199,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -3846,6 +4209,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -3861,6 +4225,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -3872,6 +4237,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -3911,6 +4277,18 @@ spec:
type: string
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -3959,6 +4337,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -4002,6 +4392,7 @@ spec:
properties:
parts:
format: int32
+ maximum: 65536
minimum: 1
type: integer
shardTemplate:
@@ -4023,8 +4414,6 @@ spec:
type: object
replication:
properties:
- enforceSemiSync:
- type: boolean
initializeBackup:
type: boolean
initializeMaster:
@@ -4066,8 +4455,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -4108,6 +4524,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -4177,6 +4594,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -4186,6 +4604,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -4201,6 +4620,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -4212,6 +4632,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -4251,6 +4672,18 @@ spec:
type: string
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -4299,6 +4732,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -4387,17 +4832,6 @@ spec:
additionalProperties:
type: string
type: object
- configSecret:
- properties:
- key:
- type: string
- name:
- type: string
- volumeName:
- type: string
- required:
- - key
- type: object
extraEnv:
items:
properties:
@@ -4418,6 +4852,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -4427,6 +4862,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -4442,6 +4878,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -4453,6 +4890,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -4492,6 +4930,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -4522,8 +4972,6 @@ spec:
x-kubernetes-preserve-unknown-fields: true
tolerations:
x-kubernetes-preserve-unknown-fields: true
- required:
- - configSecret
type: object
zoneMap:
additionalProperties:
@@ -4659,19 +5107,12 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.6.2
+ controller-gen.kubebuilder.io/version: v0.11.3
creationTimestamp: null
name: vitessshards.planetscale.com
spec:
@@ -4873,6 +5314,7 @@ spec:
name:
type: string
type: object
+ x-kubernetes-map-type: atomic
type: array
images:
properties:
@@ -4909,8 +5351,6 @@ spec:
type: string
replication:
properties:
- enforceSemiSync:
- type: boolean
initializeBackup:
type: boolean
initializeMaster:
@@ -4952,8 +5392,35 @@ spec:
- kind
- name
type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ properties:
+ apiGroup:
+ type: string
+ kind:
+ type: string
+ name:
+ type: string
+ namespace:
+ type: string
+ required:
+ - kind
+ - name
+ type: object
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -4994,6 +5461,7 @@ spec:
type: string
type: object
type: object
+ x-kubernetes-map-type: atomic
storageClassName:
type: string
volumeMode:
@@ -5063,6 +5531,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -5072,6 +5541,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -5087,6 +5557,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -5098,6 +5569,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -5137,6 +5609,18 @@ spec:
type: string
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -5185,6 +5669,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -5258,17 +5754,6 @@ spec:
additionalProperties:
type: string
type: object
- configSecret:
- properties:
- key:
- type: string
- name:
- type: string
- volumeName:
- type: string
- required:
- - key
- type: object
extraEnv:
items:
properties:
@@ -5289,6 +5774,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
fieldRef:
properties:
apiVersion:
@@ -5298,6 +5784,7 @@ spec:
required:
- fieldPath
type: object
+ x-kubernetes-map-type: atomic
resourceFieldRef:
properties:
containerName:
@@ -5313,6 +5800,7 @@ spec:
required:
- resource
type: object
+ x-kubernetes-map-type: atomic
secretKeyRef:
properties:
key:
@@ -5324,6 +5812,7 @@ spec:
required:
- key
type: object
+ x-kubernetes-map-type: atomic
type: object
required:
- name
@@ -5363,6 +5852,18 @@ spec:
x-kubernetes-preserve-unknown-fields: true
resources:
properties:
+ claims:
+ items:
+ properties:
+ name:
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
@@ -5393,8 +5894,6 @@ spec:
x-kubernetes-preserve-unknown-fields: true
tolerations:
x-kubernetes-preserve-unknown-fields: true
- required:
- - configSecret
type: object
zoneMap:
additionalProperties:
@@ -5516,12 +6015,6 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
---
apiVersion: v1
kind: ServiceAccount
@@ -5652,11 +6145,11 @@ spec:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: vitess-operator
- image: planetscale/vitess-operator:latest
+ image: planetscale/vitess-operator:v2.9.4
name: vitess-operator
resources:
limits:
- memory: 128Mi
+ memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
diff --git a/examples/region_sharding/101_initial_cluster.sh b/examples/region_sharding/101_initial_cluster.sh
index 210334cf18f..c2692440189 100755
--- a/examples/region_sharding/101_initial_cluster.sh
+++ b/examples/region_sharding/101_initial_cluster.sh
@@ -17,46 +17,43 @@
# this script brings up topo server and all the vitess components
# required for a single shard deployment.
-source ./env.sh
+source ../common/env.sh
# start topo server
if [ "${TOPO}" = "zk2" ]; then
- CELL=zone1 ./scripts/zk-up.sh
+ CELL=zone1 ../common/scripts/zk-up.sh
elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ./scripts/k3s-up.sh
+ CELL=zone1 ../common/scripts/k3s-up.sh
else
- CELL=zone1 ./scripts/etcd-up.sh
+ CELL=zone1 ../common/scripts/etcd-up.sh
fi
# start vtctld
-CELL=zone1 ./scripts/vtctld-up.sh
+CELL=zone1 ../common/scripts/vtctld-up.sh
# start unsharded keyspace and tablet
-CELL=zone1 TABLET_UID=100 ./scripts/mysqlctl-up.sh
-SHARD=0 CELL=zone1 KEYSPACE=main TABLET_UID=100 ./scripts/vttablet-up.sh
+CELL=zone1 TABLET_UID=100 ../common/scripts/mysqlctl-up.sh
+SHARD=0 CELL=zone1 KEYSPACE=main TABLET_UID=100 ../common/scripts/vttablet-up.sh
# set the correct durability policy for the keyspace
-vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=none main
+vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=none main || fail "Failed to set keyspace durability policy on the main keyspace"
# start vtorc
-./scripts/vtorc-up.sh
+../common/scripts/vtorc-up.sh
-# Wait for a primary tablet to be elected in the shard
-for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace main --shard 0 | grep -q "primary" && break
- sleep 1
-done;
-vtctldclient GetTablets --keyspace main --shard 0 | grep "primary" || (echo "Timed out waiting for primary to be elected in main/0" && exit 1)
+# Wait for a primary tablet to be elected in the shard and for it
+# to become healthy/sherving.
+wait_for_healthy_shard main 0 1 || exit 1
# create the schema
-vtctldclient ApplySchema --sql-file create_main_schema.sql main
+vtctldclient ApplySchema --sql-file create_main_schema.sql main || fail "Failed to apply schema for the main keyspace"
# create the vschema
-vtctldclient ApplyVSchema --vschema-file main_vschema_initial.json main
+vtctldclient ApplyVSchema --vschema-file main_vschema_initial.json main || fail "Failed to apply vschema for the main keyspace"
# start vtgate
-CELL=zone1 ./scripts/vtgate-up.sh
+CELL=zone1 ../common/scripts/vtgate-up.sh
# start vtadmin
-./scripts/vtadmin-up.sh
+../common/scripts/vtadmin-up.sh
diff --git a/examples/region_sharding/201_main_sharded.sh b/examples/region_sharding/201_main_sharded.sh
index b8a589c0d49..387f89506db 100755
--- a/examples/region_sharding/201_main_sharded.sh
+++ b/examples/region_sharding/201_main_sharded.sh
@@ -14,20 +14,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-source ./env.sh
+source ../common/env.sh
# apply sharding vschema
-vtctldclient ApplyVSchema --vschema-file main_vschema_sharded.json main
+vtctldclient ApplyVSchema --vschema-file main_vschema_sharded.json main || fail "Failed to apply vschema for the sharded main keyspace"
# optional: create the schema needed for lookup vindex
#vtctlclient ApplySchema --sql-file create_lookup_schema.sql main
# create the lookup vindex
-vtctlclient CreateLookupVindex -- --tablet_types=PRIMARY main "$(cat lookup_vindex.json)"
+vtctlclient CreateLookupVindex -- --tablet_types=PRIMARY main "$(cat lookup_vindex.json)" || fail "Failed to create lookup vindex in main keyspace"
# we have to wait for replication to catch up
# Can see on vttablet status page Vreplication that copy is complete
sleep 5
-#externalize vindex
-vtctlclient ExternalizeVindex main.customer_region_lookup
+# externalize vindex
+vtctlclient ExternalizeVindex main.customer_region_lookup || fail "Failed to externalize customer_region_lookup vindex in the main keyspace"
diff --git a/examples/region_sharding/202_new_tablets.sh b/examples/region_sharding/202_new_tablets.sh
index e570ae3b3c2..d9134a6a7a3 100755
--- a/examples/region_sharding/202_new_tablets.sh
+++ b/examples/region_sharding/202_new_tablets.sh
@@ -14,23 +14,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-source ./env.sh
+source ../common/env.sh
# start vttablets for new shards. we start only one tablet each (primary)
-CELL=zone1 TABLET_UID=200 ./scripts/mysqlctl-up.sh
-SHARD=-40 CELL=zone1 KEYSPACE=main TABLET_UID=200 ./scripts/vttablet-up.sh
-CELL=zone1 TABLET_UID=300 ./scripts/mysqlctl-up.sh
-SHARD=40-80 CELL=zone1 KEYSPACE=main TABLET_UID=300 ./scripts/vttablet-up.sh
-CELL=zone1 TABLET_UID=400 ./scripts/mysqlctl-up.sh
-SHARD=80-c0 CELL=zone1 KEYSPACE=main TABLET_UID=400 ./scripts/vttablet-up.sh
-CELL=zone1 TABLET_UID=500 ./scripts/mysqlctl-up.sh
-SHARD=c0- CELL=zone1 KEYSPACE=main TABLET_UID=500 ./scripts/vttablet-up.sh
+CELL=zone1 TABLET_UID=200 ../common/scripts/mysqlctl-up.sh
+SHARD=-40 CELL=zone1 KEYSPACE=main TABLET_UID=200 ../common/scripts/vttablet-up.sh
+CELL=zone1 TABLET_UID=300 ../common/scripts/mysqlctl-up.sh
+SHARD=40-80 CELL=zone1 KEYSPACE=main TABLET_UID=300 ../common/scripts/vttablet-up.sh
+CELL=zone1 TABLET_UID=400 ../common/scripts/mysqlctl-up.sh
+SHARD=80-c0 CELL=zone1 KEYSPACE=main TABLET_UID=400 ../common/scripts/vttablet-up.sh
+CELL=zone1 TABLET_UID=500 ../common/scripts/mysqlctl-up.sh
+SHARD=c0- CELL=zone1 KEYSPACE=main TABLET_UID=500 ../common/scripts/vttablet-up.sh
for shard in "-40" "40-80" "80-c0" "c0-"; do
- # Wait for a primary tablet to be elected in the shard
- for _ in $(seq 0 200); do
- vtctldclient GetTablets --keyspace main --shard $shard | grep -q "primary" && break
- sleep 1
- done;
- vtctldclient GetTablets --keyspace main --shard $shard | grep "primary" || (echo "Timed out waiting for primary to be elected in main/$shard" && exit 1)
+ # Wait for all the tablets to be up and registered in the topology server
+ # and for a primary tablet to be elected in the shard and become healthy/serving.
+ wait_for_healthy_shard main "${shard}" 1 || exit 1
done;
diff --git a/examples/region_sharding/203_reshard.sh b/examples/region_sharding/203_reshard.sh
index 0cef1dbc4f9..aaa448a135d 100755
--- a/examples/region_sharding/203_reshard.sh
+++ b/examples/region_sharding/203_reshard.sh
@@ -14,6 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-source ./env.sh
+source ../common/env.sh
vtctlclient Reshard -- --source_shards '0' --target_shards '-40,40-80,80-c0,c0-' --tablet_types=PRIMARY Create main.main2regions
diff --git a/examples/region_sharding/204_switch_reads.sh b/examples/region_sharding/204_switch_reads.sh
index f0e6b7fd6cb..20703938199 100755
--- a/examples/region_sharding/204_switch_reads.sh
+++ b/examples/region_sharding/204_switch_reads.sh
@@ -16,6 +16,6 @@
# this script migrates traffic for the rdonly and replica tablets
-source ./env.sh
+source ../common/env.sh
vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic main.main2regions
diff --git a/examples/region_sharding/205_switch_writes.sh b/examples/region_sharding/205_switch_writes.sh
index caff6d083cc..ad0d8ee51d2 100755
--- a/examples/region_sharding/205_switch_writes.sh
+++ b/examples/region_sharding/205_switch_writes.sh
@@ -16,14 +16,13 @@
# this script migrates traffic for the primary tablet
-source ./env.sh
+source ../common/env.sh
vtctlclient Reshard -- --tablet_types=primary SwitchTraffic main.main2regions
# to go back to unsharded
-# call SwitchReads and SwitchWrites with workflow main.main2regions_reverse
-# delete vreplication rows from sharded tablets
-# drop all the tables
+# call Reshard ReverseTraffic with all tablet types
+# call Reshard Cancel
# change vschema back to unsharded
# drop lookup table
diff --git a/examples/region_sharding/206_down_shard_0.sh b/examples/region_sharding/206_down_shard_0.sh
index d302a071a46..6b46ea34ea0 100755
--- a/examples/region_sharding/206_down_shard_0.sh
+++ b/examples/region_sharding/206_down_shard_0.sh
@@ -15,7 +15,7 @@
# limitations under the License.
# this script brings down the tablets for main/0 keyspace/shard
-source ./env.sh
+source ../common/env.sh
-CELL=zone1 TABLET_UID=100 ./scripts/vttablet-down.sh
-CELL=zone1 TABLET_UID=100 ./scripts/mysqlctl-down.sh
+CELL=zone1 TABLET_UID=100 ../common/scripts/vttablet-down.sh
+CELL=zone1 TABLET_UID=100 ../common/scripts/mysqlctl-down.sh
diff --git a/examples/region_sharding/207_delete_shard_0.sh b/examples/region_sharding/207_delete_shard_0.sh
index 05f24679226..298afd7b273 100755
--- a/examples/region_sharding/207_delete_shard_0.sh
+++ b/examples/region_sharding/207_delete_shard_0.sh
@@ -16,6 +16,6 @@
# this script deletes the old shard 0 which has been replaced by 4 shards
-source ./env.sh
+source ../common/env.sh
-vtctldclient DeleteShards --recursive main/0
+vtctldclient DeleteShards --force --recursive main/0
diff --git a/examples/region_sharding/301_teardown.sh b/examples/region_sharding/301_teardown.sh
index 310c7f1277b..25f3bb259f2 100755
--- a/examples/region_sharding/301_teardown.sh
+++ b/examples/region_sharding/301_teardown.sh
@@ -17,37 +17,36 @@
# We should not assume that any of the steps have been executed.
# This makes it possible for a user to cleanup at any point.
-source ./env.sh
+source ../common/env.sh
-./scripts/vtadmin-down.sh
+../common/scripts/vtadmin-down.sh
-./scripts/vtorc-down.sh
+../common/scripts/vtorc-down.sh
-./scripts/vtgate-down.sh
+../common/scripts/vtgate-down.sh
for tablet in 100 200 300 400 500; do
if vtctlclient --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
- printf -v alias '%s-%010d' 'zone1' $uid
+ printf -v alias '%s-%010d' 'zone1' $tablet
echo "Shutting down tablet $alias"
- CELL=zone1 TABLET_UID=$tablet ./scripts/vttablet-down.sh
- CELL=zone1 TABLET_UID=$tablet ./scripts/mysqlctl-down.sh
+ CELL=zone1 TABLET_UID=$tablet ../common/scripts/vttablet-down.sh
+ CELL=zone1 TABLET_UID=$tablet ../common/scripts/mysqlctl-down.sh
fi
done
-./scripts/vtctld-down.sh
+../common/scripts/vtctld-down.sh
if [ "${TOPO}" = "zk2" ]; then
- CELL=zone1 ./scripts/zk-down.sh
+ CELL=zone1 ../common/scripts/zk-down.sh
elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ./scripts/k3s-down.sh
+ CELL=zone1 ../common/scripts/k3s-down.sh
else
- CELL=zone1 ./scripts/etcd-down.sh
+ CELL=zone1 ../common/scripts/etcd-down.sh
fi
# pedantic check: grep for any remaining processes
-if [ ! -z "$VTDATAROOT" ]; then
-
+if [ -n "$VTDATAROOT" ]; then
if pgrep -f -l "$VTDATAROOT" >/dev/null; then
echo "ERROR: Stale processes detected! It is recommended to manually kill them:"
pgrep -f -l "$VTDATAROOT"
@@ -57,7 +56,6 @@ if [ ! -z "$VTDATAROOT" ]; then
# shellcheck disable=SC2086
rm -r ${VTDATAROOT:?}/*
-
fi
disown -a
diff --git a/examples/region_sharding/README.md b/examples/region_sharding/README.md
index 8b9ced13999..fcc2e617143 100644
--- a/examples/region_sharding/README.md
+++ b/examples/region_sharding/README.md
@@ -5,12 +5,8 @@ This document contains the summary of the commands to be run.
```
-# Edit main_vschema_sharded.json and set region_map to full path of countries.json file
-# Example:
- "region_map": "/home/user/vitess/examples/region_sharding/countries.json",
-
# setup environment and aliases
-source env.sh
+source ../common/env.sh
# Bring up initial cluster and main keyspace (unsharded)
./101_initial_cluster.sh
diff --git a/examples/region_sharding/env.sh b/examples/region_sharding/env.sh
deleted file mode 100644
index 54b9c97fff9..00000000000
--- a/examples/region_sharding/env.sh
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hostname=$(hostname -f)
-vtctld_web_port=15000
-export VTDATAROOT="${VTDATAROOT:-${PWD}/vtdataroot}"
-
-function fail() {
- echo "ERROR: $1"
- exit 1
-}
-
-if [[ $EUID -eq 0 ]]; then
- fail "This script refuses to be run as root. Please switch to a regular user."
-fi
-
-# mysqld might be in /usr/sbin which will not be in the default PATH
-PATH="/usr/sbin:$PATH"
-for binary in mysqld etcd etcdctl curl vtctlclient vttablet vtgate vtctld mysqlctl; do
- command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/docs/get-started/local/ for install instructions."
-done;
-
-if [ "${TOPO}" = "zk2" ]; then
- # Each ZooKeeper server needs a list of all servers in the quorum.
- # Since we're running them all locally, we need to give them unique ports.
- # In a real deployment, these should be on different machines, and their
- # respective hostnames should be given.
- zkcfg=(\
- "1@$hostname:28881:38881:21811" \
- "2@$hostname:28882:38882:21812" \
- "3@$hostname:28883:38883:21813" \
- )
- printf -v zkcfg ",%s" "${zkcfg[@]}"
- zkcfg=${zkcfg:1}
-
- zkids='1 2 3'
-
- # Set topology environment parameters.
- ZK_SERVER="localhost:21811,localhost:21812,localhost:21813"
- # shellcheck disable=SC2034
- TOPOLOGY_FLAGS="--topo_implementation zk2 --topo_global_server_address ${ZK_SERVER} --topo_global_root /vitess/global"
-
- mkdir -p "${VTDATAROOT}/tmp"
-elif [ "${TOPO}" = "k8s" ]; then
- # Set topology environment parameters.
- K8S_ADDR="localhost"
- K8S_PORT="8443"
- K8S_KUBECONFIG=$VTDATAROOT/tmp/k8s.kubeconfig
- # shellcheck disable=SC2034
- TOPOLOGY_FLAGS="--topo_implementation k8s --topo_k8s_kubeconfig ${K8S_KUBECONFIG} --topo_global_server_address ${K8S_ADDR}:${K8S_PORT} --topo_global_root /vitess/global"
-else
- ETCD_SERVER="localhost:2379"
- TOPOLOGY_FLAGS="--topo_implementation etcd2 --topo_global_server_address $ETCD_SERVER --topo_global_root /vitess/global"
-
- mkdir -p "${VTDATAROOT}/etcd"
-fi
-
-mkdir -p "${VTDATAROOT}/tmp"
-
-# Set aliases to simplify instructions.
-# In your own environment you may prefer to use config files,
-# such as ~/.my.cnf
-
-alias mysql="command mysql -h 127.0.0.1 -P 15306"
-alias vtctlclient="command vtctlclient --server localhost:15999 --log_dir ${VTDATAROOT}/tmp --alsologtostderr"
-alias vtctldclient="command vtctldclient --server localhost:15999"
-
-# Make sure aliases are expanded in non-interactive shell
-shopt -s expand_aliases
-
diff --git a/examples/region_sharding/main_vschema_sharded.json b/examples/region_sharding/main_vschema_sharded.json
index 06d72e58981..ec51afa9ee8 100644
--- a/examples/region_sharding/main_vschema_sharded.json
+++ b/examples/region_sharding/main_vschema_sharded.json
@@ -4,7 +4,7 @@
"region_vdx": {
"type": "region_json",
"params": {
- "region_map": "/vt/src/vitess.io/vitess/examples/region_sharding/countries.json",
+ "region_map": "./countries.json",
"region_bytes": "1"
}
}
diff --git a/examples/region_sharding/scripts/etcd-down.sh b/examples/region_sharding/scripts/etcd-down.sh
deleted file mode 100755
index 018af7432a3..00000000000
--- a/examples/region_sharding/scripts/etcd-down.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that stops the etcd servers started by etcd-up.sh.
-
-source ./env.sh
-
-echo "Stopping etcd..."
-kill -9 `cat $VTDATAROOT/tmp/etcd.pid`
diff --git a/examples/region_sharding/scripts/etcd-up.sh b/examples/region_sharding/scripts/etcd-up.sh
deleted file mode 100755
index 9164d32806e..00000000000
--- a/examples/region_sharding/scripts/etcd-up.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a quorum of ZooKeeper servers.
-
-source ./env.sh
-
-cell=${CELL:-'test'}
-export ETCDCTL_API=2
-
-# Check that etcd is not already running
-curl "http://${ETCD_SERVER}" > /dev/null 2>&1 && fail "etcd is already running. Exiting."
-
-etcd --enable-v2=true --data-dir "${VTDATAROOT}/etcd/" --listen-client-urls "http://${ETCD_SERVER}" --advertise-client-urls "http://${ETCD_SERVER}" > "${VTDATAROOT}"/tmp/etcd.out 2>&1 &
-PID=$!
-echo $PID > "${VTDATAROOT}/tmp/etcd.pid"
-sleep 5
-
-echo "add /vitess/global"
-etcdctl --endpoints "http://${ETCD_SERVER}" mkdir /vitess/global &
-
-echo "add /vitess/$cell"
-etcdctl --endpoints "http://${ETCD_SERVER}" mkdir /vitess/$cell &
-
-# And also add the CellInfo description for the cell.
-# If the node already exists, it's fine, means we used existing data.
-echo "add $cell CellInfo"
-set +e
-# shellcheck disable=SC2086
-vtctl $TOPOLOGY_FLAGS AddCellInfo -- \
- --root /vitess/$cell \
- --server_address "${ETCD_SERVER}" \
- $cell
-set -e
-
-echo "etcd start done..."
-
-
diff --git a/examples/region_sharding/scripts/k3s-up.sh b/examples/region_sharding/scripts/k3s-up.sh
deleted file mode 100755
index 286b4d0ad85..00000000000
--- a/examples/region_sharding/scripts/k3s-up.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a Kubernetes api for topo use by running k3s
-
-set -e
-cell=${CELL:-'test'}
-
-script_root=$(dirname "${BASH_SOURCE[0]}")
-
-# shellcheck source=./env.sh
-# shellcheck disable=SC1091
-source ./env.sh
-
-case $(uname) in
- Linux) ;;
- *) echo "WARNING: unsupported platform. K3s only supports running on Linux, the k8s topology is available for local examples."; exit 1;;
-esac
-
-case $(uname -m) in
- aarch64) ;;
- x86_64) ;;
- *) echo "ERROR: unsupported architecture, the k8s topology is not available for local examples."; exit 1;;
-esac
-
-k3s server --disable-agent --data-dir "${VTDATAROOT}/k3s/" --https-listen-port "${K8S_PORT}" --write-kubeconfig "${K8S_KUBECONFIG}" > "${VTDATAROOT}"/tmp/k3s.out 2>&1 &
-PID=$!
-echo $PID > "${VTDATAROOT}/tmp/k3s.pid"
-disown -a
-echo "Waiting for k3s server to start"
-sleep 15
-
-# Use k3s built-in kubectl with custom config
-KUBECTL="k3s kubectl --kubeconfig=${K8S_KUBECONFIG}"
-
-# Create the CRD for vitesstopologynodes
-$KUBECTL create -f ../../go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml
-
-# Add the CellInfo description for the cell
-set +e
-echo "add $cell CellInfo"
-vtctl $TOPOLOGY_FLAGS AddCellInfo -- \
- --root /vitess/$cell \
- $cell
-set -e
-
-echo "k3s start done..."
diff --git a/examples/region_sharding/scripts/mysqlctl-down.sh b/examples/region_sharding/scripts/mysqlctl-down.sh
deleted file mode 100755
index 37b1d0c729b..00000000000
--- a/examples/region_sharding/scripts/mysqlctl-down.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that stops the mysqld and vttablet instances
-# created by vttablet-up.sh
-
-source ./env.sh
-
-cell=${CELL:-'test'}
-uid=$TABLET_UID
-printf -v alias '%s-%010d' $cell $uid
-echo "Shutting down MySQL for tablet $alias..."
-
-mysqlctl --tablet_uid $TABLET_UID shutdown
-
diff --git a/examples/region_sharding/scripts/vtadmin-down.sh b/examples/region_sharding/scripts/vtadmin-down.sh
deleted file mode 100755
index 2a7944d9d5a..00000000000
--- a/examples/region_sharding/scripts/vtadmin-down.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-source ./env.sh
-
-echo "Stopping vtadmin-web..."
-kill -9 "$(cat "$VTDATAROOT/tmp/vtadmin-web.pid")"
-
-echo "Stopping vtadmin-api..."
-kill -9 "$(cat "$VTDATAROOT/tmp/vtadmin-api.pid")"
diff --git a/examples/region_sharding/scripts/vtctld-up.sh b/examples/region_sharding/scripts/vtctld-up.sh
deleted file mode 100755
index 7957bdec8ba..00000000000
--- a/examples/region_sharding/scripts/vtctld-up.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that starts vtctld.
-
-source ./env.sh
-
-cell=${CELL:-'test'}
-grpc_port=15999
-
-echo "Starting vtctld..."
-# shellcheck disable=SC2086
-vtctld \
- $TOPOLOGY_FLAGS \
- --cell $cell \
- --workflow_manager_init \
- --workflow_manager_use_election \
- --service_map 'grpc-vtctl,grpc-vtctld' \
- --backup_storage_implementation file \
- --file_backup_storage_root $VTDATAROOT/backups \
- --log_dir $VTDATAROOT/tmp \
- --port $vtctld_web_port \
- --grpc_port $grpc_port \
- --pid_file $VTDATAROOT/tmp/vtctld.pid \
- > $VTDATAROOT/tmp/vtctld.out 2>&1 &
diff --git a/examples/region_sharding/scripts/vtorc-down.sh b/examples/region_sharding/scripts/vtorc-down.sh
deleted file mode 100755
index 2fdfc1491ce..00000000000
--- a/examples/region_sharding/scripts/vtorc-down.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-source ./env.sh
-
-echo "Stopping vtorc."
-kill -9 "$(cat "$VTDATAROOT/tmp/vtorc.pid")"
-
diff --git a/examples/region_sharding/scripts/vtorc-up.sh b/examples/region_sharding/scripts/vtorc-up.sh
deleted file mode 100755
index f5a0d75dbba..00000000000
--- a/examples/region_sharding/scripts/vtorc-up.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-source ./env.sh
-
-log_dir="${VTDATAROOT}/tmp"
-port=16000
-
-vtorc \
- $TOPOLOGY_FLAGS \
- --logtostderr \
- --alsologtostderr \
- --config="./vtorc/config.json" \
- --port $port \
- > "${log_dir}/vtorc.out" 2>&1 &
-
-vtorc_pid=$!
-echo ${vtorc_pid} > "${log_dir}/vtorc.pid"
-
-echo "\
-vtorc is running!
- - UI: http://localhost:${port}
- - Logs: ${log_dir}/vtorc.out
- - PID: ${vtorc_pid}
-"
diff --git a/examples/region_sharding/scripts/vttablet-up.sh b/examples/region_sharding/scripts/vttablet-up.sh
deleted file mode 100755
index 5ecc6657afc..00000000000
--- a/examples/region_sharding/scripts/vttablet-up.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source ./env.sh
-
-cell=${CELL:-'test'}
-keyspace=${KEYSPACE:-'test_keyspace'}
-shard=${SHARD:-'0'}
-uid=$TABLET_UID
-mysql_port=$[17000 + $uid]
-port=$[15000 + $uid]
-grpc_port=$[16000 + $uid]
-printf -v alias '%s-%010d' $cell $uid
-printf -v tablet_dir 'vt_%010d' $uid
-tablet_hostname=''
-printf -v tablet_logfile 'vttablet_%010d_querylog.txt' $uid
-
-tablet_type=replica
-if [[ "${uid: -1}" -gt 1 ]]; then
- tablet_type=rdonly
-fi
-
-echo "Starting vttablet for $alias..."
-# shellcheck disable=SC2086
-vttablet \
- $TOPOLOGY_FLAGS \
- --log_dir $VTDATAROOT/tmp \
- --log_queries_to_file $VTDATAROOT/tmp/$tablet_logfile \
- --tablet-path $alias \
- --tablet_hostname "$tablet_hostname" \
- --init_keyspace $keyspace \
- --init_shard $shard \
- --init_tablet_type $tablet_type \
- --health_check_interval 5s \
- --enable_replication_reporter \
- --backup_storage_implementation file \
- --file_backup_storage_root $VTDATAROOT/backups \
- --restore_from_backup \
- --port $port \
- --grpc_port $grpc_port \
- --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
- --pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \
- --vtctld_addr http://$hostname:$vtctld_web_port/ \
- --disable_active_reparents \
- > $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 &
-
-# Block waiting for the tablet to be listening
-# Not the same as healthy
-
-for i in $(seq 0 300); do
- curl -I "http://$hostname:$port/debug/status" >/dev/null 2>&1 && break
- sleep 0.1
-done
-
-# check one last time
-curl -I "http://$hostname:$port/debug/status" || fail "tablet could not be started!"
diff --git a/examples/region_sharding/scripts/zk-down.sh b/examples/region_sharding/scripts/zk-down.sh
deleted file mode 100755
index 18dd7933bc9..00000000000
--- a/examples/region_sharding/scripts/zk-down.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that stops the ZooKeeper servers started by zk-up.sh.
-
-source ./env.sh
-
-# Stop ZooKeeper servers.
-echo "Stopping zk servers..."
-for zkid in $zkids; do
- zkctl -zk.myid $zkid -zk.cfg $zkcfg -log_dir $VTDATAROOT/tmp shutdown
-done
-
diff --git a/examples/region_sharding/scripts/zk-up.sh b/examples/region_sharding/scripts/zk-up.sh
deleted file mode 100755
index 0667d806838..00000000000
--- a/examples/region_sharding/scripts/zk-up.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a quorum of ZooKeeper servers.
-
-source ./env.sh
-
-cell=${CELL:-'test'}
-
-# Start ZooKeeper servers.
-# The "zkctl init" command won't return until the server is able to contact its
-# peers, so we need to start them all in the background and then wait for them.
-echo "Starting zk servers..."
-for zkid in $zkids; do
- action='init'
- printf -v zkdir 'zk_%03d' $zkid
- if [ -f $VTDATAROOT/$zkdir/myid ]; then
- echo "Resuming from existing ZK data dir:"
- echo " $VTDATAROOT/$zkdir"
- action='start'
- fi
- zkctl -zk.myid $zkid -zk.cfg $zkcfg -log_dir $VTDATAROOT/tmp $action \
- > $VTDATAROOT/tmp/zkctl_$zkid.out 2>&1 &
- pids[$zkid]=$!
-done
-
-# Wait for all the zkctl commands to return.
-echo "Waiting for zk servers to be ready..."
-
-for zkid in $zkids; do
- if ! wait ${pids[$zkid]}; then
- echo "ZK server number $zkid failed to start. See log:"
- echo " $VTDATAROOT/tmp/zkctl_$zkid.out"
- fi
-done
-
-echo "Started zk servers."
-
-# Add the CellInfo description for the $CELL cell.
-# If the node already exists, it's fine, means we used existing data.
-set +e
-# shellcheck disable=SC2086
-vtctl $TOPOLOGY_FLAGS AddCellInfo -- \
- --root /vitess/$cell \
- --server_address $ZK_SERVER \
- $cell
-set -e
-
-echo "Configured zk servers."
diff --git a/examples/region_sharding/topo-etcd2.sh b/examples/region_sharding/topo-etcd2.sh
deleted file mode 100644
index c61543a806c..00000000000
--- a/examples/region_sharding/topo-etcd2.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a single shard vttablet deployment.
-
-export TOPO='etcd2'
-
-
diff --git a/examples/region_sharding/topo-k8s.sh b/examples/region_sharding/topo-k8s.sh
deleted file mode 100644
index 92e14ba4d69..00000000000
--- a/examples/region_sharding/topo-k8s.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a single shard vttablet deployment.
-
-export TOPO='k8s'
-
-
diff --git a/examples/region_sharding/topo-zk2.sh b/examples/region_sharding/topo-zk2.sh
deleted file mode 100644
index 29380949d8f..00000000000
--- a/examples/region_sharding/topo-zk2.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a single shard vttablet deployment.
-
-export TOPO='zk2'
-
-
diff --git a/examples/region_sharding/vtadmin/discovery.json b/examples/region_sharding/vtadmin/discovery.json
deleted file mode 100644
index def7dd50f85..00000000000
--- a/examples/region_sharding/vtadmin/discovery.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "vtctlds": [
- {
- "host": {
- "fqdn": "localhost:15000",
- "hostname": "localhost:15999"
- }
- }
- ],
- "vtgates": [
- {
- "host": {
- "hostname": "localhost:15991"
- }
- }
- ]
-}
diff --git a/examples/region_sharding/vtadmin/rbac.yaml b/examples/region_sharding/vtadmin/rbac.yaml
deleted file mode 100644
index 1b46933ba39..00000000000
--- a/examples/region_sharding/vtadmin/rbac.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-rules:
- - resource: "*"
- actions:
- - "get"
- - "create"
- - "delete"
- - "put"
- - "ping"
- subjects: ["*"]
- clusters: ["*"]
- - resource: "Shard"
- actions:
- - "emergency_reparent_shard"
- - "planned_reparent_shard"
- subjects: ["*"]
- clusters:
- - "local"
diff --git a/examples/region_sharding/vtorc/config.json b/examples/region_sharding/vtorc/config.json
deleted file mode 100644
index e1f27ee5762..00000000000
--- a/examples/region_sharding/vtorc/config.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "MySQLTopologyUser": "orc_client_user",
- "MySQLTopologyPassword": "orc_client_user_password",
- "MySQLReplicaUser": "vt_repl",
- "MySQLReplicaPassword": "",
- "RecoveryPeriodBlockSeconds": 1,
- "InstancePollSeconds": 1
-}
\ No newline at end of file
diff --git a/examples/vtexplain/atomicity_method1.sh b/examples/vtexplain/atomicity_method1.sh
new file mode 100755
index 00000000000..f4e2a300f0e
--- /dev/null
+++ b/examples/vtexplain/atomicity_method1.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+set -x
+vtexplain --vschema-file atomicity_vschema.json --schema-file atomicity_schema.sql --shards 4 --sql 'INSERT /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ INTO t1 (c1) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);'
diff --git a/examples/vtexplain/atomicity_method2.sh b/examples/vtexplain/atomicity_method2.sh
new file mode 100755
index 00000000000..061479ecae9
--- /dev/null
+++ b/examples/vtexplain/atomicity_method2.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+set -x
+vtexplain --vschema-file atomicity_vschema.json --schema-file atomicity_schema.sql --shards 4 --sql 'SET transaction_mode="single"; INSERT INTO t1 (c1) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);'
diff --git a/examples/vtexplain/atomicity_method2_reads.sh b/examples/vtexplain/atomicity_method2_reads.sh
new file mode 100755
index 00000000000..e1b383d1ef5
--- /dev/null
+++ b/examples/vtexplain/atomicity_method2_reads.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+set -x
+vtexplain --vschema-file atomicity_vschema.json --schema-file atomicity_schema.sql --shards 4 --sql 'SET transaction_mode="single"; BEGIN; SELECT * from t1; COMMIT;'
diff --git a/examples/vtexplain/atomicity_method2_working.sh b/examples/vtexplain/atomicity_method2_working.sh
new file mode 100755
index 00000000000..0ba844fe08f
--- /dev/null
+++ b/examples/vtexplain/atomicity_method2_working.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+set -x
+vtexplain --vschema-file atomicity_vschema.json --schema-file atomicity_schema.sql --shards 4 --sql 'SET transaction_mode="single"; INSERT INTO t1 (c1) values (10),(14),(15),(16);'
diff --git a/examples/vtexplain/atomicity_method3.sh b/examples/vtexplain/atomicity_method3.sh
new file mode 100755
index 00000000000..e3c013b5a8d
--- /dev/null
+++ b/examples/vtexplain/atomicity_method3.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+set -x
+# SET transaction_mode="multi" is implied by default
+vtexplain --vschema-file atomicity_vschema.json --schema-file atomicity_schema.sql --shards 4 --sql 'INSERT INTO t1 (c1) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);'
diff --git a/examples/local/vtexplain/atomicity_schema.sql b/examples/vtexplain/atomicity_schema.sql
similarity index 100%
rename from examples/local/vtexplain/atomicity_schema.sql
rename to examples/vtexplain/atomicity_schema.sql
diff --git a/examples/local/vtexplain/atomicity_vschema.json b/examples/vtexplain/atomicity_vschema.json
similarity index 100%
rename from examples/local/vtexplain/atomicity_vschema.json
rename to examples/vtexplain/atomicity_vschema.json
diff --git a/go.mod b/go.mod
index 3651a42d026..265d6d0b72a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,190 +1,221 @@
module vitess.io/vitess
-go 1.18
+go 1.20
require (
- cloud.google.com/go/storage v1.10.0
- github.com/AdaLogics/go-fuzz-headers v0.0.0-20211102141018-f7be0cbad29c
- github.com/Azure/azure-pipeline-go v0.2.2
- github.com/Azure/azure-storage-blob-go v0.10.0
- github.com/DataDog/datadog-go v2.2.0+incompatible
+ cloud.google.com/go/storage v1.29.0
+ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1
+ github.com/Azure/azure-pipeline-go v0.2.3
+ github.com/Azure/azure-storage-blob-go v0.15.0
+ github.com/DataDog/datadog-go v4.8.3+incompatible
github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect
github.com/PuerkitoBio/goquery v1.5.1
github.com/aquarapid/vaultlib v0.5.1
- github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
- github.com/aws/aws-sdk-go v1.34.2
- github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13
- github.com/cespare/xxhash/v2 v2.1.1
+ github.com/armon/go-metrics v0.4.1 // indirect
+ github.com/aws/aws-sdk-go v1.44.192
+ github.com/buger/jsonparser v1.1.1
+ github.com/cespare/xxhash/v2 v2.2.0
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
github.com/corpix/uarand v0.1.1 // indirect
- github.com/dave/jennifer v1.4.1
- github.com/evanphx/json-patch v4.9.0+incompatible
- github.com/fsnotify/fsnotify v1.4.9
+ github.com/dave/jennifer v1.6.0
+ github.com/evanphx/json-patch v5.6.0+incompatible
+ github.com/fsnotify/fsnotify v1.6.0
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab
- github.com/go-sql-driver/mysql v1.6.0
- github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
- github.com/golang/mock v1.5.0
+ github.com/go-sql-driver/mysql v1.7.0
+ github.com/golang/glog v1.0.0
+ github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
- github.com/golang/snappy v0.0.3
- github.com/google/go-cmp v0.5.8
+ github.com/golang/snappy v0.0.4
+ github.com/google/go-cmp v0.5.9
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/google/uuid v1.3.0
- github.com/googleapis/gnostic v0.4.1 // indirect
github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0
- github.com/gorilla/websocket v1.4.2
- github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
+ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/hashicorp/consul/api v1.10.1
- github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
- github.com/hashicorp/go-msgpack v0.5.5 // indirect
- github.com/hashicorp/go-sockaddr v1.0.2 // indirect
- github.com/hashicorp/go-uuid v1.0.2 // indirect
- github.com/hashicorp/serf v0.9.7 // indirect
- github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c
+ github.com/hashicorp/consul/api v1.18.0
+ github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
+ github.com/hashicorp/serf v0.10.1 // indirect
+ github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
- github.com/klauspost/compress v1.13.0
- github.com/klauspost/pgzip v1.2.4
+ github.com/klauspost/compress v1.15.15
+ github.com/klauspost/pgzip v1.2.5
github.com/krishicks/yaml-patch v0.0.10
- github.com/magiconair/properties v1.8.5
- github.com/mattn/go-sqlite3 v1.14.14
+ github.com/magiconair/properties v1.8.7
+ github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1
- github.com/mitchellh/go-testing-interface v1.14.0 // indirect
- github.com/montanaflynn/stats v0.6.3
- github.com/olekukonko/tablewriter v0.0.5-0.20200416053754-163badb3bac6
- github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02
- github.com/opentracing/opentracing-go v1.1.0
+ github.com/montanaflynn/stats v0.7.0
+ github.com/olekukonko/tablewriter v0.0.5
+ github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e
+ github.com/opentracing/opentracing-go v1.2.0
github.com/patrickmn/go-cache v2.1.0+incompatible
- github.com/philhofer/fwd v1.0.0 // indirect
+ github.com/philhofer/fwd v1.1.2 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible
- github.com/pires/go-proxyproto v0.6.1
+ github.com/pires/go-proxyproto v0.6.2
github.com/pkg/errors v0.9.1
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a
- github.com/planetscale/vtprotobuf v0.3.0
- github.com/prometheus/client_golang v1.11.0
- github.com/prometheus/common v0.29.0 // indirect
- github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
- github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1
- github.com/soheilhy/cmux v0.1.4
- github.com/spf13/cobra v1.4.0
+ github.com/planetscale/vtprotobuf v0.4.0
+ github.com/prometheus/client_golang v1.14.0
+ github.com/prometheus/common v0.39.0 // indirect
+ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
+ github.com/sjmudd/stopwatch v0.1.1
+ github.com/soheilhy/cmux v0.1.5
+ github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.8.1
- github.com/spyzhov/ajson v0.4.2
- github.com/stretchr/testify v1.7.1
- github.com/tchap/go-patricia v2.2.6+incompatible
- github.com/tebeka/selenium v0.9.9
+ github.com/spf13/viper v1.15.0
+ github.com/stretchr/testify v1.8.1
+ github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tidwall/gjson v1.12.1
- github.com/tinylib/msgp v1.1.1 // indirect
- github.com/uber-go/atomic v1.4.0 // indirect
- github.com/uber/jaeger-client-go v2.16.0+incompatible
+ github.com/tinylib/msgp v1.1.8 // indirect
+ github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82
- github.com/z-division/go-zookeeper v0.0.0-20190128072838-6d7457066b9b
- go.etcd.io/etcd/api/v3 v3.5.0
- go.etcd.io/etcd/client/pkg/v3 v3.5.0
- go.etcd.io/etcd/client/v3 v3.5.0
- golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 // indirect
+ github.com/z-division/go-zookeeper v1.0.0
+ go.etcd.io/etcd/api/v3 v3.5.7
+ go.etcd.io/etcd/client/pkg/v3 v3.5.7
+ go.etcd.io/etcd/client/v3 v3.5.7
+ golang.org/x/crypto v0.5.0 // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
- golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
- golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
- golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
- golang.org/x/text v0.3.7
- golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
- golang.org/x/tools v0.1.10
- google.golang.org/api v0.45.0
- google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 // indirect
- google.golang.org/grpc v1.45.0
- google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0
+ golang.org/x/mod v0.7.0 // indirect
+ golang.org/x/net v0.7.0
+ golang.org/x/oauth2 v0.4.0
+ golang.org/x/sys v0.5.0 // indirect
+ golang.org/x/term v0.5.0
+ golang.org/x/text v0.7.0
+ golang.org/x/time v0.3.0
+ golang.org/x/tools v0.5.0
+ google.golang.org/api v0.109.0
+ google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 // indirect
+ google.golang.org/grpc v1.52.3
+ google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b
- google.golang.org/protobuf v1.28.0
- gopkg.in/DataDog/dd-trace-go.v1 v1.17.0
+ google.golang.org/protobuf v1.28.1
+ gopkg.in/DataDog/dd-trace-go.v1 v1.47.0
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/gcfg.v1 v1.2.3
- gopkg.in/ldap.v2 v2.5.0
+ gopkg.in/ldap.v2 v2.5.1
gopkg.in/warnings.v0 v0.1.2 // indirect
gotest.tools v2.2.0+incompatible
- honnef.co/go/tools v0.0.1-2020.1.4
+ honnef.co/go/tools v0.3.3
k8s.io/apiextensions-apiserver v0.18.19
- k8s.io/apimachinery v0.20.6
- k8s.io/client-go v0.20.6
- k8s.io/code-generator v0.18.19
- sigs.k8s.io/yaml v1.2.0
+ k8s.io/apimachinery v0.26.1
+ k8s.io/client-go v0.26.1
+ k8s.io/code-generator v0.26.1
+ sigs.k8s.io/yaml v1.3.0
)
require (
github.com/bndr/gotabulate v1.1.2
- github.com/openark/golib v0.0.0-20210531070646-355f37940af8
+ github.com/hashicorp/go-version v1.6.0
+ github.com/kr/pretty v0.3.1
+ github.com/kr/text v0.2.0
+ github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249
+ github.com/spyzhov/ajson v0.8.0
+ golang.org/x/exp v0.0.0-20230131160201-f062dba9d201
+ modernc.org/sqlite v1.20.3
)
require (
- cloud.google.com/go v0.81.0 // indirect
- github.com/BurntSushi/toml v0.3.1 // indirect
+ cloud.google.com/go v0.109.0 // indirect
+ cloud.google.com/go/compute v1.18.0 // indirect
+ cloud.google.com/go/compute/metadata v0.2.3 // indirect
+ cloud.google.com/go/iam v0.10.0 // indirect
+ github.com/BurntSushi/toml v1.2.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 // indirect
+ github.com/DataDog/datadog-go/v5 v5.2.0 // indirect
+ github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
+ github.com/DataDog/sketches-go v1.4.1 // indirect
+ github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/andybalholm/cascadia v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/blang/semver v3.5.1+incompatible // indirect
- github.com/coreos/go-semver v0.3.0 // indirect
- github.com/coreos/go-systemd/v22 v22.3.2 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/fatih/color v1.9.0 // indirect
- github.com/felixge/httpsnoop v1.0.1 // indirect
- github.com/frankban/quicktest v1.14.3 // indirect
- github.com/go-logr/logr v0.2.0 // indirect
+ github.com/dgraph-io/ristretto v0.1.1 // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/emicklei/go-restful/v3 v3.10.1 // indirect
+ github.com/fatih/color v1.14.1 // indirect
+ github.com/felixge/httpsnoop v1.0.3 // indirect
+ github.com/go-logr/logr v1.2.3 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
- github.com/google/gofuzz v1.1.0 // indirect
- github.com/googleapis/gax-go/v2 v2.0.5 // indirect
- github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
- github.com/hashicorp/go-hclog v0.12.0 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/google/gnostic v0.6.9 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
+ github.com/googleapis/gax-go/v2 v2.7.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+ github.com/hashicorp/go-hclog v1.4.0 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
- github.com/hashicorp/golang-lru v0.5.1 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/json-iterator/go v1.1.11 // indirect
- github.com/jstemmer/go-junit-report v0.9.1 // indirect
- github.com/mattn/go-colorable v0.1.6 // indirect
- github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d // indirect
- github.com/mattn/go-isatty v0.0.12 // indirect
- github.com/mattn/go-runewidth v0.0.7 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mattn/go-ieproxy v0.0.9 // indirect
+ github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-runewidth v0.0.14 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/mitchellh/mapstructure v1.4.1 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
- github.com/onsi/ginkgo v1.12.1 // indirect
- github.com/onsi/gomega v1.10.3 // indirect
- github.com/pelletier/go-toml v1.9.3 // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/procfs v0.6.0 // indirect
+ github.com/prometheus/client_model v0.3.0 // indirect
+ github.com/prometheus/procfs v0.9.0 // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+ github.com/rivo/uniseg v0.4.3 // indirect
+ github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/satori/go.uuid v1.2.0 // indirect
- github.com/spf13/afero v1.6.0 // indirect
- github.com/spf13/cast v1.3.1 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
+ github.com/spf13/afero v1.9.3 // indirect
+ github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
- github.com/subosito/gotenv v1.2.0 // indirect
+ github.com/subosito/gotenv v1.4.2 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
- go.opencensus.io v0.23.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.6.0 // indirect
- go.uber.org/zap v1.17.0 // indirect
- golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.uber.org/atomic v1.10.0 // indirect
+ go.uber.org/multierr v1.9.0 // indirect
+ go.uber.org/zap v1.23.0 // indirect
+ go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect
+ go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20230131160201-f062dba9d201 // indirect
+ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/ini.v1 v1.62.0 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
- k8s.io/api v0.20.6 // indirect
- k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac // indirect
- k8s.io/klog v1.0.0 // indirect
- k8s.io/klog/v2 v2.4.0 // indirect
- k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd // indirect
- k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect
+ k8s.io/api v0.26.1 // indirect
+ k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect
+ k8s.io/klog/v2 v2.90.0 // indirect
+ k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 // indirect
+ k8s.io/utils v0.0.0-20230115233650-391b47cb4029 // indirect
+ lukechampine.com/uint128 v1.2.0 // indirect
+ modernc.org/cc/v3 v3.40.0 // indirect
+ modernc.org/ccgo/v3 v3.16.13 // indirect
+ modernc.org/libc v1.22.2 // indirect
+ modernc.org/mathutil v1.5.0 // indirect
+ modernc.org/memory v1.5.0 // indirect
+ modernc.org/opt v0.1.3 // indirect
+ modernc.org/strutil v1.1.3 // indirect
+ modernc.org/token v1.1.0 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
+
+replace github.com/spyzhov/ajson v0.8.0 => github.com/rohit-nayak-ps/ajson v0.7.2-0.20230316112806-97deb03d883c
diff --git a/go.sum b/go.sum
index 19fe3502a7c..46ae97a30fb 100644
--- a/go.sum
+++ b/go.sum
@@ -1,9 +1,9 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -16,19 +16,24 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk=
+cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI=
+cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM=
+cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -37,54 +42,66 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20211102141018-f7be0cbad29c h1:9K6I0yCgGSneuHCoIlJl0O09UjqqWduCwd+ZL1nHFWc=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20211102141018-f7be0cbad29c/go.mod h1:WpB7kf89yJUETZxQnP1kgYPNwlT2jjdDYUCoxVggM3g=
-github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs=
-github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
+github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e h1:4ZrkT/RzpnROylmoQL57iVUL57wGKTR5O6KpVnbm2tA=
-github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e/go.mod h1:uw9h2sd4WWHOPdJ13MQpwK5qYWKYDumDqxWWIknEQ+k=
-github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
-github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 h1:p9uCmbyi4gEbJAOLoT/GjIAQMGe3velLmiC3mMgSIy4=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0/go.mod h1:7Bsrm5U8/B+B8dffT3t733tDvdCr7upqIPSVuDqJ0Mw=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 h1:b/RFr5T6HcEOKoXfKFOqZf33hsUbvskY1F5LDld7HCI=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q=
+github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E=
+github.com/DataDog/datadog-go/v5 v5.2.0 h1:kSptqUGSNK67DgA+By3rwtFnAh6pTBxJ7Hn8JCLZcKY=
+github.com/DataDog/datadog-go/v5 v5.2.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
+github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU=
+github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs=
+github.com/DataDog/sketches-go v1.4.1 h1:j5G6as+9FASM2qC36lvpvQAj9qsv/jUs3FtO8CwZNAY=
+github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk=
github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg=
github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE=
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -97,7 +114,6 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
@@ -107,32 +123,30 @@ github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+Qjg
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
-github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.34.2 h1:9vCknCdTAmmV4ht7lPuda7aJXzllXwEQyCMZKJHjBrM=
-github.com/aws/aws-sdk-go v1.34.2/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM=
+github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bndr/gotabulate v1.1.2 h1:yC9izuZEphojb9r+KYL4W9IJKO/ceIO8HDwxMA24U4c=
github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U=
-github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13 h1:+qUNY4VRkEH46bLUwxCyUU+iOGJMQBVibAaYzWiwWcg=
-github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -142,11 +156,9 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
@@ -154,29 +166,35 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U=
github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw=
-github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA=
+github.com/dave/jennifer v1.6.0 h1:MQ/6emI2xM7wt0tJzJzyUik2Q3Tcn2eE0vtYgh4GPVI=
+github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
+github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
+github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -184,32 +202,41 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dvyukov/go-fuzz v0.0.0-20210914135545-4980593459a1/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
+github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
+github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
+github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
+github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
-github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
@@ -219,13 +246,13 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
@@ -241,11 +268,15 @@ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
@@ -268,25 +299,29 @@ github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -294,8 +329,8 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -312,14 +347,15 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
+github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -332,18 +368,17 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-github/v27 v27.0.4/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -353,26 +388,28 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
@@ -380,40 +417,37 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=
-github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=
-github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
-github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
+github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
+github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
+github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU=
+github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I=
+github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
-github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
-github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -424,72 +458,69 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA=
-github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
-github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
-github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c h1:aY2hhxLhjEAbfXOx2nRJxCXezC6CO2V/yN+OCr1srtk=
-github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc=
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs=
-github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
-github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
+github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -498,36 +529,45 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E=
github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-ieproxy v0.0.9 h1:RvVbLiMv/Hbjf1gRaC2AQyzwbdVhdId7D2vPnXIml4k=
+github.com/mattn/go-ieproxy v0.0.9/go.mod h1:eF30/rfdQUO9EnzNIZQr0r9HiLMlZNCpJkHbmMuOAE0=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
-github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
@@ -535,54 +575,58 @@ github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 h1:jw16EimP5oAEM/2w
github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI=
-github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/montanaflynn/stats v0.6.3 h1:F8446DrvIF5V5smZfZ8K9nrmmix0AFgevPdLruGOmzk=
-github.com/montanaflynn/stats v0.6.3/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU=
+github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE=
+github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.5-0.20200416053754-163badb3bac6 h1:F721VBMijn0OBFZ5wUSuMVVLQj2IJiiupn6UNd7UbBE=
-github.com/olekukonko/tablewriter v0.0.5-0.20200416053754-163badb3bac6/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
-github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
-github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q=
-github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
-github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
+github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
+github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -590,93 +634,98 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
+github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
-github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
+github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pires/go-proxyproto v0.6.1 h1:EBupykFmo22SDjv4fQVQd2J9NOoLPmyZA/15ldOGkPw=
-github.com/pires/go-proxyproto v0.6.1/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY=
+github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Dos0d8=
+github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a h1:y0OpQ4+5tKxeh9+H+2cVgASl9yMZYV9CILinKOiKafA=
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE=
-github.com/planetscale/vtprotobuf v0.3.0 h1:oMrOdDFHS1ADc0dHtC2EApxiM5xd0cQkZeibm0WgXiQ=
-github.com/planetscale/vtprotobuf v0.3.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks=
+github.com/planetscale/vtprotobuf v0.4.0 h1:NEI+g4woRaAZgeZ3sAvbtyvMBRjIv5kE7EWYQ8m4JwY=
+github.com/planetscale/vtprotobuf v0.4.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE=
-github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
+github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
-github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
+github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
+github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rohit-nayak-ps/ajson v0.7.2-0.20230316112806-97deb03d883c h1:Y/4qcogoZA2WUtLWMk/yXfJSpaIG3mK3r9Lw4kaARL4=
+github.com/rohit-nayak-ps/ajson v0.7.2-0.20230316112806-97deb03d883c/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
+github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1 h1:acClJNSOjUrAUKW+ZneCZymCFDWtSaJG5YQl8FoOlyI=
-github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1/go.mod h1:Pgf1sZ2KrHK8vdRTV5UHGp80LT7HMUKuNAiKC402abY=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sjmudd/stopwatch v0.1.1 h1:x45OvxFB5OtCkjvYtzRF5fWB857Jzjjk84Oyd5C5ebw=
+github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
+github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -686,28 +735,31 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spyzhov/ajson v0.4.2 h1:JMByd/jZApPKDvNsmO90X2WWGbmT2ahDFp73QhZbg3s=
-github.com/spyzhov/ajson v0.4.2/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tebeka/selenium v0.9.9 h1:cNziB+etNgyH/7KlNI7RMC1ua5aH1+5wUlFQyzeMh+w=
-github.com/tebeka/selenium v0.9.9/go.mod h1:5Fr8+pUvU6B1OiPfkdCKdXZyr5znvVkxuPd0NOdZCQc=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
+github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
+github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo=
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@@ -715,19 +767,20 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
-github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y=
-github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
+github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
-github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
-github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
-github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
+github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
@@ -737,17 +790,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/z-division/go-zookeeper v0.0.0-20190128072838-6d7457066b9b h1:Itr7GbuXoM1PK/eCeNNia4Qd3ib9IgX9g9SpXgo8BwQ=
-github.com/z-division/go-zookeeper v0.0.0-20190128072838-6d7457066b9b/go.mod h1:JNALoWa+nCXR8SmgLluHcBNVJgyejzpKPZk9pX2yXXE=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/z-division/go-zookeeper v1.0.0 h1:ULsCj0nP6+U1liDFWe+2oEF6o4amixoDcDlwEUghVUY=
+github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
+go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
+go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
+go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
+go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
+go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -757,21 +810,27 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
+go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
+go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA=
+go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0=
+go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -781,14 +840,17 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 h1:NvGWuYG8dkDHFSKksI1P9faiVJ9rayE6l0+ouWVIDs8=
-golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -799,6 +861,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230131160201-f062dba9d201 h1:BEABXpNXLEz0WxtA+6CQIz2xkg80e+1zrhWyMcq8VzE=
+golang.org/x/exp v0.0.0-20230131160201-f062dba9d201/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp/typeparams v0.0.0-20230131160201-f062dba9d201 h1:O1QcdQUR9htWjzzsXVFPX+RJ3n1P/u/5bsQR8dbs5BY=
+golang.org/x/exp/typeparams v0.0.0-20230131160201-f062dba9d201/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -825,17 +891,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -852,7 +917,9 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -863,23 +930,33 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -889,13 +966,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -907,12 +979,14 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -938,14 +1012,13 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -955,34 +1028,48 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -992,15 +1079,17 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1010,7 +1099,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -1018,13 +1106,11 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1044,6 +1130,7 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -1055,18 +1142,22 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
+golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -1086,11 +1177,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/api v0.45.0 h1:pqMffJFLBVUDIoYsHcqtxgQVTsmxMDpYLOc5MT4Jrww=
-google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA=
+google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8=
+google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1104,7 +1192,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -1121,6 +1208,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1136,15 +1224,11 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 h1:HBPuvo39L0DgfVn9eHR3ki/RjZoUFWa+em77e7KFDfs=
-google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI=
+google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1164,13 +1248,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ=
+google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY=
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b h1:D/GTYPo6I1oEo08Bfpuj3xl5XE+UGHj7//5fVyKxhsQ=
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -1186,17 +1268,19 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54=
-gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 h1:w3mHEgOR1o52mkyCbkTM+El8DG732+Fnug4FAGhIpsk=
+gopkg.in/DataDog/dd-trace-go.v1 v1.47.0/go.mod h1:aHb6c4hPRANXnB64LDAKyfWotKgfRjlHv23MnahM8AI=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1205,10 +1289,10 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ldap.v2 v2.5.0 h1:1rO3ojzsHUk+gq4ZYhC4Pg+EzWaaKIV8+DJwExS5/QQ=
-gopkg.in/ldap.v2 v2.5.0/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ldap.v2 v2.5.1 h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU=
+gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -1227,8 +1311,10 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1237,49 +1323,78 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA=
+honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
+inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU=
+inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k=
k8s.io/api v0.18.19/go.mod h1:lmViaHqL3es8JiaK3pCJMjBKm2CnzIcAXpHKifwbmAg=
-k8s.io/api v0.20.6 h1:bgdZrW++LqgrLikWYNruIKAtltXbSCX2l5mJu11hrVE=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
+k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
k8s.io/apiextensions-apiserver v0.18.19 h1:z7tzzrsODC0cqvp3Pcy2HHc6wOnaSQQEWn0l/jbrJ6c=
k8s.io/apiextensions-apiserver v0.18.19/go.mod h1:kiomVdryKCrn+R0E+iPx+bZ/00rgj5tPXEBduSEJwgI=
k8s.io/apimachinery v0.18.19/go.mod h1:70HIRzSveORLKbatTlXzI2B2UUhbWzbq8Vqyf+HbdUQ=
-k8s.io/apimachinery v0.20.6 h1:R5p3SlhaABYShQSO6LpPsYHjV05Q+79eBUR0Ut/f4tk=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
+k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/apiserver v0.18.19/go.mod h1:VY80gRUh89Cmnx2s9S5nZTF8vwzEKweAFy7nTFuFLRU=
k8s.io/client-go v0.18.19/go.mod h1:lB+d4UqdzSjaU41VODLYm/oon3o05LAzsVpm6Me5XkY=
-k8s.io/client-go v0.20.6 h1:nJZOfolnsVtDtbGJNCxzOtKUAu7zvXjB8+pMo9UNxZo=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/code-generator v0.18.19 h1:+CWLkBN3xB7WL89ji5XQgLRlT8spq+ZmKlUGTkTw9eE=
+k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU=
+k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE=
k8s.io/code-generator v0.18.19/go.mod h1:l5yJd8cLSvkIb0ZJMsQdWuDOx5rWfLNpgmHQyl3LmBE=
+k8s.io/code-generator v0.26.1 h1:dusFDsnNSKlMFYhzIM0jAO1OlnTN5WYwQQ+Ai12IIlo=
+k8s.io/code-generator v0.26.1/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I=
k8s.io/component-base v0.18.19/go.mod h1:nQMCdH6RaS/GD0J1YZqc5NInfCdknth4BwlAT5Mf7tA=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac h1:sAvhNk5RRuc6FNYGqe7Ygz3PSo/2wGWbulskmzRX8Vs=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms=
+k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
+k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 h1:vV3ZKAUX0nMjTflyfVea98dTfROpIxDaEsQws0FT2Ts=
+k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE=
+k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
+modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
+modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
+modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
+modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0=
+modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
+modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.20.3 h1:SqGJMMxjj1PHusLxdYxeQSodg7Jxn9WWkaAQjKrntZs=
+modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A=
+modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.1/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3 h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/go/bucketpool/bucketpool_test.go b/go/bucketpool/bucketpool_test.go
index 3d74c3f94ce..7649f9b6278 100644
--- a/go/bucketpool/bucketpool_test.go
+++ b/go/bucketpool/bucketpool_test.go
@@ -170,6 +170,9 @@ func TestFuzz(t *testing.T) {
maxTestSize := 16384
for i := 0; i < 20000; i++ {
minSize := rand.Intn(maxTestSize)
+ if minSize == 0 {
+ minSize = 1
+ }
maxSize := rand.Intn(maxTestSize-minSize) + minSize
p := New(minSize, maxSize)
bufSize := rand.Intn(maxTestSize)
diff --git a/go/cmd/mysqlctl/mysqlctl.go b/go/cmd/mysqlctl/mysqlctl.go
index 057e48d4aa1..02431cf20a6 100644
--- a/go/cmd/mysqlctl/mysqlctl.go
+++ b/go/cmd/mysqlctl/mysqlctl.go
@@ -44,11 +44,8 @@ var (
)
func init() {
- servenv.RegisterDefaultFlags()
servenv.RegisterDefaultSocketFileFlags()
servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
servenv.RegisterServiceMapFlag()
// mysqlctl only starts and stops mysql, only needs dba.
dbconfigs.RegisterFlags(dbconfigs.Dba)
diff --git a/go/cmd/query_analyzer/query_analyzer.go b/go/cmd/query_analyzer/query_analyzer.go
index 087c88fa5f3..2138bde2673 100644
--- a/go/cmd/query_analyzer/query_analyzer.go
+++ b/go/cmd/query_analyzer/query_analyzer.go
@@ -70,6 +70,7 @@ func main() {
acl.RegisterFlags(fs)
servenv.RegisterMySQLServerFlags(fs)
_flag.Parse(fs)
+ logutil.PurgeLogs()
for _, filename := range _flag.Args() {
fmt.Printf("processing: %s\n", filename)
if err := processFile(filename); err != nil {
diff --git a/go/cmd/rulesctl/cmd/main.go b/go/cmd/rulesctl/cmd/main.go
index d100b8c8680..1b5ab30acfd 100644
--- a/go/cmd/rulesctl/cmd/main.go
+++ b/go/cmd/rulesctl/cmd/main.go
@@ -4,6 +4,7 @@ import (
"github.com/spf13/cobra"
_flag "vitess.io/vitess/go/internal/flag"
+ "vitess.io/vitess/go/vt/logutil"
)
var configFile string
@@ -14,6 +15,7 @@ func Main() *cobra.Command {
Args: cobra.NoArgs,
PreRun: func(cmd *cobra.Command, args []string) {
_flag.TrickGlog()
+ logutil.PurgeLogs()
},
Run: func(cmd *cobra.Command, _ []string) { cmd.Help() },
}
diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go
index 2720f701379..210e2edb918 100644
--- a/go/cmd/vtadmin/main.go
+++ b/go/cmd/vtadmin/main.go
@@ -26,6 +26,7 @@ import (
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtadmin"
"vitess.io/vitess/go/vt/vtadmin/cache"
@@ -58,6 +59,7 @@ var (
Use: "vtadmin",
PreRun: func(cmd *cobra.Command, args []string) {
_flag.TrickGlog()
+ logutil.PurgeLogs()
if opts.EnableTracing || httpOpts.EnableTracing {
startTracing(cmd)
@@ -168,7 +170,7 @@ func main() {
rootCmd.Flags().BoolVar(&httpOpts.EnableTracing, "http-tracing", false, "whether to enable tracing on the HTTP server")
// gRPC server flags
- rootCmd.Flags().BoolVar(&opts.AllowReflection, "grpc-allow-reflection", false, "whether to register the gRPC server for reflection; this is required to use tools like `grpc_cli`")
+ rootCmd.Flags().BoolVar(&opts.AllowReflection, "grpc-allow-reflection", false, "whether to register the gRPC server for reflection; this is required to use tools like grpc_cli")
rootCmd.Flags().BoolVar(&opts.EnableChannelz, "grpc-enable-channelz", false, "whether to enable the channelz service on the gRPC server")
// HTTP server flags
diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go
index 3c595f6ae67..eba97493170 100644
--- a/go/cmd/vtbackup/vtbackup.go
+++ b/go/cmd/vtbackup/vtbackup.go
@@ -74,7 +74,6 @@ import (
"vitess.io/vitess/go/cmd"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
@@ -111,6 +110,7 @@ var (
initKeyspace string
initShard string
concurrency = 4
+ incrementalFromPos string
// mysqlctld-like flags
mysqlPort = 3306
mysqlSocket string
@@ -118,6 +118,7 @@ var (
initDBSQLFile string
detachedMode bool
keepAliveTimeout = 0 * time.Second
+ disableRedoLog = false
)
func registerFlags(fs *pflag.FlagSet) {
@@ -132,6 +133,7 @@ func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&initKeyspace, "init_keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet")
fs.StringVar(&initShard, "init_shard", initShard, "(init parameter) shard to use for this tablet")
fs.IntVar(&concurrency, "concurrency", concurrency, "(init restore parameter) how many concurrent files to restore at once")
+ fs.StringVar(&incrementalFromPos, "incremental_from_pos", incrementalFromPos, "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position")
// mysqlctld-like flags
fs.IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port")
fs.StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket")
@@ -139,6 +141,7 @@ func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db")
fs.BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal")
fs.DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.")
+ fs.BoolVar(&disableRedoLog, "disable-redo-log", disableRedoLog, "Disable InnoDB redo log during replication-from-primary phase of backup.")
acl.RegisterFlags(fs)
}
@@ -155,7 +158,6 @@ func main() {
servenv.ParseFlags("vtbackup")
servenv.Init()
-
ctx, cancel := context.WithCancel(context.Background())
servenv.OnClose(func() {
cancel()
@@ -279,15 +281,16 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back
}
backupParams := mysqlctl.BackupParams{
- Cnf: mycnf,
- Mysqld: mysqld,
- Logger: logutil.NewConsoleLogger(),
- Concurrency: concurrency,
- HookExtraEnv: extraEnv,
- TopoServer: topoServer,
- Keyspace: initKeyspace,
- Shard: initShard,
- TabletAlias: topoproto.TabletAliasString(tabletAlias),
+ Cnf: mycnf,
+ Mysqld: mysqld,
+ Logger: logutil.NewConsoleLogger(),
+ Concurrency: concurrency,
+ IncrementalFromPos: incrementalFromPos,
+ HookExtraEnv: extraEnv,
+ TopoServer: topoServer,
+ Keyspace: initKeyspace,
+ Shard: initShard,
+ TabletAlias: topoproto.TabletAliasString(tabletAlias),
}
// In initial_backup mode, just take a backup of this empty database.
if initialBackup {
@@ -299,16 +302,11 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back
if err := mysqld.ResetReplication(ctx); err != nil {
return fmt.Errorf("can't reset replication: %v", err)
}
- cmds := mysqlctl.CreateReparentJournal()
- cmds = append(cmds, fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", sqlescape.EscapeID(dbName)))
- if err := mysqld.ExecuteSuperQueryList(ctx, cmds); err != nil {
- return fmt.Errorf("can't initialize database: %v", err)
+ cmd := mysqlctl.GenerateInitialBinlogEntry()
+ if err := mysqld.ExecuteSuperQueryList(ctx, []string{cmd}); err != nil {
+ return err
}
- // Execute Alter commands on reparent_journal and ignore errors
- cmds = mysqlctl.AlterReparentJournal()
- _ = mysqld.ExecuteSuperQueryList(ctx, cmds)
-
backupParams.BackupTime = time.Now()
// Now we're ready to take the backup.
if err := mysqlctl.Backup(ctx, backupParams); err != nil {
@@ -326,7 +324,6 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back
Logger: logutil.NewConsoleLogger(),
Concurrency: concurrency,
HookExtraEnv: extraEnv,
- LocalMetadata: map[string]string{},
DeleteBeforeRestore: true,
DbName: dbName,
Keyspace: initKeyspace,
@@ -349,6 +346,16 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back
return fmt.Errorf("can't restore from backup: %v", err)
}
+ // Disable redo logging (if we can) before we start replication.
+ disabledRedoLog := false
+ if disableRedoLog {
+ if err := mysqld.DisableRedoLog(ctx); err != nil {
+ log.Warningf("Error disabling redo logging: %v", err)
+ } else {
+ disabledRedoLog = true
+ }
+ }
+
// We have restored a backup. Now start replication.
if err := resetReplication(ctx, restorePos, mysqld); err != nil {
return fmt.Errorf("error resetting replication: %v", err)
@@ -431,6 +438,13 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back
return fmt.Errorf("not taking backup: replication did not make any progress from restore point: %v", restorePos)
}
+ // Re-enable redo logging.
+ if disabledRedoLog {
+ if err := mysqld.EnableRedoLog(ctx); err != nil {
+ return fmt.Errorf("failed to re-enable redo log: %v", err)
+ }
+ }
+
if restartBeforeBackup {
log.Info("Proceeding with clean MySQL shutdown and startup to flush all buffers.")
// Prep for full/clean shutdown (not typically the default)
diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go
index 4295766723a..adc060d7737 100644
--- a/go/cmd/vtclient/vtclient.go
+++ b/go/cmd/vtclient/vtclient.go
@@ -184,6 +184,8 @@ func run() (*results, error) {
_flag.Parse(fs)
args := _flag.Args()
+ logutil.PurgeLogs()
+
if len(args) == 0 {
pflag.Usage()
return nil, errors.New("no arguments provided. See usage above")
diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go
index acbea8ff490..52cf2f0c8d2 100644
--- a/go/cmd/vtcombo/main.go
+++ b/go/cmd/vtcombo/main.go
@@ -169,7 +169,6 @@ func main() {
// vtctld UI requires the cell flag
flags.Set("cell", tpb.Cells[0])
- flags.Set("enable_realtime_stats", "true")
if flags.Lookup("log_dir") == nil {
flags.Set("log_dir", "$VTDATAROOT/tmp")
}
diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go
index 45f63026982..175e49c6831 100644
--- a/go/cmd/vtctl/vtctl.go
+++ b/go/cmd/vtctl/vtctl.go
@@ -41,7 +41,6 @@ import (
"vitess.io/vitess/go/vt/vtctl/grpcvtctldserver"
"vitess.io/vitess/go/vt/vtctl/localvtctldclient"
"vitess.io/vitess/go/vt/vttablet/tmclient"
- "vitess.io/vitess/go/vt/workflow"
"vitess.io/vitess/go/vt/wrangler"
)
@@ -133,8 +132,6 @@ func main() {
ts := topo.Open()
defer ts.Close()
- vtctl.WorkflowManager = workflow.NewManager(ts)
-
ctx, cancel := context.WithTimeout(context.Background(), waitTime)
installSignalHandlers(cancel)
@@ -177,7 +174,7 @@ func main() {
args = args[1:]
fallthrough
default:
- log.Warningf("WARNING: vtctl should only be used for VDiff workflows. Consider using vtctldclient for all other commands.")
+ log.Warningf("WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.")
wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
diff --git a/go/cmd/vtctldclient/command/backups.go b/go/cmd/vtctldclient/command/backups.go
index 53aac5b51bb..c427a88f1df 100644
--- a/go/cmd/vtctldclient/command/backups.go
+++ b/go/cmd/vtctldclient/command/backups.go
@@ -129,8 +129,8 @@ func commandBackupShard(cmd *cobra.Command, args []string) error {
stream, err := client.BackupShard(commandCtx, &vtctldatapb.BackupShardRequest{
Keyspace: keyspace,
Shard: shard,
- AllowPrimary: backupOptions.AllowPrimary,
- Concurrency: backupOptions.Concurrency,
+ AllowPrimary: backupShardOptions.AllowPrimary,
+ Concurrency: backupShardOptions.Concurrency,
})
if err != nil {
return err
diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go
index 4c887d42f23..d952168f909 100644
--- a/go/cmd/vtctldclient/command/keyspaces.go
+++ b/go/cmd/vtctldclient/command/keyspaces.go
@@ -266,12 +266,16 @@ func commandGetKeyspace(cmd *cobra.Command, args []string) error {
resp, err := client.GetKeyspace(commandCtx, &vtctldatapb.GetKeyspaceRequest{
Keyspace: ks,
})
+ if err != nil {
+ return err
+ }
+ data, err := cli.MarshalJSON(resp.Keyspace)
if err != nil {
return err
}
- fmt.Printf("%+v\n", resp.Keyspace)
+ fmt.Printf("%s\n", data)
return nil
}
diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go
index 985cd227607..6483d699457 100644
--- a/go/cmd/vtctldclient/command/reparents.go
+++ b/go/cmd/vtctldclient/command/reparents.go
@@ -53,6 +53,7 @@ WARNING: this can cause data loss on an already-replicating shard. PlannedRepare
EmergencyReparentShard should be used instead.
`,
DisableFlagsInUseLine: true,
+ Deprecated: "Please use PlannedReparentShard instead",
Args: cobra.ExactArgs(2),
RunE: commandInitShardPrimary,
}
@@ -79,7 +80,7 @@ EmergencyReparentShard should be used instead.
Short: "Updates the topology record for the tablet's shard to acknowledge that an external tool made this tablet the primary.",
Long: `Updates the topology record for the tablet's shard to acknowledge that an external tool made this tablet the primary.
-See the Reparenting guide for more information: https://vitess.io/docs/user-guides/reparenting/#external-reparenting.
+See the Reparenting guide for more information: https://vitess.io/docs/user-guides/configuration-advanced/reparenting/#external-reparenting.
`,
DisableFlagsInUseLine: true,
Args: cobra.ExactArgs(1),
diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go
index 0d53c97c72d..9e59276993c 100644
--- a/go/cmd/vtctldclient/command/root.go
+++ b/go/cmd/vtctldclient/command/root.go
@@ -19,12 +19,15 @@ package command
import (
"context"
"errors"
+ "fmt"
"io"
+ "strconv"
"time"
"github.com/spf13/cobra"
"vitess.io/vitess/go/trace"
+ "vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtctl/vtctldclient"
)
@@ -48,15 +51,9 @@ var (
// We use PersistentPreRun to set up the tracer, grpc client, and
// command context for every command.
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
+ logutil.PurgeLogs()
traceCloser = trace.StartTracing("vtctldclient")
- if VtctldClientProtocol != "local" {
- if err := ensureServerArg(); err != nil {
- return err
- }
- }
-
- client, err = vtctldclient.New(VtctldClientProtocol, server)
-
+ client, err = getClientForCommand(cmd)
ctx := cmd.Context()
if ctx == nil {
ctx = context.Background()
@@ -66,9 +63,11 @@ var (
},
// Similarly, PersistentPostRun cleans up the resources spawned by
// PersistentPreRun.
- PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
+ PersistentPostRunE: func(cmd *cobra.Command, args []string) (err error) {
commandCancel()
- err := client.Close()
+ if client != nil {
+ err = client.Close()
+ }
trace.LogErrorsWhenClosing(traceCloser)
return err
},
@@ -82,18 +81,52 @@ var (
// propagated).
SilenceErrors: true,
Version: servenv.AppVersion.String(),
+ // If we've reached this function, it means that:
+ //
+ // (1) The user specified some positional arguments, which, for the way
+ // we've structured things can only be a subcommand name, **and**
+ //
+ // (2) Cobra was unable to find a subcommand with that name for which to
+ // call a Run or RunE function.
+ //
+ // From this we conclude that the user was trying to either run a
+ // command that doesn't exist (e.g. "vtctldclient delete-my-data") or
+ // has misspelled a legitimate command (e.g. "vtctldclient StapReplication").
+ // If we think this has happened, return an error, which will get
+ // displayed to the user in main.go along with the usage.
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if cmd.Flags().NArg() > 0 {
+ return fmt.Errorf("unknown command: %s", cmd.Flags().Arg(0))
+ }
+
+ return nil
+ },
}
)
var errNoServer = errors.New("please specify --server to specify the vtctld server to connect to")
-// ensureServerArg validates that --server was passed to the CLI.
-func ensureServerArg() error {
- if server == "" {
- return errNoServer
+const skipClientCreationKey = "skip_client_creation"
+
+// getClientForCommand returns a vtctldclient.VtctldClient for a given command.
+// It validates that --server was passed to the CLI for commands that need it.
+func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error) {
+ if skipStr, ok := cmd.Annotations[skipClientCreationKey]; ok {
+ skipClientCreation, err := strconv.ParseBool(skipStr)
+ if err != nil {
+ skipClientCreation = false
+ }
+
+ if skipClientCreation {
+ return nil, nil
+ }
+ }
+
+ if VtctldClientProtocol != "local" && server == "" {
+ return nil, errNoServer
}
- return nil
+ return vtctldclient.New(VtctldClientProtocol, server)
}
func init() {
diff --git a/go/cmd/vtctldclient/command/root_test.go b/go/cmd/vtctldclient/command/root_test.go
new file mode 100644
index 00000000000..155fac78705
--- /dev/null
+++ b/go/cmd/vtctldclient/command/root_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command_test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command"
+ "vitess.io/vitess/go/vt/vtctl/localvtctldclient"
+
+ vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
+)
+
+type emptyLocalServer struct {
+ vtctlservicepb.UnimplementedVtctldServer
+}
+
+func TestRoot(t *testing.T) {
+ t.Run("error on unknown subcommand", func(t *testing.T) {
+ args := append([]string{}, os.Args...)
+ protocol := command.VtctldClientProtocol
+ localvtctldclient.SetServer(&emptyLocalServer{})
+
+ t.Cleanup(func() {
+ os.Args = append([]string{}, args...)
+ command.VtctldClientProtocol = protocol
+ })
+
+ os.Args = []string{"vtctldclient", "this-is-bunk"}
+ command.VtctldClientProtocol = "local"
+
+ err := command.Root.Execute()
+ require.Error(t, err, "root command should error on unknown command")
+ assert.Contains(t, err.Error(), "unknown command")
+ })
+}
diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go
index 6a9f3981c43..8abe8bd0b94 100644
--- a/go/cmd/vtctldclient/command/schema.go
+++ b/go/cmd/vtctldclient/command/schema.go
@@ -293,7 +293,7 @@ func init() {
ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", wrangler.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.")
ApplySchema.Flags().BoolVar(&applySchemaOptions.SkipPreflight, "skip-preflight", false, "Skip pre-apply schema checks, and directly forward schema change query to shards.")
ApplySchema.Flags().StringVar(&applySchemaOptions.CallerID, "caller-id", "", "Effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used).")
- ApplySchema.Flags().StringSliceVar(&applySchemaOptions.SQL, "sql", nil, "Semicolon-delimited, repeatable SQL commands to apply. Exactly one of --sql|--sql-file is required.")
+ ApplySchema.Flags().StringArrayVar(&applySchemaOptions.SQL, "sql", nil, "Semicolon-delimited, repeatable SQL commands to apply. Exactly one of --sql|--sql-file is required.")
ApplySchema.Flags().StringVar(&applySchemaOptions.SQLFile, "sql-file", "", "Path to a file containing semicolon-delimited SQL commands to apply. Exactly one of --sql|--sql-file is required.")
Root.AddCommand(ApplySchema)
diff --git a/go/cmd/vtctldclient/command/shard_routing_rules.go b/go/cmd/vtctldclient/command/shard_routing_rules.go
index c1d6d295f42..10ce7e81747 100644
--- a/go/cmd/vtctldclient/command/shard_routing_rules.go
+++ b/go/cmd/vtctldclient/command/shard_routing_rules.go
@@ -35,15 +35,20 @@ var (
// ApplyShardRoutingRules makes an ApplyShardRoutingRules gRPC call to a vtctld.
ApplyShardRoutingRules = &cobra.Command{
Use: "ApplyShardRoutingRules {--rules RULES | --rules-file RULES_FILE} [--cells=c1,c2,...] [--skip-rebuild] [--dry-run]",
- Short: "Applies VSchema shard routing rules.",
+ Short: "Applies the provided shard routing rules.",
DisableFlagsInUseLine: true,
Args: cobra.NoArgs,
RunE: commandApplyShardRoutingRules,
}
// GetShardRoutingRules makes a GetShardRoutingRules gRPC call to a vtctld.
GetShardRoutingRules = &cobra.Command{
- Use: "GetShardRoutingRules",
- Short: "Displays VSchema shard routing rules.",
+ Use: "GetShardRoutingRules",
+ Short: "Displays the currently active shard routing rules as a JSON document.",
+ Long: `Displays the currently active shard routing rules as a JSON document.
+
+See the documentation on shard level migrations[1] for more information.
+
+[1]: https://vitess.io/docs/reference/vreplication/shardlevelmigrations/`,
DisableFlagsInUseLine: true,
Args: cobra.NoArgs,
RunE: commandGetShardRoutingRules,
diff --git a/go/cmd/vtctldclient/command/shards.go b/go/cmd/vtctldclient/command/shards.go
index b670b2ce929..a27421a9a87 100644
--- a/go/cmd/vtctldclient/command/shards.go
+++ b/go/cmd/vtctldclient/command/shards.go
@@ -23,6 +23,7 @@ import (
"github.com/spf13/cobra"
"vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -52,6 +53,38 @@ that shard.`,
Args: cobra.MinimumNArgs(1),
RunE: commandDeleteShards,
}
+ // GenerateShardRanges outputs a set of shard ranges assuming a (mostly)
+ // equal distribution of N shards.
+ GenerateShardRanges = &cobra.Command{
+ Use: "GenerateShardRanges ",
+ Short: "Print a set of shard ranges assuming a keyspace with N shards.",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ n, err := strconv.ParseInt(cmd.Flags().Arg(0), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ cli.FinishedParsing(cmd)
+
+ shards, err := key.GenerateShardRanges(int(n))
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(shards)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+ },
+ Annotations: map[string]string{
+ skipClientCreationKey: "true",
+ },
+ }
// GetShard makes a GetShard gRPC request to a vtctld.
GetShard = &cobra.Command{
Use: "GetShard ",
@@ -153,6 +186,15 @@ Use ctrl-C to interrupt the command and see partial results if needed.`,
Args: cobra.ExactArgs(2),
RunE: commandSourceShardDelete,
}
+
+ // ValidateVersionShard makes a ValidateVersionShard gRPC request to a vtctld.
+ ValidateVersionShard = &cobra.Command{
+ Use: "ValidateVersionShard ",
+ Short: "Validates that the version on the primary matches all of the replicas.",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ RunE: commandValidateVersionShard,
+ }
)
var createShardOptions = struct {
@@ -546,6 +588,31 @@ func commandSourceShardDelete(cmd *cobra.Command, args []string) error {
return nil
}
+func commandValidateVersionShard(cmd *cobra.Command, args []string) error {
+ keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0))
+ if err != nil {
+ return err
+ }
+
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.ValidateVersionShard(commandCtx, &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: keyspace,
+ Shard: shard,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
func init() {
CreateShard.Flags().BoolVarP(&createShardOptions.Force, "force", "f", false, "Overwrite an existing shard record, if one exists.")
CreateShard.Flags().BoolVarP(&createShardOptions.IncludeParent, "include-parent", "p", false, "Creates the parent keyspace record if does not already exist.")
@@ -557,6 +624,7 @@ func init() {
Root.AddCommand(DeleteShards)
Root.AddCommand(GetShard)
+ Root.AddCommand(GenerateShardRanges)
RemoveShardCell.Flags().BoolVarP(&removeShardCellOptions.Force, "force", "f", false, "Proceed even if the cell's topology server cannot be reached. The assumption is that you turned down the entire cell, and just need to update the global topo data.")
RemoveShardCell.Flags().BoolVarP(&removeShardCellOptions.Recursive, "recursive", "r", false, "Also delete all tablets in that cell beloning to the specified shard.")
@@ -574,6 +642,7 @@ func init() {
Root.AddCommand(ShardReplicationFix)
Root.AddCommand(ShardReplicationPositions)
Root.AddCommand(ShardReplicationRemove)
+ Root.AddCommand(ValidateVersionShard)
SourceShardAdd.Flags().StringVar(&sourceShardAddOptions.KeyRangeStr, "key-range", "", "Key range to use for the SourceShard.")
SourceShardAdd.Flags().StringSliceVar(&sourceShardAddOptions.Tables, "tables", nil, "Comma-separated lists of tables to replicate (for MoveTables). Each table name is either an exact match, or a regular expression of the form \"/regexp/\".")
diff --git a/go/cmd/vtctldclient/command/throttler.go b/go/cmd/vtctldclient/command/throttler.go
new file mode 100644
index 00000000000..b0dbd663013
--- /dev/null
+++ b/go/cmd/vtctldclient/command/throttler.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ // UpdateThrottlerConfig makes a UpdateThrottlerConfig gRPC call to a vtctld.
+ UpdateThrottlerConfig = &cobra.Command{
+ Use: "UpdateThrottlerConfig [--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] ",
+ Short: "Update the tablet throttler configuration for all tablets in the given keyspace (across all cells)",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ RunE: commandUpdateThrottlerConfig,
+ }
+)
+
+var updateThrottlerConfigOptions vtctldatapb.UpdateThrottlerConfigRequest
+
+func commandUpdateThrottlerConfig(cmd *cobra.Command, args []string) error {
+ keyspace := cmd.Flags().Arg(0)
+ cli.FinishedParsing(cmd)
+
+ updateThrottlerConfigOptions.CustomQuerySet = cmd.Flags().Changed("custom-query")
+ updateThrottlerConfigOptions.Keyspace = keyspace
+ _, err := client.UpdateThrottlerConfig(commandCtx, &updateThrottlerConfigOptions)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func init() {
+ UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.Enable, "enable", false, "Enable the throttler")
+ UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.Disable, "disable", false, "Disable the throttler")
+ UpdateThrottlerConfig.Flags().Float64Var(&updateThrottlerConfigOptions.Threshold, "threshold", 0, "threshold for the either default check (replication lag seconds) or custom check")
+ UpdateThrottlerConfig.Flags().StringVar(&updateThrottlerConfigOptions.CustomQuery, "custom-query", "", "custom throttler check query")
+ UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckSelf, "check-as-check-self", false, "/throttler/check requests behave as is /throttler/check-self was called")
+ UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckShard, "check-as-check-shard", false, "use standard behavior for /throttler/check requests")
+ Root.AddCommand(UpdateThrottlerConfig)
+}
diff --git a/go/cmd/vtctldclient/command/topology.go b/go/cmd/vtctldclient/command/topology.go
new file mode 100644
index 00000000000..03b1ac73a4c
--- /dev/null
+++ b/go/cmd/vtctldclient/command/topology.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ // GetTopologyPath makes a GetTopologyPath gRPC call to a vtctld.
+ GetTopologyPath = &cobra.Command{
+ Use: "GetTopologyPath ",
+ Short: "Gets the value associated with the particular path (key) in the topology server.",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ RunE: commandGetTopologyPath,
+ }
+)
+
+func commandGetTopologyPath(cmd *cobra.Command, args []string) error {
+ path := cmd.Flags().Arg(0)
+
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.GetTopologyPath(commandCtx, &vtctldatapb.GetTopologyPathRequest{
+ Path: path,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp.Cell)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
+
+func init() {
+ Root.AddCommand(GetTopologyPath)
+}
diff --git a/go/cmd/vtexplain/vtexplain.go b/go/cmd/vtexplain/vtexplain.go
index 616bc211672..54f645df2ab 100644
--- a/go/cmd/vtexplain/vtexplain.go
+++ b/go/cmd/vtexplain/vtexplain.go
@@ -64,7 +64,7 @@ func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&replicationMode, "replication-mode", replicationMode, "The replication mode to simulate -- must be set to either ROW or STATEMENT")
fs.BoolVar(&normalize, "normalize", normalize, "Whether to enable vtgate normalization")
fs.StringVar(&dbName, "dbname", dbName, "Optional database target to override normal routing")
- fs.StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4")
+ fs.StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner")
fs.IntVar(&numShards, "shards", numShards, "Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored.")
fs.StringVar(&executionMode, "execution-mode", executionMode, "The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc")
fs.StringVar(&outputMode, "output-mode", outputMode, "Output in human-friendly text or json")
@@ -114,8 +114,8 @@ func main() {
func parseAndRun() error {
plannerVersion, _ := plancontext.PlannerNameToVersion(plannerVersionStr)
- if plannerVersion != querypb.ExecuteOptions_V3 && plannerVersion != querypb.ExecuteOptions_Gen4 {
- return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid values are V3 and Gen4", plannerVersionStr)
+ if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_V3 && plannerVersion != querypb.ExecuteOptions_Gen4 {
+ return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid values are V3 and Gen4 or an empty value to use the default planner", plannerVersionStr)
}
sql, err := getFileParam(sqlFlag, sqlFileFlag, "sql", true)
diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go
index 2f426891bf9..d043ecf4f95 100644
--- a/go/cmd/vtgate/vtgate.go
+++ b/go/cmd/vtgate/vtgate.go
@@ -28,6 +28,7 @@ import (
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/srvtopo"
@@ -36,18 +37,17 @@ import (
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
var (
- cell = "test_nj"
- tabletTypesToWait, plannerName string
+ cell = ""
+ tabletTypesToWait []topodatapb.TabletType
+ plannerName string
)
func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&cell, "cell", cell, "cell to use")
- fs.StringVar(&tabletTypesToWait, "tablet_types_to_wait", tabletTypesToWait, "wait till connected for specified tablet types during Gateway initialization")
+ fs.Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.")
fs.StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.")
acl.RegisterFlags(fs)
@@ -134,12 +134,7 @@ func main() {
tabletTypes := make([]topodatapb.TabletType, 0, 1)
if len(tabletTypesToWait) != 0 {
- for _, ttStr := range strings.Split(tabletTypesToWait, ",") {
- tt, err := topoproto.ParseTabletType(ttStr)
- if err != nil {
- log.Errorf("unknown tablet type: %v", ttStr)
- continue
- }
+ for _, tt := range tabletTypesToWait {
if topoproto.IsServingType(tt) {
tabletTypes = append(tabletTypes, tt)
}
diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go
index f5418819a05..f3591505c45 100644
--- a/go/cmd/vtorc/main.go
+++ b/go/cmd/vtorc/main.go
@@ -17,18 +17,14 @@
package main
import (
- "os"
- "reflect"
"strings"
_ "github.com/go-sql-driver/mysql"
- _ "github.com/mattn/go-sqlite3"
"github.com/spf13/pflag"
+ _ "modernc.org/sqlite"
"vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/vt/grpccommon"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/inst"
@@ -95,42 +91,24 @@ func transformArgsForPflag(fs *pflag.FlagSet, args []string) (result []string) {
// main is the application's entry point. It will spawn an HTTP interface.
func main() {
- // TODO(ajm188): after v15, remove this pflag hack and use servenv.ParseFlags
- // directly.
- fs := pflag.NewFlagSet("vtorc", pflag.ExitOnError)
- grpccommon.RegisterFlags(fs)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
- logic.RegisterFlags(fs)
- server.RegisterFlags(fs)
- config.RegisterFlags(fs)
servenv.RegisterDefaultFlags()
servenv.RegisterFlags()
- acl.RegisterFlags(fs)
- servenv.OnParseFor("vtorc", func(flags *pflag.FlagSet) { flags.AddFlagSet(fs) })
-
- args := append([]string{}, os.Args...)
- os.Args = os.Args[0:1]
-
- configFile := fs.String("config", "", "config file name")
-
- os.Args = append(os.Args, transformArgsForPflag(fs, args[1:])...)
- if !reflect.DeepEqual(args, os.Args) {
- // warn the user so they can adjust their CLI scripts
- warning := `CLI args passed do not conform to pflag parsing behavior
-The arguments have been transformed for compatibility as follows:
- %v => %v
-Please update your scripts before the next version, when this will begin to break.
-`
- log.Warningf(warning, args, os.Args)
- }
+ var configFile string
+ servenv.OnParseFor("vtorc", func(fs *pflag.FlagSet) {
+ logic.RegisterFlags(fs)
+ server.RegisterFlags(fs)
+ config.RegisterFlags(fs)
+ acl.RegisterFlags(fs)
+
+ fs.StringVar(&configFile, "config", "", "config file name")
+ })
servenv.ParseFlags("vtorc")
config.UpdateConfigValuesFromFlags()
log.Info("starting vtorc")
- if len(*configFile) > 0 {
- config.ForceRead(*configFile)
+ if len(configFile) > 0 {
+ config.ForceRead(configFile)
} else {
config.Read("/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json")
}
diff --git a/go/cmd/vtorc/status.go b/go/cmd/vtorc/status.go
index bdb54963051..a4d8a59d3fc 100644
--- a/go/cmd/vtorc/status.go
+++ b/go/cmd/vtorc/status.go
@@ -24,7 +24,7 @@ import (
// addStatusParts adds UI parts to the /debug/status page of VTOrc
func addStatusParts() {
servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any {
- recoveries, _ := logic.ReadRecentRecoveries("", false, 0)
+ recoveries, _ := logic.ReadRecentRecoveries(false, 0)
return recoveries
})
}
diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go
index f9c43bd2a2a..c7ee81511d5 100644
--- a/go/cmd/vttablet/vttablet.go
+++ b/go/cmd/vttablet/vttablet.go
@@ -123,7 +123,6 @@ func main() {
UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()),
VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()),
VDiffEngine: vdiff.NewEngine(config, ts, tablet),
- MetadataManager: &mysqlctl.MetadataManager{},
}
if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil {
log.Exitf("failed to parse --tablet-path or initialize DB credentials: %v", err)
diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go
index 6eedf82cdcf..a91005f841c 100644
--- a/go/cmd/vttestserver/main.go
+++ b/go/cmd/vttestserver/main.go
@@ -26,6 +26,7 @@ import (
"strings"
"sync"
"syscall"
+ "time"
"github.com/spf13/pflag"
"google.golang.org/protobuf/encoding/prototext"
@@ -152,8 +153,6 @@ func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&config.TabletHostName, "tablet_hostname", "localhost", "The hostname to use for the tablet otherwise it will be derived from OS' hostname")
- fs.BoolVar(&config.InitWorkflowManager, "workflow_manager_init", false, "Enable workflow manager")
-
fs.StringVar(&config.VSchemaDDLAuthorizedUsers, "vschema_ddl_authorized_users", "", "Comma separated list of users authorized to execute vschema ddl operations via vtgate")
fs.StringVar(&config.ForeignKeyMode, "foreign_key_mode", "allow", "This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow")
@@ -165,6 +164,7 @@ func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&config.ExternalTopoGlobalServerAddress, "external_topo_global_server_address", "", "the address of the global topology server for vtcombo process")
fs.StringVar(&config.ExternalTopoGlobalRoot, "external_topo_global_root", "", "the path of the global topology data in the global topology server for vtcombo process")
+ fs.DurationVar(&config.VtgateTabletRefreshInterval, "tablet_refresh_interval", 10*time.Second, "Interval at which vtgate refreshes tablet information from topology server.")
acl.RegisterFlags(fs)
}
diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go
index 28deb05d527..5cc736ea959 100644
--- a/go/cmd/zk/zkcmd.go
+++ b/go/cmd/zk/zkcmd.go
@@ -40,8 +40,8 @@ import (
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/zk2topo"
- "vitess.io/vitess/go/vt/vtctl"
)
var doc = `
@@ -147,6 +147,7 @@ func main() {
}
pflag.Parse()
+ logutil.PurgeLogs()
if help || pflag.Arg(0) == "help" {
pflag.Usage()
@@ -588,7 +589,7 @@ func cmdCat(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
}
decoded := ""
if decodeProto {
- decoded, err = vtctl.DecodeContent(zkPath, data, false)
+ decoded, err = topo.DecodeContent(zkPath, data, false)
if err != nil {
log.Warningf("cat: cannot proto decode %v: %v", zkPath, err)
decoded = string(data)
diff --git a/go/event/syslogger/fake_logger.go b/go/event/syslogger/fake_logger.go
new file mode 100644
index 00000000000..a3eeaf307c1
--- /dev/null
+++ b/go/event/syslogger/fake_logger.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreedto in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package syslogger
+
+import (
+ "fmt"
+
+ "vitess.io/vitess/go/vt/log"
+)
+
+type loggerMsg struct {
+ msg string
+ level string
+}
+type TestLogger struct {
+ logs []loggerMsg
+ savedInfof func(format string, args ...any)
+ savedWarningf func(format string, args ...any)
+ savedErrorf func(format string, args ...any)
+}
+
+func NewTestLogger() *TestLogger {
+ tl := &TestLogger{
+ savedInfof: log.Infof,
+ savedWarningf: log.Warningf,
+ savedErrorf: log.Errorf,
+ }
+ log.Infof = tl.recordInfof
+ log.Warningf = tl.recordWarningf
+ log.Errorf = tl.recordErrorf
+ return tl
+}
+
+func (tl *TestLogger) Close() {
+ log.Infof = tl.savedInfof
+ log.Warningf = tl.savedWarningf
+ log.Errorf = tl.savedErrorf
+}
+
+func (tl *TestLogger) recordInfof(format string, args ...any) {
+ msg := fmt.Sprintf(format, args...)
+ tl.logs = append(tl.logs, loggerMsg{msg, "INFO"})
+ tl.savedInfof(msg)
+}
+
+func (tl *TestLogger) recordWarningf(format string, args ...any) {
+ msg := fmt.Sprintf(format, args...)
+ tl.logs = append(tl.logs, loggerMsg{msg, "WARNING"})
+ tl.savedWarningf(msg)
+}
+
+func (tl *TestLogger) recordErrorf(format string, args ...any) {
+ msg := fmt.Sprintf(format, args...)
+ tl.logs = append(tl.logs, loggerMsg{msg, "ERROR"})
+ tl.savedErrorf(msg)
+}
+
+func (tl *TestLogger) getLog() loggerMsg {
+ if len(tl.logs) > 0 {
+ return tl.logs[len(tl.logs)-1]
+ }
+ return loggerMsg{"no logs!", "ERROR"}
+}
+
+func (tl *TestLogger) GetAllLogs() []string {
+ var logs []string
+ for _, l := range tl.logs {
+ logs = append(logs, l.level+":"+l.msg)
+ }
+ return logs
+}
diff --git a/go/event/syslogger/syslogger_test.go b/go/event/syslogger/syslogger_test.go
index 6549e4ca8bb..4847fecac2a 100644
--- a/go/event/syslogger/syslogger_test.go
+++ b/go/event/syslogger/syslogger_test.go
@@ -23,7 +23,6 @@ import (
"testing"
"vitess.io/vitess/go/event"
- "vitess.io/vitess/go/vt/log"
)
type TestEvent struct {
@@ -63,60 +62,6 @@ func (fw *fakeWriter) Info(msg string) error { return fw.write(syslog.LOG_INF
func (fw *fakeWriter) Notice(msg string) error { return fw.write(syslog.LOG_NOTICE, msg) }
func (fw *fakeWriter) Warning(msg string) error { return fw.write(syslog.LOG_WARNING, msg) }
-type loggerMsg struct {
- msg string
- level string
-}
-type testLogger struct {
- logs []loggerMsg
- savedInfof func(format string, args ...any)
- savedWarningf func(format string, args ...any)
- savedErrorf func(format string, args ...any)
-}
-
-func newTestLogger() *testLogger {
- tl := &testLogger{
- savedInfof: log.Infof,
- savedWarningf: log.Warningf,
- savedErrorf: log.Errorf,
- }
- log.Infof = tl.recordInfof
- log.Warningf = tl.recordWarningf
- log.Errorf = tl.recordErrorf
- return tl
-}
-
-func (tl *testLogger) Close() {
- log.Infof = tl.savedInfof
- log.Warningf = tl.savedWarningf
- log.Errorf = tl.savedErrorf
-}
-
-func (tl *testLogger) recordInfof(format string, args ...any) {
- msg := fmt.Sprintf(format, args...)
- tl.logs = append(tl.logs, loggerMsg{msg, "INFO"})
- tl.savedInfof(msg)
-}
-
-func (tl *testLogger) recordWarningf(format string, args ...any) {
- msg := fmt.Sprintf(format, args...)
- tl.logs = append(tl.logs, loggerMsg{msg, "WARNING"})
- tl.savedWarningf(msg)
-}
-
-func (tl *testLogger) recordErrorf(format string, args ...any) {
- msg := fmt.Sprintf(format, args...)
- tl.logs = append(tl.logs, loggerMsg{msg, "ERROR"})
- tl.savedErrorf(msg)
-}
-
-func (tl *testLogger) getLog() loggerMsg {
- if len(tl.logs) > 0 {
- return tl.logs[len(tl.logs)-1]
- }
- return loggerMsg{"no logs!", "ERROR"}
-}
-
// TestSyslog checks that our callback works.
func TestSyslog(t *testing.T) {
writer = &fakeWriter{}
@@ -132,7 +77,7 @@ func TestSyslog(t *testing.T) {
// TestBadWriter verifies we are still triggering (to normal logs) if
// the syslog connection failed
func TestBadWriter(t *testing.T) {
- tl := newTestLogger()
+ tl := NewTestLogger()
defer tl.Close()
writer = nil
diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt
index cd2758007e2..c380ebfb557 100644
--- a/go/flags/endtoend/mysqlctl.txt
+++ b/go/flags/endtoend/mysqlctl.txt
@@ -11,113 +11,70 @@ The commands are listed below. Use 'mysqlctl -- {-h, --help}' for comm
position
Global flags:
- --alsologtostderr log to standard error as well as files
- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
- --app_pool_size int Size of the connection pool for app connections (default 40)
- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
- --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
- --compression-engine-name string compressor engine used for compression. (default "pargzip")
- --compression-level int what level to pass to the compressor. (default 1)
- --db-credentials-file string db credentials file; send SIGHUP to reload this file
- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
- --db-credentials-vault-addr string URL to Vault server
- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
- --db_charset string Character set used for this tablet. (default "utf8mb4")
- --db_conn_query_info enable parsing and processing of QUERY_OK info fields
- --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
- --db_dba_password string db dba password
- --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
- --db_dba_user string db dba user userKey (default "vt_dba")
- --db_flags uint Flag values as defined by MySQL.
- --db_flavor string Flavor overrid. Valid value is FilePos.
- --db_host string The host name for the tcp connection.
- --db_port int tcp port
- --db_server_name string server name of the DB we are connecting to.
- --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
- --db_ssl_ca string connection ssl ca
- --db_ssl_ca_path string connection ssl ca path
- --db_ssl_cert string connection ssl certificate
- --db_ssl_key string connection ssl key
- --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
- --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
- --dba_pool_size int Size of the connection pool for dba connections (default 20)
- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
- --external-compressor string command with arguments to use when compressing a backup.
- --external-compressor-extension string extension to use when using an external compressor.
- --external-decompressor string command with arguments to use when decompressing a backup.
- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
- --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
- --grpc_initial_conn_window_size int gRPC initial connection window size
- --grpc_initial_window_size int gRPC initial window size
- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
- --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
- --grpc_server_initial_window_size int gRPC server initial window size
- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --mysql_port int MySQL port (default 3306)
- --mysql_server_version string MySQL server version to advertise.
- --mysql_socket string Path to the mysqld socket file
- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
- --port int port for the server
- --pprof strings enable profiling
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
- --socket_file string Local unix socket file to listen on
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --tablet_uid uint Tablet UID (default 41983)
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor overrid. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+ --db_port int tcp port
+ --db_server_name string server name of the DB we are connecting to.
+ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+ --db_ssl_ca string connection ssl ca
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
+ --mysql_port int MySQL port (default 3306)
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
+ --mysql_socket string Path to the mysqld socket file
+ --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --socket_file string Local unix socket file to listen on
+ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+ --tablet_uid uint Tablet UID (default 41983)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt
index 6ac669ed9c9..d5da79e347f 100644
--- a/go/flags/endtoend/mysqlctld.txt
+++ b/go/flags/endtoend/mysqlctld.txt
@@ -2,16 +2,7 @@ Usage of mysqlctld:
--alsologtostderr log to standard error as well as files
--app_idle_timeout duration Idle timeout for app connections (default 1m0s)
--app_pool_size int Size of the connection pool for app connections (default 40)
- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
- --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
--catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
- --compression-engine-name string compressor engine used for compression. (default "pargzip")
- --compression-level int what level to pass to the compressor. (default 1)
--db-credentials-file string db credentials file; send SIGHUP to reload this file
--db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
--db-credentials-vault-addr string URL to Vault server
@@ -43,10 +34,6 @@ Usage of mysqlctld:
--db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
--dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
--dba_pool_size int Size of the connection pool for dba connections (default 20)
- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
- --external-compressor string command with arguments to use when compressing a backup.
- --external-compressor-extension string extension to use when using an external compressor.
- --external-decompressor string command with arguments to use when decompressing a backup.
--grpc_auth_mode string Which auth plugin implementation to use (eg: static)
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
@@ -79,12 +66,13 @@ Usage of mysqlctld:
--log_err_stacks log stack traces for errors
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
--mysql_port int MySQL port (default 3306)
- --mysql_server_version string MySQL server version to advertise.
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--mysql_socket string Path to the mysqld socket file
--mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
--mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
--pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
@@ -98,15 +86,7 @@ Usage of mysqlctld:
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
--tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
--tablet_uid uint Tablet UID (default 41983)
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--wait_time duration How long to wait for mysqld startup or shutdown (default 5m0s)
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt
index 4b158925437..6e2c57db109 100644
--- a/go/flags/endtoend/vtaclcheck.txt
+++ b/go/flags/endtoend/vtaclcheck.txt
@@ -14,6 +14,6 @@ Usage of vtaclcheck:
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--static-auth-file string The path of the auth_server_static JSON file to check
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt
index 98f145904fa..f9602cbc20d 100644
--- a/go/flags/endtoend/vtbackup.txt
+++ b/go/flags/endtoend/vtbackup.txt
@@ -3,16 +3,18 @@ Usage of vtbackup:
--alsologtostderr log to standard error as well as files
--azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
--azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
+ --azblob_backup_buffer_size int The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service. (default 104857600)
--azblob_backup_container_name string Azure Blob Container Name.
--azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
--azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
--backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
--backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+ --backup_storage_compress if set, the backup files will be compressed. (default true)
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
+ --compression-engine-name string compressor engine used for compression. (default "pargzip")
+ --compression-level int what level to pass to the compressor. (default 1)
--concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
--consul_auth_static_file string JSON File to read the topos/tokens from.
--db-credentials-file string db credentials file; send SIGHUP to reload this file
@@ -63,6 +65,11 @@ Usage of vtbackup:
--db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
--db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
--detach detached mode - run backups detached from the terminal
+ --disable-redo-log Disable InnoDB redo log during replication-from-primary phase of backup.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+ --external-compressor string command with arguments to use when compressing a backup.
+ --external-compressor-extension string extension to use when using an external compressor.
+ --external-decompressor string command with arguments to use when decompressing a backup.
--file_backup_storage_root string Root directory for the file backup storage.
--gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
--gcs_backup_storage_root string Root prefix for all backup-related object names.
@@ -76,6 +83,7 @@ Usage of vtbackup:
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
-h, --help display usage and exit
+ --incremental_from_pos string Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position
--init_db_name_override string (init parameter) override the name of the db used by vttablet
--init_db_sql_file string path to .sql file to run after mysql_install_db
--init_keyspace string (init parameter) keyspace to use for this tablet
@@ -84,6 +92,7 @@ Usage of vtbackup:
--keep-alive-timeout duration Wait until timeout elapses after a successful backup before shutting down.
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
@@ -111,13 +120,13 @@ Usage of vtbackup:
--mycnf_socket_file string mysql socket file
--mycnf_tmp_dir string mysql tmp directory
--mysql_port int mysql port (default 3306)
- --mysql_server_version string MySQL server version to advertise.
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--mysql_socket string path to the mysql socket
--mysql_timeout duration how long to wait for mysqld startup (default 5m0s)
--port int port for the server
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.
--s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
--s3_backup_aws_region string AWS region to use. (default "us-east-1")
@@ -131,6 +140,11 @@ Usage of vtbackup:
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
--sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
--tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
--tablet_manager_grpc_cert string the cert to use to connect
@@ -157,14 +171,14 @@ Usage of vtbackup:
--topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
+ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
+ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
+ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
--xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
--xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
--xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtctlclient.txt b/go/flags/endtoend/vtctlclient.txt
index 8896b7b3f72..207f31905f2 100644
--- a/go/flags/endtoend/vtctlclient.txt
+++ b/go/flags/endtoend/vtctlclient.txt
@@ -30,8 +30,8 @@ Usage of vtctlclient:
--tracing-enable-logging whether to enable logging in the tracing service
--tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
--tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
--vtctld_grpc_ca string the server ca to use to validate servers when connecting
diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt
index 5895327dc5f..a252a7f1ff8 100644
--- a/go/flags/endtoend/vtctld.txt
+++ b/go/flags/endtoend/vtctld.txt
@@ -1,18 +1,15 @@
Usage of vtctld:
- --action_timeout duration time to wait for an action before resorting to force (default 2m0s)
- --allowed_tablet_types []topodatapb.TabletType Specifies the tablet types this vtgate is allowed to route queries to.
+ --action_timeout duration time to wait for an action before resorting to force (default 1m0s)
--alsologtostderr log to standard error as well as files
- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
- --app_pool_size int Size of the connection pool for app connections (default 40)
--azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
--azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
+ --azblob_backup_buffer_size int The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service. (default 104857600)
--azblob_backup_container_name string Azure Blob Container Name.
--azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
--azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
--backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
--backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+ --backup_storage_compress if set, the backup files will be compressed. (default true)
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
@@ -25,12 +22,9 @@ Usage of vtctld:
--consul_auth_static_file string JSON File to read the topos/tokens from.
--datadog-agent-host string host to send spans to. if empty, no tracing will be done
--datadog-agent-port string port to send spans to. if empty, no tracing will be done
- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
- --dba_pool_size int Size of the connection pool for dba connections (default 20)
--disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
--durability_policy string type of durability to enforce. Default is none. Other values are dictated by registered plugins (default "none")
- --enable_realtime_stats Required for the Realtime Stats view. If set, vtctld will maintain a streaming RPC to each tablet (in all cells) to gather the realtime health stats.
- --enable_vtctld_ui If true, the vtctld web interface will be enabled. Default is true. (default true)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--external-compressor string command with arguments to use when compressing a backup.
--external-compressor-extension string extension to use when using an external compressor.
--external-decompressor string command with arguments to use when decompressing a backup.
@@ -66,27 +60,23 @@ Usage of vtctld:
--jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
- --mysql_server_version string MySQL server version to advertise.
- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--opentsdb_uri string URI of opentsdb /api/put method
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
--port int port for the server
--pprof strings enable profiling
--proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
--s3_backup_aws_region string AWS region to use. (default "us-east-1")
--s3_backup_aws_retries int AWS request retries. (default -1)
@@ -105,9 +95,13 @@ Usage of vtctld:
--service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
--sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
--tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.
--tablet_grpc_ca string the server ca to use to validate servers when connecting
--tablet_grpc_cert string the cert to use to connect
--tablet_grpc_crl string the server crl to use to validate server certificates when connecting
@@ -151,29 +145,8 @@ Usage of vtctld:
--tracing-enable-logging whether to enable logging in the tracing service
--tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
--tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --vtctl_healthcheck_retry_delay duration delay before retrying a failed healthcheck (default 5s)
- --vtctl_healthcheck_timeout duration the health check timeout period (default 1m0s)
--vtctl_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
--vtctld_sanitize_log_messages When true, vtctld sanitizes logging.
- --vtctld_show_topology_crud Controls the display of the CRUD topology actions in the vtctld UI. (default true)
- --vtgate_grpc_ca string the server ca to use to validate servers when connecting
- --vtgate_grpc_cert string the cert to use to connect
- --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
- --vtgate_grpc_key string the key to use to connect
- --vtgate_grpc_server_name string the server name to use to validate server certificate
- --web_dir string NOT USED, here for backward compatibility
- --web_dir2 string NOT USED, here for backward compatibility
- --workflow_manager_disable strings comma separated list of workflow types to disable
- --workflow_manager_init Initialize the workflow manager in this vtctld instance.
- --workflow_manager_use_election if specified, will use a topology server-based master election to ensure only one workflow manager is active at a time.
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtctldclient.txt b/go/flags/endtoend/vtctldclient.txt
index 3c27875d0da..41c0b64076f 100644
--- a/go/flags/endtoend/vtctldclient.txt
+++ b/go/flags/endtoend/vtctldclient.txt
@@ -1,6 +1,7 @@
Executes a cluster management command on the remote vtctld server.
Usage:
+ vtctldclient [flags]
vtctldclient [command]
Available Commands:
@@ -8,7 +9,7 @@ Available Commands:
AddCellsAlias Defines a group of cells that can be referenced by a single name (the alias).
ApplyRoutingRules Applies the VSchema routing rules.
ApplySchema Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication.
- ApplyShardRoutingRules Applies VSchema shard routing rules.
+ ApplyShardRoutingRules Applies the provided shard routing rules.
ApplyVSchema Applies the VTGate routing schema to the provided keyspace. Shows the result after application.
Backup Uses the BackupStorage service on the given tablet to create and store a new backup.
BackupShard Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.
@@ -26,6 +27,7 @@ Available Commands:
ExecuteFetchAsDBA Executes the given query as the DBA user on the remote tablet.
ExecuteHook Runs the specified hook on the given tablet.
FindAllShardsInKeyspace Returns a map of shard names to shard references for a given keyspace.
+ GenerateShardRanges Print a set of shard ranges assuming a keyspace with N shards.
GetBackups Lists backups for the given shard.
GetCellInfo Gets the CellInfo object for the given cell.
GetCellInfoNames Lists the names of all cells in the cluster.
@@ -37,7 +39,7 @@ Available Commands:
GetRoutingRules Displays the VSchema routing rules.
GetSchema Displays the full schema for a tablet, optionally restricted to the specified tables/views.
GetShard Returns information about a shard in the topology.
- GetShardRoutingRules Displays VSchema shard routing rules.
+ GetShardRoutingRules Displays the currently active shard routing rules as a JSON document.
GetSrvKeyspaceNames Outputs a JSON mapping of cell=>keyspace names served in that cell. Omit to query all cells.
GetSrvKeyspaces Returns the SrvKeyspaces for the given keyspace in one or more cells.
GetSrvVSchema Returns the SrvVSchema for the given cell.
@@ -45,9 +47,9 @@ Available Commands:
GetTablet Outputs a JSON structure that contains information about the tablet.
GetTabletVersion Print the version of a tablet from its debug vars.
GetTablets Looks up tablets according to filter criteria.
+ GetTopologyPath Gets the value associated with the particular path (key) in the topology server.
GetVSchema Prints a JSON representation of a keyspace's topo record.
GetWorkflows Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.
- InitShardPrimary Sets the initial primary for the shard.
LegacyVtctlCommand Invoke a legacy vtctlclient command. Flag parsing is best effort.
PingTablet Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations.
PlannedReparentShard Reparents the shard to a new primary, or away from an old primary. Both the old and new primaries must be up and running.
@@ -78,18 +80,19 @@ Available Commands:
TabletExternallyReparented Updates the topology record for the tablet's shard to acknowledge that an external tool made this tablet the primary.
UpdateCellInfo Updates the content of a CellInfo with the provided parameters, creating the CellInfo if it does not exist.
UpdateCellsAlias Updates the content of a CellsAlias with the provided parameters, creating the CellsAlias if it does not exist.
+ UpdateThrottlerConfig Update the tablet throttler configuration for all tablets in the given keyspace (across all cells)
Validate Validates that all nodes reachable from the global replication graph, as well as all tablets in discoverable cells, are consistent.
ValidateKeyspace Validates that all nodes reachable from the specified keyspace are consistent.
ValidateSchemaKeyspace Validates that the schema on the primary tablet for shard 0 matches the schema on all other tablets in the keyspace.
ValidateShard Validates that all nodes reachable from the specified shard are consistent.
ValidateVersionKeyspace Validates that the version on the primary tablet of shard 0 matches all of the other tablets in the keyspace.
+ ValidateVersionShard Validates that the version on the primary matches all of the replicas.
completion Generate the autocompletion script for the specified shell
help Help about any command
Flags:
--action_timeout duration timeout for the total command (default 1h0m0s)
--alsologtostderr log to standard error as well as files
- --emit_stats If set, emit stats to push-based monitoring and stats backends
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
--grpc_enable_tracing Enable gRPC tracing.
@@ -106,15 +109,10 @@ Flags:
--log_dir string If non-empty, write log files in this directory
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
- --mysql_server_version string MySQL server version to advertise.
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--server string server to use for connection (required)
- --stats_backend string The name of the registered push-based monitoring/stats backend to use
- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
- --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
- --stats_drop_variables string Variables to be dropped from the list of exported variables.
- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
-v, --v Level log level for V logs
--version version for vtctldclient
diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt
index df5fea9d1d6..a70067a9bc8 100644
--- a/go/flags/endtoend/vtexplain.txt
+++ b/go/flags/endtoend/vtexplain.txt
@@ -1,94 +1,37 @@
Usage of vtexplain:
- --alsologtostderr log to standard error as well as files
- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
- --app_pool_size int Size of the connection pool for app connections (default 40)
- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
- --batch-interval duration Interval between logical time slots. (default 10ms)
- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
- --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
- --compression-engine-name string compressor engine used for compression. (default "pargzip")
- --compression-level int what level to pass to the compressor. (default 1)
- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
- --dba_pool_size int Size of the connection pool for dba connections (default 20)
- --dbname string Optional database target to override normal routing
- --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
- --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi")
- --external-compressor string command with arguments to use when compressing a backup.
- --external-compressor-extension string extension to use when using an external compressor.
- --external-decompressor string command with arguments to use when decompressing a backup.
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
- --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_queries_to_file string Enable query logging to the specified file
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
- --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
- --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
- --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static")
- --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP")
- --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
- --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
- --mysql_server_query_timeout duration mysql query timeout (default 0s)
- --mysql_server_read_timeout duration connection read timeout (default 0s)
- --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
- --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
- --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
- --mysql_server_ssl_cert string Path to the ssl cert for mysql server plugin SSL
- --mysql_server_ssl_crl string Path to ssl CRL for mysql server plugin SSL
- --mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL
- --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
- --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
- --mysql_server_version string MySQL server version to advertise.
- --mysql_server_write_timeout duration connection write timeout (default 0s)
- --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
- --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --normalize Whether to enable vtgate normalization
- --output-mode string Output in human-friendly text or json (default "text")
- --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4
- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
- --pprof strings enable profiling
- --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
- --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW")
- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
- --schema string The SQL table schema
- --schema-file string Identifies the file that contains the SQL table schema
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2)
- --sql string A list of semicolon-delimited SQL commands to analyze
- --sql-file string Identifies the file that contains the SQL commands to analyze
- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --topo_global_root string the path of the global topology data in the global topology server
- --topo_global_server_address string the address of the global topology server
- --topo_implementation string the topology implementation to use
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --vschema string Identifies the VTGate routing schema
- --vschema-file string Identifies the VTGate routing schema file
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
+ --alsologtostderr log to standard error as well as files
+ --batch-interval duration Interval between logical time slots. (default 10ms)
+ --dbname string Optional database target to override normal routing
+ --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
+ --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi")
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
+ --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
+ --normalize Whether to enable vtgate normalization
+ --output-mode string Output in human-friendly text or json (default "text")
+ --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW")
+ --schema string The SQL table schema
+ --schema-file string Identifies the file that contains the SQL table schema
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2)
+ --sql string A list of semicolon-delimited SQL commands to analyze
+ --sql-file string Identifies the file that contains the SQL commands to analyze
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vschema string Identifies the VTGate routing schema
+ --vschema-file string Identifies the VTGate routing schema file
diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt
index 8a45969a20a..946b10a39b2 100644
--- a/go/flags/endtoend/vtgate.txt
+++ b/go/flags/endtoend/vtgate.txt
@@ -1,5 +1,5 @@
Usage of vtgate:
- --allowed_tablet_types []topodatapb.TabletType Specifies the tablet types this vtgate is allowed to route queries to.
+ --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.
--alsologtostderr log to standard error as well as files
--buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
--buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default "keyspace_events")
@@ -9,7 +9,7 @@ Usage of vtgate:
--buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000)
--buffer_window duration Duration for how long a request should be buffered at most. (default 10s)
--catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
- --cell string cell to use (default "test_nj")
+ --cell string cell to use
--cells_to_watch string comma-separated list of cells for watching tablets
--consul_auth_static_file string JSON File to read the topos/tokens from.
--datadog-agent-host string host to send spans to. if empty, no tracing will be done
@@ -19,7 +19,9 @@ Usage of vtgate:
--default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
--discovery_high_replication_lag_minimum_serving duration Threshold above which replication lag is considered too high when applying the min_number_serving_vttablets flag. (default 2h0m0s)
--discovery_low_replication_lag duration Threshold below which replication lag is considered low enough to be healthy. (default 30s)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--enable-partial-keyspace-migration (Experimental) Follow shard routing rules: enable only while migrating a keyspace shard by shard. See documentation on Partial MoveTables for more. (default false)
+ --enable-views Enable views support in vtgate.
--enable_buffer Enable buffering (stalling) of primary traffic during failovers.
--enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests.
--enable_direct_ddl Allow users to submit direct DDL statements (default true)
@@ -32,6 +34,7 @@ Usage of vtgate:
--gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000)
--gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s)
--grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.
+ --grpc-use-static-authentication-callerid If set, will set the immediate caller id to the username authenticated by the static auth plugin.
--grpc_auth_mode string Which auth plugin implementation to use (eg: static)
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
@@ -67,6 +70,7 @@ Usage of vtgate:
--keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
--legacy_replication_lag_algorithm Use the legacy algorithm when selecting vttablets for serving. (default true)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
--lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
@@ -74,6 +78,7 @@ Usage of vtgate:
--log_queries_to_file string Enable query logging to the specified file
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
--max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000)
--max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
--message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
@@ -101,8 +106,8 @@ Usage of vtgate:
--mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
--mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
--mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
- --mysql_server_query_timeout duration mysql query timeout (default 0s)
- --mysql_server_read_timeout duration connection read timeout (default 0s)
+ --mysql_server_query_timeout duration mysql query timeout
+ --mysql_server_read_timeout duration connection read timeout
--mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
--mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
--mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
@@ -111,13 +116,13 @@ Usage of vtgate:
--mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL
--mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
--mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
- --mysql_server_version string MySQL server version to advertise.
- --mysql_server_write_timeout duration connection write timeout (default 0s)
- --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
+ --mysql_server_write_timeout duration connection write timeout
+ --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish
--mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
--no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries
--normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--opentsdb_uri string URI of opentsdb /api/put method
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
@@ -126,12 +131,13 @@ Usage of vtgate:
--pprof strings enable profiling
--proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --query-timeout int Sets the default query timeout (in ms). Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS)
--querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
--querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
--querylog-format string format for query logs ("text" or "json") (default "text")
--querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
--redact-debug-ui-queries redact full queries and bind variables from debug UI
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--retry-count int retry count (default 2)
--schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
--schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes
@@ -142,6 +148,11 @@ Usage of vtgate:
--srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
--srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
--srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--statsd_address string Address for statsd client
--statsd_sample_rate float Sample rate for statsd metrics (default 1)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
@@ -155,7 +166,7 @@ Usage of vtgate:
--tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
--tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
--tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
- --tablet_types_to_wait string wait till connected for specified tablet types during Gateway initialization
+ --tablet_types_to_wait strings Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.
--tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
@@ -183,8 +194,8 @@ Usage of vtgate:
--tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
--tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
--transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI")
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
--vtctld_addr string address of a vtctld instance
diff --git a/go/flags/endtoend/vtgr.txt b/go/flags/endtoend/vtgr.txt
index d4ed0501d9e..9e0798f9fca 100644
--- a/go/flags/endtoend/vtgr.txt
+++ b/go/flags/endtoend/vtgr.txt
@@ -6,6 +6,7 @@ Usage of vtgr:
--db_config string Full path to db config file that will be used by VTGR.
--db_flavor string MySQL flavor override. (default "MySQL56")
--db_port int Local mysql port, set this to enable local fast check.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--enable_heartbeat_check Enable heartbeat checking, set together with --group_heartbeat_threshold.
--gr_port int Port to bootstrap a MySQL group. (default 33061)
--group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check.
@@ -21,6 +22,7 @@ Usage of vtgr:
-h, --help display usage and exit
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
@@ -30,10 +32,15 @@ Usage of vtgr:
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--refresh_interval duration Refresh interval to load tablets. (default 10s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--scan_interval duration Scan interval to diagnose and repair. (default 3s)
--scan_repair_timeout duration Time to wait for a Diagnose and repair operation. (default 3s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
--tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
--tablet_manager_grpc_cert string the cert to use to connect
@@ -60,7 +67,7 @@ Usage of vtgr:
--topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--vtgr_config string Config file for vtgr.
diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt
index e8270b9f5e7..68ffda88f1e 100644
--- a/go/flags/endtoend/vtorc.txt
+++ b/go/flags/endtoend/vtorc.txt
@@ -8,6 +8,7 @@ Usage of vtorc:
--clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
--config string config file name
--consul_auth_static_file string JSON File to read the topos/tokens from.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
--grpc_enable_tracing Enable gRPC tracing.
@@ -22,13 +23,14 @@ Usage of vtorc:
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --lock-shard-timeout duration Duration for which a shard lock is held when running a recovery (default 30s)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
--port int port for the server
@@ -38,11 +40,16 @@ Usage of vtorc:
--reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s)
--recovery-period-block-duration duration Duration for which a new recovery is blocked on an instance after running a recovery (default 30s)
--recovery-poll-duration duration Timer duration on which VTOrc polls its database to run a recovery (default 1s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--shutdown_wait_time duration Maximum time to wait for VTOrc to release all the locks that it is holding before shutting down on SIGTERM (default 30s)
--snapshot-topology-interval duration Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours
--sqlite-data-file string SQLite Datafile to use as VTOrc's database (default "file::memory:?mode=memory&cache=shared")
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
--tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
--tablet_manager_grpc_cert string the cert to use to connect
@@ -73,7 +80,7 @@ Usage of vtorc:
--topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--wait-replicas-timeout duration Duration for which to wait for replica's to respond when issuing RPCs (default 30s)
diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt
index 9340e5a0c01..9902b4b2076 100644
--- a/go/flags/endtoend/vttablet.txt
+++ b/go/flags/endtoend/vttablet.txt
@@ -4,13 +4,13 @@ Usage of vttablet:
--app_pool_size int Size of the connection pool for app connections (default 40)
--azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
--azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
+ --azblob_backup_buffer_size int The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service. (default 104857600)
--azblob_backup_container_name string Azure Blob Container Name.
--azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
--azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
--backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
--backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+ --backup_storage_compress if set, the backup files will be compressed. (default true)
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--binlog_host string PITR restore parameter: hostname/IP of binlog server.
@@ -88,19 +88,17 @@ Usage of vttablet:
--dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
--dba_pool_size int Size of the connection pool for dba connections (default 20)
--degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
- --disable-replication-manager Disable replication manager to prevent replication repairs.
--disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--enable-consolidator Synonym to -enable_consolidator (default true)
--enable-consolidator-replicas Synonym to -enable_consolidator_replicas
--enable-lag-throttler Synonym to -enable_lag_throttler
- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
--enable-tx-throttler Synonym to -enable_tx_throttler
--enable_consolidator This option enables the query consolidator. (default true)
--enable_consolidator_replicas This option enables the query consolidator only on replicas.
--enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
--enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
--enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
- --enable_query_plan_field_caching (DEPRECATED) This option fetches & caches fields (columns) when storing query plans (default true)
--enable_replication_reporter Use polling to track replication lag.
--enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
--enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
@@ -153,7 +151,6 @@ Usage of vttablet:
--hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
--init_db_name_override string (init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_
--init_keyspace string (init parameter) keyspace to use for this tablet
- --init_populate_metadata (init parameter) populate metadata tables even if restore_from_backup is disabled. If restore_from_backup is enabled, metadata tables are always populated regardless of this flag.
--init_shard string (init parameter) shard to use for this tablet
--init_tablet_type string (init parameter) the tablet type to use for this tablet.
--init_tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet
@@ -162,6 +159,7 @@ Usage of vttablet:
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
--lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
@@ -170,6 +168,7 @@ Usage of vttablet:
--log_queries_to_file string Enable query logging to the specified file
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
--max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256)
--migration_check_interval duration Interval between migration checks (default 1m0s)
--mycnf-file string path to my.cnf, if reading all config params from there
@@ -190,10 +189,10 @@ Usage of vttablet:
--mycnf_slow_log_path string mysql slow query log path
--mycnf_socket_file string mysql socket file
--mycnf_tmp_dir string mysql tmp directory
- --mysql_server_version string MySQL server version to advertise.
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
--mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--opentsdb_uri string URI of opentsdb /api/put method
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
@@ -216,7 +215,7 @@ Usage of vttablet:
--queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
--queryserver-config-olap-transaction-timeout float query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30)
--queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
- --queryserver-config-pool-prefill-parallelism int (DEPRECATED) query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+ --queryserver-config-pool-conn-max-lifetime float query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool.
--queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
--queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
--queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
@@ -228,24 +227,23 @@ Usage of vttablet:
--queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
--queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
--queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
- --queryserver-config-stream-pool-prefill-parallelism int (DEPRECATED) query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
--queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
--queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
--queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
--queryserver-config-strict-table-acl only allow queries that pass table acl checks
--queryserver-config-terse-errors prevent bind vars from escaping in client error messages
--queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
- --queryserver-config-transaction-prefill-parallelism int (DEPRECATED) query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
--queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
--queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
--queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
--queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
--queryserver-enable-settings-pool Enable pooling of connections with modified system settings
+ --queryserver-enable-views Enable views support in vttablet.
--queryserver_enable_online_ddl Enable online DDL. (default true)
--redact-debug-ui-queries redact full queries and bind variables from debug UI
--relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
--relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
--restore_concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
--restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there
@@ -271,6 +269,11 @@ Usage of vttablet:
--srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
--srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
--srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--statsd_address string Address for statsd client
--statsd_sample_rate float Sample rate for statsd metrics (default 1)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
@@ -298,9 +301,10 @@ Usage of vttablet:
--tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
--throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
--throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
- --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+ --throttle_metrics_threshold float Override default throttle threshold, respective to --throttle_metrics_query (default 1.7976931348623157e+308)
--throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica")
--throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+ --throttler-config-via-topo When 'true', read config from topo service and ignore throttle_threshold, throttle_metrics_threshold, throttle_metrics_query, throttle_check_as_check_self
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
@@ -343,9 +347,10 @@ Usage of vttablet:
--tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
--unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
--use_super_read_only Set super_read_only flag when performing planned failover.
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vreplication-parallel-insert-workers int Number of parallel insertion workers to use during copy phase. Set <= 1 to disable parallelism, or > 1 to enable concurrent insertion during copy phase. (default 1)
--vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
--vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
--vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
@@ -367,11 +372,11 @@ Usage of vttablet:
--vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/")
--wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear
--watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
+ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
+ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
+ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
--xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
--xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
--xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt
index 384ebd7fe06..193070c5161 100644
--- a/go/flags/endtoend/vttestserver.txt
+++ b/go/flags/endtoend/vttestserver.txt
@@ -4,8 +4,7 @@ Usage of vttestserver:
--app_pool_size int Size of the connection pool for app connections (default 40)
--backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
--backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+ --backup_storage_compress if set, the backup files will be compressed. (default true)
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
--builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
@@ -67,16 +66,17 @@ Usage of vttestserver:
--log_err_stacks log stack traces for errors
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
--max_table_shard_size int The maximum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly (default 10000)
--min_table_shard_size int The minimum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly. (default 1000)
--mysql_bind_host string which host to bind vtgate mysql listener to (default "localhost")
--mysql_only If this flag is set only mysql is initialized. The rest of the vitess components are not started. Also, the output specifies the mysql unix socket instead of the vtgate port.
- --mysql_server_version string MySQL server version to advertise.
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
--mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
--null_probability float The probability to initialize a field with 'NULL' if --initialize_with_random_data is true. Only applies to fields that can contain NULL values. (default 0.1)
--num_shards strings Comma separated shard count (one per keyspace) (default [2])
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
@@ -108,6 +108,7 @@ Usage of vttestserver:
--tablet_manager_grpc_key string the key to use to connect
--tablet_manager_grpc_server_name string the server name to use to validate server certificate
--tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
+ --tablet_refresh_interval duration Interval at which vtgate refreshes tablet information from topology server. (default 10s)
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
@@ -119,8 +120,8 @@ Usage of vttestserver:
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
--transaction_mode string Transaction mode MULTI (default), SINGLE or TWOPC (default "MULTI")
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--vschema_ddl_authorized_users string Comma separated list of users authorized to execute vschema ddl operations via vtgate
--vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
@@ -134,12 +135,11 @@ Usage of vttestserver:
--vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
--vtgate_grpc_key string the key to use to connect
--vtgate_grpc_server_name string the server name to use to validate server certificate
- --workflow_manager_init Enable workflow manager
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
+ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
+ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
+ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
--xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
--xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
--xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/zk.txt b/go/flags/endtoend/zk.txt
index 52bebdf4333..443bf0b9ca2 100644
--- a/go/flags/endtoend/zk.txt
+++ b/go/flags/endtoend/zk.txt
@@ -1,14 +1,8 @@
Usage of zk:
- --emit_stats If set, emit stats to push-based monitoring and stats backends
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --server string server(s) to connect to
- --stats_backend string The name of the registered push-based monitoring/stats backend to use
- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
- --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
- --stats_drop_variables string Variables to be dropped from the list of exported variables.
- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --server string server(s) to connect to
diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt
index e07334f86ac..e7e41c4cb4d 100644
--- a/go/flags/endtoend/zkctl.txt
+++ b/go/flags/endtoend/zkctl.txt
@@ -11,8 +11,8 @@ Usage of zkctl:
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
--zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/go/flags/endtoend/zkctld.txt b/go/flags/endtoend/zkctld.txt
index 9bfec0066f7..6ec026be814 100644
--- a/go/flags/endtoend/zkctld.txt
+++ b/go/flags/endtoend/zkctld.txt
@@ -12,8 +12,8 @@ Usage of zkctld:
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- -v, --v Level log level for V logs
- --version print binary version
+ --v Level log level for V logs
+ -v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
--zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/go/flagutil/sets.go b/go/flagutil/sets.go
index f03123d9488..92a7e2c6800 100644
--- a/go/flagutil/sets.go
+++ b/go/flagutil/sets.go
@@ -39,14 +39,14 @@ var _ pflag.Value = (*StringSetFlag)(nil)
// provides an implementation of pflag.Value, so it is usable in libraries like
// cobra.
type StringSetFlag struct {
- set sets.String
+ set sets.Set[string]
}
// ToSet returns the underlying string set, or an empty set if the underlying
// set is nil.
-func (set *StringSetFlag) ToSet() sets.String {
+func (set *StringSetFlag) ToSet() sets.Set[string] {
if set.set == nil {
- set.set = sets.NewString()
+ set.set = sets.New[string]()
}
return set.set
@@ -55,7 +55,7 @@ func (set *StringSetFlag) ToSet() sets.String {
// Set is part of the pflag.Value and flag.Value interfaces.
func (set *StringSetFlag) Set(s string) error {
if set.set == nil {
- set.set = sets.NewString(s)
+ set.set = sets.New[string]()
return nil
}
@@ -69,7 +69,7 @@ func (set *StringSetFlag) String() string {
return ""
}
- return strings.Join(set.set.List(), ", ")
+ return strings.Join(sets.List(set.set), ", ")
}
// Type is part of the pflag.Value interface.
diff --git a/go/internal/flag/flag.go b/go/internal/flag/flag.go
index a06f1c63988..6f087143610 100644
--- a/go/internal/flag/flag.go
+++ b/go/internal/flag/flag.go
@@ -31,8 +31,6 @@ import (
"strings"
flag "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/vt/log"
)
// Parse wraps the standard library's flag.Parse to perform some sanity checking
@@ -44,6 +42,7 @@ import (
//
// See VEP-4, phase 1 for details: https://github.com/vitessio/enhancements/blob/c766ea905e55409cddeb666d6073cd2ac4c9783e/veps/vep-4.md#phase-1-preparation
func Parse(fs *flag.FlagSet) {
+ preventGlogVFlagFromClobberingVersionFlagShorthand(fs)
fs.AddGoFlagSet(goflag.CommandLine)
if fs.Lookup("help") == nil {
@@ -69,6 +68,15 @@ func Parse(fs *flag.FlagSet) {
flag.Parse()
}
+// IsFlagProvided returns if the given flag has been provided by the user explicitly or not
+func IsFlagProvided(name string) bool {
+ fl := flag.Lookup(name)
+ if fl != nil {
+ return fl.Changed
+ }
+ return false
+}
+
// TrickGlog tricks glog into understanding that flags have been parsed.
//
// N.B. Do not delete this function. `glog` is a persnickity package and wants
@@ -96,6 +104,32 @@ func TrickGlog() {
os.Args = append(os.Args, args...)
}
+// The default behavior of PFlagFromGoFlag (which is called on each flag when
+// calling AddGoFlagSet) is to allow any flags with single-character names to be
+// accessible both as, for example, `-v` and `--v`.
+//
+// This prevents us from exposing version via `--version|-v` (pflag will actually
+// panic when it goes to add the glog log-level flag), so we intervene to blank
+// out the Shorthand for _just_ that flag before adding the rest of the goflags
+// to a particular pflag FlagSet.
+//
+// IMPORTANT: This must be called prior to AddGoFlagSet in both Parse and
+// ParseFlagsForTest.
+func preventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) {
+ // N.B. we use goflag.Lookup instead of this package's Lookup, because we
+ // explicitly want to check only the goflags.
+ if f := goflag.Lookup("v"); f != nil {
+ if fs.Lookup("v") != nil { // This check is exactly what AddGoFlagSet does.
+ return
+ }
+
+ pf := flag.PFlagFromGoFlag(f)
+ pf.Shorthand = ""
+
+ fs.AddFlag(pf)
+ }
+}
+
// Usage invokes the current CommandLine's Usage func, or if not overridden,
// "prints a simple header and calls PrintDefaults".
func Usage() {
@@ -105,14 +139,23 @@ func Usage() {
// filterTestFlags returns two slices: the second one has just the flags for `go test` and the first one contains
// the rest of the flags.
const goTestFlagSuffix = "-test"
+const goTestRunFlag = "-test.run"
func filterTestFlags() ([]string, []string) {
args := os.Args
var testFlags []string
var otherArgs []string
+ hasExtraTestRunArg := false
for i := 0; 0 < len(args) && i < len(args); i++ {
- if strings.HasPrefix(args[i], goTestFlagSuffix) {
+ // This additional logic to check for the test.run flag is required for running single unit tests in GoLand,
+ // due to the way it uses "go tool test2json" to run the test. The CLI `go test` specifies the test as "-test.run=TestHeartbeat",
+ // but test2json as "-test.run TestHeartbeat". So in the latter case we need to also add the arg following test.run
+ if strings.HasPrefix(args[i], goTestFlagSuffix) || hasExtraTestRunArg {
+ hasExtraTestRunArg = false
testFlags = append(testFlags, args[i])
+ if args[i] == goTestRunFlag {
+ hasExtraTestRunArg = true
+ }
continue
}
otherArgs = append(otherArgs, args[i])
@@ -135,6 +178,7 @@ func ParseFlagsForTest() {
}
// parse remaining flags including the log-related ones like --alsologtostderr
+ preventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine)
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
flag.Parse()
}
@@ -199,46 +243,6 @@ func Arg(i int) string {
return ""
}
-const (
- singleDashLongFlagsWarning = "Use of single-dash long flags is deprecated and will be removed in the next version of Vitess. Please use --%s instead"
- mixedFlagsAndPosargsWarning = "Detected a dashed argument after a positional argument. " +
- "Currently these are treated as posargs that may be parsed by a subcommand, but in the next version of Vitess they will be parsed as top-level flags, which may not be defined, causing errors. " +
- "To preserve existing behavior, please update your invocation to include a \"--\" after all top-level flags to continue treating %s as a positional argument."
-)
-
-// Check and warn on any single-dash flags.
-// nolint:deadcode
-func warnOnSingleDashLongFlags(fs *goflag.FlagSet, argv []string, warningf func(msg string, args ...any)) {
- fs.Visit(func(f *goflag.Flag) {
- // Boolean flags with single-character names are okay to use the
- // single-dash form. I don't _think_ we have any of these, but I'm being
- // conservative here.
- if bf, ok := f.Value.(maybeBoolFlag); ok && bf.IsBoolFlag() && len(f.Name) == 1 {
- return
- }
-
- for _, arg := range argv {
- if strings.HasPrefix(arg, "-"+f.Name) {
- warningf(singleDashLongFlagsWarning, f.Name)
- }
- }
- })
-}
-
-// Check and warn for any mixed posarg / dashed-arg on the CLI.
-// nolint:deadcode
-func warnOnMixedPositionalAndFlagArguments(posargs []string, warningf func(msg string, args ...any)) {
- for _, arg := range posargs {
- if arg == "--" {
- break
- }
-
- if strings.HasPrefix(arg, "-") {
- log.Warningf(mixedFlagsAndPosargsWarning, arg)
- }
- }
-}
-
// From the standard library documentation:
//
// > If a Value has an IsBoolFlag() bool method returning true, the
diff --git a/go/ioutil2/ioutil.go b/go/ioutil2/ioutil.go
deleted file mode 100644
index 0b2ae7a6459..00000000000
--- a/go/ioutil2/ioutil.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package ioutil2 provides extra functionality along similar lines to io/ioutil.
-package ioutil2
-
-import (
- "os"
- "path"
-)
-
-// WriteFileAtomic writes the data to a temp file and atomically move if everything else succeeds.
-func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
- dir, name := path.Split(filename)
- f, err := os.CreateTemp(dir, name)
- if err != nil {
- return err
- }
- _, err = f.Write(data)
- if err == nil {
- err = f.Sync()
- }
- if closeErr := f.Close(); err == nil {
- err = closeErr
- }
- if permErr := os.Chmod(f.Name(), perm); err == nil {
- err = permErr
- }
- if err == nil {
- err = os.Rename(f.Name(), filename)
- }
- // Any err should result in full cleanup.
- if err != nil {
- os.Remove(f.Name())
- }
- return err
-}
diff --git a/go/ioutil2/ioutil_test.go b/go/ioutil2/ioutil_test.go
deleted file mode 100644
index 916b1fe2074..00000000000
--- a/go/ioutil2/ioutil_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreedto in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package ioutil2
-
-import (
- "bytes"
- "fmt"
- "os"
- "testing"
- "time"
-)
-
-func TestWrite(t *testing.T) {
- data := []byte("test string\n")
- fname := fmt.Sprintf("/tmp/atomic-file-test-%v.txt", time.Now().UnixNano())
- err := WriteFileAtomic(fname, data, 0664)
- if err != nil {
- t.Fatal(err)
- }
- rData, err := os.ReadFile(fname)
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(data, rData) {
- t.Fatalf("data mismatch: %v != %v", data, rData)
- }
- if err := os.Remove(fname); err != nil {
- t.Fatal(err)
- }
-}
diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go
index 7cceb1396b2..4528ee5dbf4 100644
--- a/go/mysql/auth_server_clientcert_test.go
+++ b/go/mysql/auth_server_clientcert_test.go
@@ -24,6 +24,9 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/vt/tlstest"
"vitess.io/vitess/go/vt/vttls"
)
@@ -43,9 +46,7 @@ func TestValidCert(t *testing.T) {
// Create the listener, so we can get its host.
l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
- if err != nil {
- t.Fatalf("NewListener failed: %v", err)
- }
+ require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
port := l.Addr().(*net.TCPAddr).Port
@@ -65,9 +66,8 @@ func TestValidCert(t *testing.T) {
path.Join(root, "ca-crl.pem"),
"",
tls.VersionTLS12)
- if err != nil {
- t.Fatalf("TLSServerConfig failed: %v", err)
- }
+ require.NoError(t, err, "TLSServerConfig failed: %v", err)
+
l.TLSConfig.Store(serverConfig)
go func() {
l.Accept()
@@ -89,29 +89,20 @@ func TestValidCert(t *testing.T) {
ctx := context.Background()
conn, err := Connect(ctx, params)
- if err != nil {
- t.Fatalf("Connect failed: %v", err)
- }
+ require.NoError(t, err, "Connect failed: %v", err)
+
defer conn.Close()
// Make sure this went through SSL.
result, err := conn.ExecuteFetch("ssl echo", 10000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
- if result.Rows[0][0].ToString() != "ON" {
- t.Errorf("Got wrong result from ExecuteFetch(ssl echo): %v", result)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+ assert.Equal(t, "ON", result.Rows[0][0].ToString(), "Got wrong result from ExecuteFetch(ssl echo): %v", result)
userData := th.LastConn().UserData.Get()
- if userData.Username != clientCertUsername {
- t.Errorf("userdata username is %v, expected %v", userData.Username, clientCertUsername)
- }
+ assert.Equal(t, clientCertUsername, userData.Username, "userdata username is %v, expected %v", userData.Username, clientCertUsername)
expectedGroups := []string{"localhost", clientCertUsername}
- if !reflect.DeepEqual(userData.Groups, expectedGroups) {
- t.Errorf("userdata groups is %v, expected %v", userData.Groups, expectedGroups)
- }
+ assert.True(t, reflect.DeepEqual(userData.Groups, expectedGroups), "userdata groups is %v, expected %v", userData.Groups, expectedGroups)
// Send a ComQuit to avoid the error message on the server side.
conn.writeComQuit()
@@ -124,9 +115,7 @@ func TestNoCert(t *testing.T) {
// Create the listener, so we can get its host.
l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
- if err != nil {
- t.Fatalf("NewListener failed: %v", err)
- }
+ require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
port := l.Addr().(*net.TCPAddr).Port
@@ -145,9 +134,8 @@ func TestNoCert(t *testing.T) {
path.Join(root, "ca-crl.pem"),
"",
tls.VersionTLS12)
- if err != nil {
- t.Fatalf("TLSServerConfig failed: %v", err)
- }
+ require.NoError(t, err, "TLSServerConfig failed: %v", err)
+
l.TLSConfig.Store(serverConfig)
go func() {
l.Accept()
@@ -166,9 +154,8 @@ func TestNoCert(t *testing.T) {
ctx := context.Background()
conn, err := Connect(ctx, params)
- if err == nil {
- t.Errorf("Connect() should have errored due to no client cert")
- }
+ assert.Error(t, err, "Connect() should have errored due to no client cert")
+
if conn != nil {
conn.Close()
}
diff --git a/go/mysql/auth_server_static_flaky_test.go b/go/mysql/auth_server_static_flaky_test.go
index ebbc655dffb..52e8fee8ab4 100644
--- a/go/mysql/auth_server_static_flaky_test.go
+++ b/go/mysql/auth_server_static_flaky_test.go
@@ -23,6 +23,8 @@ import (
"syscall"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
)
// getEntries is a test-only method for AuthServerStatic.
@@ -37,16 +39,10 @@ func TestJsonConfigParser(t *testing.T) {
config := make(map[string][]*AuthServerStaticEntry)
jsonConfig := "{\"mysql_user\":{\"Password\":\"123\", \"UserData\":\"dummy\"}, \"mysql_user_2\": {\"Password\": \"123\", \"UserData\": \"mysql_user_2\"}}"
err := ParseConfig([]byte(jsonConfig), &config)
- if err != nil {
- t.Fatalf("should not get an error, but got: %v", err)
- }
- if len(config["mysql_user"]) != 1 {
- t.Fatalf("mysql_user config size should be equal to 1")
- }
+ require.NoError(t, err, "should not get an error, but got: %v", err)
+ require.Equal(t, 1, len(config["mysql_user"]), "mysql_user config size should be equal to 1")
+ require.Equal(t, 1, len(config["mysql_user_2"]), "mysql_user config size should be equal to 1")
- if len(config["mysql_user_2"]) != 1 {
- t.Fatalf("mysql_user config size should be equal to 1")
- }
// works with new format
jsonConfig = `{"mysql_user":[
{"Password":"123", "UserData":"dummy", "SourceHost": "localhost"},
@@ -54,16 +50,9 @@ func TestJsonConfigParser(t *testing.T) {
{"Password": "456", "UserData": "mysql_user_with_groups", "Groups": ["user_group"]}
]}`
err = ParseConfig([]byte(jsonConfig), &config)
- if err != nil {
- t.Fatalf("should not get an error, but got: %v", err)
- }
- if len(config["mysql_user"]) != 3 {
- t.Fatalf("mysql_user config size should be equal to 3")
- }
-
- if config["mysql_user"][0].SourceHost != "localhost" {
- t.Fatalf("SourceHost should be equal to localhost")
- }
+ require.NoError(t, err, "should not get an error, but got: %v", err)
+ require.Equal(t, 3, len(config["mysql_user"]), "mysql_user config size should be equal to 3")
+ require.Equal(t, "localhost", config["mysql_user"][0].SourceHost, "SourceHost should be equal to localhost")
if len(config["mysql_user"][2].Groups) != 1 || config["mysql_user"][2].Groups[0] != "user_group" {
t.Fatalf("Groups should be equal to [\"user_group\"]")
@@ -73,9 +62,8 @@ func TestJsonConfigParser(t *testing.T) {
"mysql_user": [{"Password": "123", "UserData": "mysql_user_all", "InvalidKey": "oops"}]
}`
err = ParseConfig([]byte(jsonConfig), &config)
- if err == nil {
- t.Fatalf("Invalid config should have errored, but didn't")
- }
+ require.Error(t, err, "Invalid config should have errored, but didn't")
+
}
func TestValidateHashGetter(t *testing.T) {
@@ -87,20 +75,15 @@ func TestValidateHashGetter(t *testing.T) {
addr := &net.IPAddr{IP: ip, Zone: ""}
salt, err := newSalt()
- if err != nil {
- t.Fatalf("error generating salt: %v", err)
- }
+ require.NoError(t, err, "error generating salt: %v", err)
scrambled := ScrambleMysqlNativePassword(salt, []byte("password"))
getter, err := auth.UserEntryWithHash(nil, salt, "mysql_user", scrambled, addr)
- if err != nil {
- t.Fatalf("error validating password: %v", err)
- }
+ require.NoError(t, err, "error validating password: %v", err)
callerID := getter.Get()
- if callerID.Username != "user.name" {
- t.Fatalf("getter username incorrect, expected \"user.name\", got %v", callerID.Username)
- }
+ require.Equal(t, "user.name", callerID.Username, "getter username incorrect, expected \"user.name\", got %v", callerID.Username)
+
if len(callerID.Groups) != 1 || callerID.Groups[0] != "user_group" {
t.Fatalf("getter groups incorrect, expected [\"user_group\"], got %v", callerID.Groups)
}
@@ -110,27 +93,21 @@ func TestHostMatcher(t *testing.T) {
ip := net.ParseIP("192.168.0.1")
addr := &net.TCPAddr{IP: ip, Port: 9999}
match := MatchSourceHost(net.Addr(addr), "")
- if !match {
- t.Fatalf("Should match any address when target is empty")
- }
+ require.True(t, match, "Should match any address when target is empty")
match = MatchSourceHost(net.Addr(addr), "localhost")
- if match {
- t.Fatalf("Should not match address when target is localhost")
- }
+ require.False(t, match, "Should not match address when target is localhost")
socket := &net.UnixAddr{Name: "unixSocket", Net: "1"}
match = MatchSourceHost(net.Addr(socket), "localhost")
- if !match {
- t.Fatalf("Should match socket when target is localhost")
- }
+ require.True(t, match, "Should match socket when target is localhost")
+
}
func TestStaticConfigHUP(t *testing.T) {
tmpFile, err := os.CreateTemp("", "mysql_auth_server_static_file.json")
- if err != nil {
- t.Fatalf("couldn't create temp file: %v", err)
- }
+ require.NoError(t, err, "couldn't create temp file: %v", err)
+
defer os.Remove(tmpFile.Name())
oldStr := "str5"
@@ -141,10 +118,7 @@ func TestStaticConfigHUP(t *testing.T) {
aStatic := NewAuthServerStatic(tmpFile.Name(), "", 0)
defer aStatic.close()
-
- if aStatic.getEntries()[oldStr][0].Password != oldStr {
- t.Fatalf("%s's Password should still be '%s'", oldStr, oldStr)
- }
+ require.Equal(t, oldStr, aStatic.getEntries()[oldStr][0].Password, "%s's Password should still be '%s'", oldStr, oldStr)
hupTest(t, aStatic, tmpFile, oldStr, "str2")
hupTest(t, aStatic, tmpFile, "str2", "str3") // still handling the signal
@@ -159,9 +133,8 @@ func TestStaticConfigHUP(t *testing.T) {
func TestStaticConfigHUPWithRotation(t *testing.T) {
tmpFile, err := os.CreateTemp("", "mysql_auth_server_static_file.json")
- if err != nil {
- t.Fatalf("couldn't create temp file: %v", err)
- }
+ require.NoError(t, err, "couldn't create temp file: %v", err)
+
defer os.Remove(tmpFile.Name())
oldStr := "str1"
@@ -172,10 +145,7 @@ func TestStaticConfigHUPWithRotation(t *testing.T) {
aStatic := NewAuthServerStatic(tmpFile.Name(), "", 10*time.Millisecond)
defer aStatic.close()
-
- if aStatic.getEntries()[oldStr][0].Password != oldStr {
- t.Fatalf("%s's Password should still be '%s'", oldStr, oldStr)
- }
+ require.Equal(t, oldStr, aStatic.getEntries()[oldStr][0].Password, "%s's Password should still be '%s'", oldStr, oldStr)
hupTestWithRotation(t, aStatic, tmpFile, oldStr, "str4")
hupTestWithRotation(t, aStatic, tmpFile, "str4", "str5")
@@ -186,20 +156,14 @@ func hupTest(t *testing.T, aStatic *AuthServerStatic, tmpFile *os.File, oldStr,
if err := os.WriteFile(tmpFile.Name(), []byte(jsonConfig), 0600); err != nil {
t.Fatalf("couldn't overwrite temp file: %v", err)
}
-
- if aStatic.getEntries()[oldStr][0].Password != oldStr {
- t.Fatalf("%s's Password should still be '%s'", oldStr, oldStr)
- }
+ require.Equal(t, oldStr, aStatic.getEntries()[oldStr][0].Password, "%s's Password should still be '%s'", oldStr, oldStr)
syscall.Kill(syscall.Getpid(), syscall.SIGHUP)
- time.Sleep(100 * time.Millisecond) // wait for signal handler
+ time.Sleep(100 * time.Millisecond)
+ // wait for signal handler
+ require.Nil(t, aStatic.getEntries()[oldStr], "Should not have old %s after config reload", oldStr)
+ require.Equal(t, newStr, aStatic.getEntries()[newStr][0].Password, "%s's Password should be '%s'", newStr, newStr)
- if aStatic.getEntries()[oldStr] != nil {
- t.Fatalf("Should not have old %s after config reload", oldStr)
- }
- if aStatic.getEntries()[newStr][0].Password != newStr {
- t.Fatalf("%s's Password should be '%s'", newStr, newStr)
- }
}
func hupTestWithRotation(t *testing.T, aStatic *AuthServerStatic, tmpFile *os.File, oldStr, newStr string) {
@@ -208,14 +172,11 @@ func hupTestWithRotation(t *testing.T, aStatic *AuthServerStatic, tmpFile *os.Fi
t.Fatalf("couldn't overwrite temp file: %v", err)
}
- time.Sleep(20 * time.Millisecond) // wait for signal handler
+ time.Sleep(20 * time.Millisecond)
+ // wait for signal handler
+ require.Nil(t, aStatic.getEntries()[oldStr], "Should not have old %s after config reload", oldStr)
+ require.Equal(t, newStr, aStatic.getEntries()[newStr][0].Password, "%s's Password should be '%s'", newStr, newStr)
- if aStatic.getEntries()[oldStr] != nil {
- t.Fatalf("Should not have old %s after config reload", oldStr)
- }
- if aStatic.getEntries()[newStr][0].Password != newStr {
- t.Fatalf("%s's Password should be '%s'", newStr, newStr)
- }
}
func TestStaticPasswords(t *testing.T) {
@@ -267,21 +228,17 @@ func TestStaticPasswords(t *testing.T) {
for _, c := range tests {
t.Run(fmt.Sprintf("%s-%s", c.user, c.password), func(t *testing.T) {
salt, err := newSalt()
- if err != nil {
- t.Fatalf("error generating salt: %v", err)
- }
+ require.NoError(t, err, "error generating salt: %v", err)
scrambled := ScrambleMysqlNativePassword(salt, []byte(c.password))
_, err = auth.UserEntryWithHash(nil, salt, c.user, scrambled, addr)
if c.success {
- if err != nil {
- t.Fatalf("authentication should have succeeded: %v", err)
- }
+ require.NoError(t, err, "authentication should have succeeded: %v", err)
+
} else {
- if err == nil {
- t.Fatalf("authentication should have failed")
- }
+ require.Error(t, err, "authentication should have failed")
+
}
})
}
diff --git a/go/mysql/binlog_dump.go b/go/mysql/binlog_dump.go
index 4053720beb1..8383a590c5e 100644
--- a/go/mysql/binlog_dump.go
+++ b/go/mysql/binlog_dump.go
@@ -17,6 +17,9 @@ limitations under the License.
package mysql
import (
+ "encoding/binary"
+ "io"
+
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -27,8 +30,32 @@ var (
readPacketErr = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error reading BinlogDumpGTID packet")
)
+const (
+ BinlogDumpNonBlock = 0x01
+ BinlogThroughPosition = 0x02
+ BinlogThroughGTID = 0x04
+)
+
+func (c *Conn) parseComBinlogDump(data []byte) (logFile string, binlogPos uint32, err error) {
+ pos := 1
+
+ binlogPos, pos, ok := readUint32(data, pos)
+ if !ok {
+ return logFile, binlogPos, readPacketErr
+ }
+
+ pos += 2 // flags
+ pos += 4 // server-id
+
+ logFile = string(data[pos:])
+ return logFile, binlogPos, nil
+}
+
func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position Position, err error) {
+ // see https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html
pos := 1
+
+ flags2 := binary.LittleEndian.Uint16(data[pos : pos+2])
pos += 2 // flags
pos += 4 // server-id
@@ -44,14 +71,19 @@ func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint6
return logFile, logPos, position, readPacketErr
}
- dataSize, pos, ok := readUint32(data, pos)
- if !ok {
- return logFile, logPos, position, readPacketErr
+ if flags2&BinlogDumpNonBlock != 0 {
+ return logFile, logPos, position, io.EOF
}
- if gtid := string(data[pos : pos+int(dataSize)]); gtid != "" {
- position, err = DecodePosition(gtid)
- if err != nil {
- return logFile, logPos, position, err
+ if flags2&BinlogThroughGTID != 0 {
+ dataSize, pos, ok := readUint32(data, pos)
+ if !ok {
+ return logFile, logPos, position, readPacketErr
+ }
+ if gtid := string(data[pos : pos+int(dataSize)]); gtid != "" {
+ position, err = DecodePosition(gtid)
+ if err != nil {
+ return logFile, logPos, position, err
+ }
}
}
diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go
index 3387eb0e354..822d5c65447 100644
--- a/go/mysql/binlog_event.go
+++ b/go/mysql/binlog_event.go
@@ -70,6 +70,8 @@ type BinlogEvent interface {
IsRand() bool
// IsPreviousGTIDs returns true if this event is a PREVIOUS_GTIDS_EVENT.
IsPreviousGTIDs() bool
+ // IsHeartbeat returns true if this event is a HEARTBEAT_EVENT.
+ IsHeartbeat() bool
// IsSemiSyncAckRequested returns true if the source requests a semi-sync ack for this event
IsSemiSyncAckRequested() bool
diff --git a/go/mysql/binlog_event_common.go b/go/mysql/binlog_event_common.go
index 25945c5d1b9..e481a69ceae 100644
--- a/go/mysql/binlog_event_common.go
+++ b/go/mysql/binlog_event_common.go
@@ -154,6 +154,11 @@ func (ev binlogEvent) IsPreviousGTIDs() bool {
return ev.Type() == ePreviousGTIDsEvent
}
+// IsHeartbeat implements BinlogEvent.IsHeartbeat().
+func (ev binlogEvent) IsHeartbeat() bool {
+ return ev.Type() == eHeartbeatEvent
+}
+
// IsTableMap implements BinlogEvent.IsTableMap().
func (ev binlogEvent) IsTableMap() bool {
return ev.Type() == eTableMapEvent
diff --git a/go/mysql/binlog_event_common_test.go b/go/mysql/binlog_event_common_test.go
index 18ea3f70b5c..802641a7b82 100644
--- a/go/mysql/binlog_event_common_test.go
+++ b/go/mysql/binlog_event_common_test.go
@@ -174,6 +174,11 @@ func TestBinlogEventIsNotRotate(t *testing.T) {
}
}
+func TestBinlogEventIsNotHeartbeat(t *testing.T) {
+ input := binlogEvent(googleFormatEvent)
+ assert.False(t, input.IsHeartbeat())
+}
+
func TestBinlogEventIsXID(t *testing.T) {
input := binlogEvent(googleXIDEvent)
want := true
@@ -199,12 +204,10 @@ func TestBinlogEventFormat(t *testing.T) {
HeaderSizes: googleFormatEvent[76 : len(googleFormatEvent)-5],
}
got, err := input.Format()
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("%#v.Format() = %v, want %v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, reflect.DeepEqual(got, want), "%#v.Format() = %v, want %v", input, got, want)
+ assert.False(t, input.IsHeartbeat())
+
}
func TestBinlogEventFormatWrongVersion(t *testing.T) {
@@ -263,9 +266,8 @@ primary key(eid, id)
t.Errorf("unexpected error: %v", err)
return
}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("%#v.Query() = %v, want %v", input, got, want)
- }
+ assert.True(t, reflect.DeepEqual(got, want), "%#v.Query() = %v, want %v", input, got, want)
+
}
func TestBinlogEventQueryBadLength(t *testing.T) {
diff --git a/go/mysql/binlog_event_filepos.go b/go/mysql/binlog_event_filepos.go
index f68e4a58fcd..ffdce5b0bdb 100644
--- a/go/mysql/binlog_event_filepos.go
+++ b/go/mysql/binlog_event_filepos.go
@@ -185,6 +185,10 @@ func (ev filePosFakeEvent) IsPreviousGTIDs() bool {
return false
}
+func (ev filePosFakeEvent) IsHeartbeat() bool {
+ return false
+}
+
func (ev filePosFakeEvent) IsTableMap() bool {
return false
}
diff --git a/go/mysql/binlog_event_json.go b/go/mysql/binlog_event_json.go
index 82b53311c0f..e055b8866ca 100644
--- a/go/mysql/binlog_event_json.go
+++ b/go/mysql/binlog_event_json.go
@@ -153,7 +153,7 @@ func (jh *BinlogJSON) register(typ jsonDataType, Plugin jsonPlugin) {
func (jh *BinlogJSON) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
Plugin := jh.plugins[typ]
if Plugin == nil {
- return nil, fmt.Errorf("Plugin not found for type %d", typ)
+ return nil, fmt.Errorf("plugin not found for type %d", typ)
}
return Plugin.getNode(typ, data, pos)
}
@@ -316,59 +316,157 @@ type intPlugin struct {
var _ jsonPlugin = (*intPlugin)(nil)
-func (ih intPlugin) getVal(typ jsonDataType, data []byte, pos int) (value float64) {
+func (ipl intPlugin) getVal(typ jsonDataType, data []byte, pos int) (value int64) {
var val uint64
- var val2 float64
- size := ih.sizes[typ]
+ var val2 int64
+ size := ipl.sizes[typ]
for i := 0; i < size; i++ {
val = val + uint64(data[pos+i])<<(8*i)
}
switch typ {
case jsonInt16:
- val2 = float64(int16(val))
- case jsonUint16:
- val2 = float64(uint16(val))
+ val2 = int64(int16(val))
case jsonInt32:
- val2 = float64(int32(val))
- case jsonUint32:
- val2 = float64(uint32(val))
+ val2 = int64(int32(val))
case jsonInt64:
- val2 = float64(int64(val))
- case jsonUint64:
- val2 = float64(val)
- case jsonDouble:
- val2 = math.Float64frombits(val)
+ val2 = int64(val)
}
return val2
}
-func (ih intPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
- val := ih.getVal(typ, data, pos)
- node = ajson.NumericNode("", val)
+func (ipl intPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+ val := ipl.getVal(typ, data, pos)
+ node = ajson.IntegerNode("", val)
return node, nil
}
func newIntPlugin() *intPlugin {
- ih := &intPlugin{
+ ipl := &intPlugin{
info: &jsonPluginInfo{
name: "Int",
- types: []jsonDataType{jsonInt64, jsonInt32, jsonInt16, jsonUint16, jsonUint32, jsonUint64, jsonDouble},
+ types: []jsonDataType{jsonInt64, jsonInt32, jsonInt16},
+ },
+ sizes: make(map[jsonDataType]int),
+ }
+ ipl.sizes = map[jsonDataType]int{
+ jsonInt64: 8,
+ jsonInt32: 4,
+ jsonInt16: 2,
+ }
+ for _, typ := range ipl.info.types {
+ binlogJSON.register(typ, ipl)
+ }
+ return ipl
+}
+
+//endregion
+
+//region uint plugin
+
+func init() {
+ newUintPlugin()
+}
+
+type uintPlugin struct {
+ info *jsonPluginInfo
+ sizes map[jsonDataType]int
+}
+
+var _ jsonPlugin = (*uintPlugin)(nil)
+
+func (upl uintPlugin) getVal(typ jsonDataType, data []byte, pos int) (value uint64) {
+ var val uint64
+ var val2 uint64
+ size := upl.sizes[typ]
+ for i := 0; i < size; i++ {
+ val = val + uint64(data[pos+i])<<(8*i)
+ }
+ switch typ {
+ case jsonUint16:
+ val2 = uint64(uint16(val))
+ case jsonUint32:
+ val2 = uint64(uint32(val))
+ case jsonUint64:
+ val2 = val
+ }
+ return val2
+}
+
+func (upl uintPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+ val := upl.getVal(typ, data, pos)
+ node = ajson.UnsignedIntegerNode("", val)
+ return node, nil
+}
+
+func newUintPlugin() *uintPlugin {
+ upl := &uintPlugin{
+ info: &jsonPluginInfo{
+ name: "Uint",
+ types: []jsonDataType{jsonUint16, jsonUint32, jsonUint64},
},
sizes: make(map[jsonDataType]int),
}
- ih.sizes = map[jsonDataType]int{
+ upl.sizes = map[jsonDataType]int{
jsonUint64: 8,
- jsonInt64: 8,
jsonUint32: 4,
- jsonInt32: 4,
jsonUint16: 2,
- jsonInt16: 2,
+ }
+ for _, typ := range upl.info.types {
+ binlogJSON.register(typ, upl)
+ }
+ return upl
+}
+
+//endregion
+
+//region float plugin
+
+func init() {
+ newFloatPlugin()
+}
+
+type floatPlugin struct {
+ info *jsonPluginInfo
+ sizes map[jsonDataType]int
+}
+
+var _ jsonPlugin = (*floatPlugin)(nil)
+
+func (flp floatPlugin) getVal(typ jsonDataType, data []byte, pos int) (value float64) {
+ var val uint64
+ var val2 float64
+ size := flp.sizes[typ]
+ for i := 0; i < size; i++ {
+ val = val + uint64(data[pos+i])<<(8*i)
+ }
+ switch typ {
+ case jsonDouble:
+ val2 = math.Float64frombits(val)
+ }
+ return val2
+}
+
+func (flp floatPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+ val := flp.getVal(typ, data, pos)
+ node = ajson.NumericNode("", val)
+ return node, nil
+}
+
+func newFloatPlugin() *floatPlugin {
+ fp := &floatPlugin{
+ info: &jsonPluginInfo{
+ name: "Float",
+ types: []jsonDataType{jsonDouble},
+ },
+ sizes: make(map[jsonDataType]int),
+ }
+ fp.sizes = map[jsonDataType]int{
jsonDouble: 8,
}
- for _, typ := range ih.info.types {
- binlogJSON.register(typ, ih)
+ for _, typ := range fp.info.types {
+ binlogJSON.register(typ, fp)
}
- return ih
+ return fp
}
//endregion
@@ -385,7 +483,7 @@ type literalPlugin struct {
var _ jsonPlugin = (*literalPlugin)(nil)
-func (lh literalPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+func (lpl literalPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
val := jsonDataLiteral(data[pos])
switch val {
case jsonNullLiteral:
@@ -401,14 +499,14 @@ func (lh literalPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *a
}
func newLiteralPlugin() *literalPlugin {
- lh := &literalPlugin{
+ lpl := &literalPlugin{
info: &jsonPluginInfo{
name: "Literal",
types: []jsonDataType{jsonLiteral},
},
}
- binlogJSON.register(jsonLiteral, lh)
- return lh
+ binlogJSON.register(jsonLiteral, lpl)
+ return lpl
}
//endregion
@@ -427,7 +525,7 @@ var _ jsonPlugin = (*opaquePlugin)(nil)
// other types are stored as catch-all opaque types: documentation on these is scarce.
// we currently know about (and support) date/time/datetime/decimal.
-func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+func (opl opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
dataType := data[pos]
start := 3 // account for length of stored value
end := start + 8 // all currently supported opaque data types are 8 bytes in size
@@ -484,14 +582,14 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj
}
func newOpaquePlugin() *opaquePlugin {
- oh := &opaquePlugin{
+ opl := &opaquePlugin{
info: &jsonPluginInfo{
name: "Opaque",
types: []jsonDataType{jsonOpaque},
},
}
- binlogJSON.register(jsonOpaque, oh)
- return oh
+ binlogJSON.register(jsonOpaque, opl)
+ return opl
}
//endregion
@@ -508,7 +606,7 @@ type stringPlugin struct {
var _ jsonPlugin = (*stringPlugin)(nil)
-func (sh stringPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+func (spl stringPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
size, pos := readVariableLength(data, pos)
node = ajson.StringNode("", string(data[pos:pos+size]))
@@ -516,14 +614,14 @@ func (sh stringPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj
}
func newStringPlugin() *stringPlugin {
- sh := &stringPlugin{
+ spl := &stringPlugin{
info: &jsonPluginInfo{
name: "String",
types: []jsonDataType{jsonString},
},
}
- binlogJSON.register(jsonString, sh)
- return sh
+ binlogJSON.register(jsonString, spl)
+ return spl
}
//endregion
@@ -542,7 +640,7 @@ var _ jsonPlugin = (*arrayPlugin)(nil)
// arrays are stored thus:
// | type_identifier(one of [2,3]) | elem count | obj size | list of offsets+lengths of values | actual values |
-func (ah arrayPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+func (apl arrayPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
jlog("JSON Array %s, len %d", jsonDataTypeToString(uint(typ)), len(data))
var nodes []*ajson.Node
var elem *ajson.Node
@@ -565,15 +663,15 @@ func (ah arrayPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajs
}
func newArrayPlugin() *arrayPlugin {
- ah := &arrayPlugin{
+ apl := &arrayPlugin{
info: &jsonPluginInfo{
name: "Array",
types: []jsonDataType{jsonSmallArray, jsonLargeArray},
},
}
- binlogJSON.register(jsonSmallArray, ah)
- binlogJSON.register(jsonLargeArray, ah)
- return ah
+ binlogJSON.register(jsonSmallArray, apl)
+ binlogJSON.register(jsonLargeArray, apl)
+ return apl
}
//endregion
@@ -592,7 +690,7 @@ var _ jsonPlugin = (*objectPlugin)(nil)
// objects are stored thus:
// | type_identifier(0/1) | elem count | obj size | list of offsets+lengths of keys | list of offsets+lengths of values | actual keys | actual values |
-func (oh objectPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
+func (opl objectPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) {
jlog("JSON Type is %s, len %d", jsonDataTypeToString(uint(typ)), len(data))
// "large" decides number of bytes used to specify element count and total object size: 4 bytes for large, 2 for small
@@ -640,15 +738,15 @@ func (oh objectPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj
}
func newObjectPlugin() *objectPlugin {
- oh := &objectPlugin{
+ opl := &objectPlugin{
info: &jsonPluginInfo{
name: "Object",
types: []jsonDataType{jsonSmallObject, jsonLargeObject},
},
}
- binlogJSON.register(jsonSmallObject, oh)
- binlogJSON.register(jsonLargeObject, oh)
- return oh
+ binlogJSON.register(jsonSmallObject, opl)
+ binlogJSON.register(jsonLargeObject, opl)
+ return opl
}
//endregion
diff --git a/go/mysql/binlog_event_json_test.go b/go/mysql/binlog_event_json_test.go
index 711965386ed..5cc61440084 100644
--- a/go/mysql/binlog_event_json_test.go
+++ b/go/mysql/binlog_event_json_test.go
@@ -18,50 +18,65 @@ package mysql
import (
"encoding/json"
+ "fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestJSONTypes(t *testing.T) {
+ // most of these test cases have been taken from open source java/python adapters
+ // like https://github.com/shyiko/mysql-binlog-connector-java/pull/119/files
testcases := []struct {
+ name string
data []byte
expected string
isMap bool
}{{
+ name: "null",
data: []byte{},
expected: `null`,
}, {
+ name: "map, string value",
data: []byte{0, 1, 0, 14, 0, 11, 0, 1, 0, 12, 12, 0, 97, 1, 98},
expected: `{"a":"b"}`,
}, {
+ name: "map, int value",
data: []byte{0, 1, 0, 12, 0, 11, 0, 1, 0, 5, 2, 0, 97},
expected: `{"a":2}`,
}, {
+ name: "map, object value",
data: []byte{0, 1, 0, 29, 0, 11, 0, 4, 0, 0, 15, 0, 97, 115, 100, 102, 1, 0, 14, 0, 11, 0, 3, 0, 5, 123, 0, 102, 111, 111},
expected: `{"asdf":{"foo":123}}`,
}, {
+ name: "list of ints",
data: []byte{2, 2, 0, 10, 0, 5, 1, 0, 5, 2, 0},
expected: `[1,2]`,
}, {
+ name: "list of maps",
data: []byte{0, 4, 0, 60, 0, 32, 0, 1, 0, 33, 0, 1, 0, 34, 0, 2, 0, 36, 0, 2, 0, 12, 38, 0, 12, 40, 0, 12, 42, 0, 2, 46, 0, 97, 99, 97, 98, 98, 99, 1, 98, 1, 100, 3, 97, 98, 99, 2, 0, 14, 0, 12, 10, 0, 12, 12, 0, 1, 120, 1, 121},
expected: `{"a":"b","c":"d","ab":"abc","bc":["x","y"]}`,
isMap: true,
}, {
+ name: "list with one string",
data: []byte{2, 1, 0, 37, 0, 12, 8, 0, 0, 4, 104, 101, 114, 101},
expected: `["here"]`,
}, {
+ name: "list varied",
data: []byte{2, 3, 0, 37, 0, 12, 13, 0, 2, 18, 0, 12, 33, 0, 4, 104, 101, 114, 101, 2, 0, 15, 0, 12, 10, 0, 12, 12, 0, 1, 73, 2, 97, 109, 3, 33, 33, 33},
expected: `["here",["I","am"],"!!!"]`,
}, {
+ name: "string",
data: []byte{12, 13, 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103},
expected: `"scalar string"`,
}, {
+ name: "map, long string value",
data: []byte{0, 1, 0, 149, 0, 11, 0, 6, 0, 12, 17, 0, 115, 99, 111, 112, 101, 115, 130, 1, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 56, 65, 65, 65, 66, 103, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 84, 216, 142, 184},
expected: `{"scopes":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA"}`,
}, {
// repeat the same string 10 times, to test the case where length of string
// requires 2 bytes to store
+ name: "long string",
data: []byte{12, 130, 1,
115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103,
115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103,
@@ -75,89 +90,139 @@ func TestJSONTypes(t *testing.T) {
115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103},
expected: `"scalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar string"`,
}, {
+ name: "bool true",
data: []byte{4, 1},
expected: `true`,
}, {
+ name: "bool false",
data: []byte{4, 2},
expected: `false`,
}, {
+ name: "bool null",
data: []byte{4, 0},
expected: `null`,
}, {
+ name: "uint16 max",
+ data: []byte{6, 255, 255},
+ expected: `65535`,
+ }, {
+ name: "uint32 max",
+ data: []byte{8, 255, 255, 255, 255},
+ expected: `4294967295`,
+ }, {
+ name: "uint64 max",
+ data: []byte{10, 255, 255, 255, 255, 255, 255, 255, 255},
+ expected: `18446744073709551615`,
+ }, {
+ name: "int16 -1",
data: []byte{5, 255, 255},
expected: `-1`,
}, {
- data: []byte{6, 1, 0},
- expected: `1`,
+ name: "int32 -1",
+ data: []byte{7, 255, 255, 255, 255},
+ expected: `-1`,
+ }, {
+ name: "int64 -1",
+ data: []byte{9, 255, 255, 255, 255, 255, 255, 255, 255},
+ expected: `-1`,
}, {
+ name: "int16 max",
data: []byte{5, 255, 127},
expected: `32767`,
}, {
+ name: "int32 max",
+ data: []byte{7, 255, 255, 255, 127},
+ expected: `2147483647`,
+ }, {
+ name: "int64 max",
+ data: []byte{9, 255, 255, 255, 255, 255, 255, 255, 127},
+ expected: `9223372036854775807`,
+ }, {
+ name: "uint16/1",
+ data: []byte{6, 1, 0},
+ expected: `1`,
+ }, {
+ name: "int32/32768",
data: []byte{7, 0, 128, 0, 0},
expected: `32768`,
}, {
+ name: "int16/neg",
data: []byte{5, 0, 128},
expected: `-32768`,
}, {
+ name: "int32/neg",
data: []byte{7, 255, 127, 255, 255},
expected: `-32769`,
}, {
- data: []byte{7, 255, 255, 255, 127},
- expected: `2.147483647e+09`,
- }, {
+ name: "uint32",
data: []byte{8, 0, 128, 0, 0},
expected: `32768`,
}, {
+ name: "int64",
data: []byte{9, 0, 0, 0, 128, 0, 0, 0, 0},
- expected: `2.147483648e+09`,
+ expected: `2147483648`,
}, {
+ name: "int32/neg",
data: []byte{7, 0, 0, 0, 128},
- expected: `-2.147483648e+09`,
+ expected: `-2147483648`,
}, {
+ name: "int64/neg",
data: []byte{9, 255, 255, 255, 127, 255, 255, 255, 255},
- expected: `-2.147483649e+09`,
+ expected: `-2147483649`,
}, {
+ name: "uint64",
data: []byte{10, 255, 255, 255, 255, 255, 255, 255, 255},
- expected: `1.8446744073709552e+19`,
+ expected: `18446744073709551615`,
}, {
+ name: "int64/neg",
data: []byte{9, 0, 0, 0, 0, 0, 0, 0, 128},
- expected: `-9.223372036854776e+18`,
+ expected: `-9223372036854775808`,
}, {
+ name: "double",
data: []byte{11, 110, 134, 27, 240, 249, 33, 9, 64},
expected: `3.14159`,
}, {
+ name: "empty map",
data: []byte{0, 0, 0, 4, 0},
expected: `{}`,
}, {
+ name: "empty list",
data: []byte{2, 0, 0, 4, 0},
expected: `[]`,
}, {
// opaque, datetime
+ name: "datetime",
data: []byte{15, 12, 8, 0, 0, 0, 25, 118, 31, 149, 25},
expected: `"2015-01-15 23:24:25.000000"`,
}, {
// opaque, time
+ name: "time",
data: []byte{15, 11, 8, 0, 0, 0, 25, 118, 1, 0, 0},
expected: `"23:24:25.000000"`,
}, {
// opaque, time
+ name: "time2",
data: []byte{15, 11, 8, 192, 212, 1, 25, 118, 1, 0, 0},
expected: `"23:24:25.120000"`,
}, {
// opaque, date
+ name: "date",
data: []byte{15, 10, 8, 0, 0, 0, 0, 0, 30, 149, 25},
expected: `"2015-01-15"`,
}, {
// opaque, decimal
+ name: "decimal",
data: []byte{15, 246, 8, 13, 4, 135, 91, 205, 21, 4, 210},
expected: `1.234567891234e+08`,
}, {
// opaque, bit field. Not yet implemented.
+ name: "bitfield: unimplemented",
data: []byte{15, 16, 2, 202, 254},
expected: `opaque type 16 is not supported yet, data [2 202 254]`,
}}
for _, tc := range testcases {
- t.Run(tc.expected, func(t *testing.T) {
+ name := fmt.Sprintf("%s (%s)", tc.name, tc.expected)
+ t.Run(name, func(t *testing.T) {
val, err := getJSONValue(tc.data)
if err != nil {
require.Equal(t, tc.expected, err.Error())
diff --git a/go/mysql/binlog_event_make.go b/go/mysql/binlog_event_make.go
index 0b0af205d19..0688fa9540b 100644
--- a/go/mysql/binlog_event_make.go
+++ b/go/mysql/binlog_event_make.go
@@ -18,6 +18,11 @@ package mysql
import (
"encoding/binary"
+ "hash/crc32"
+)
+
+const (
+ FlagLogEventArtificial = 0x20
)
// This file contains utility methods to create binlog replication
@@ -100,7 +105,12 @@ func (s *FakeBinlogStream) Packetize(f BinlogFormat, typ byte, flags uint16, dat
}
result := make([]byte, length)
- binary.LittleEndian.PutUint32(result[0:4], s.Timestamp)
+ switch typ {
+ case eRotateEvent, eHeartbeatEvent:
+ // timestamp remains zero
+ default:
+ binary.LittleEndian.PutUint32(result[0:4], s.Timestamp)
+ }
result[4] = typ
binary.LittleEndian.PutUint32(result[5:9], s.ServerID)
binary.LittleEndian.PutUint32(result[9:13], uint32(length))
@@ -109,6 +119,13 @@ func (s *FakeBinlogStream) Packetize(f BinlogFormat, typ byte, flags uint16, dat
binary.LittleEndian.PutUint16(result[17:19], flags)
}
copy(result[f.HeaderLength:], data)
+
+ switch f.ChecksumAlgorithm {
+ case BinlogChecksumAlgCRC32:
+ checksum := crc32.ChecksumIEEE(result[0 : length-4])
+ binary.LittleEndian.PutUint32(result[length-4:], checksum)
+ }
+
return result
}
@@ -157,12 +174,38 @@ func NewRotateEvent(f BinlogFormat, s *FakeBinlogStream, position uint64, filena
len(filename)
data := make([]byte, length)
binary.LittleEndian.PutUint64(data[0:8], position)
+ copy(data[8:], filename)
ev := s.Packetize(f, eRotateEvent, 0, data)
- ev[0] = 0
- ev[1] = 0
- ev[2] = 0
- ev[3] = 0
+ return NewMysql56BinlogEvent(ev)
+}
+
+func NewFakeRotateEvent(f BinlogFormat, s *FakeBinlogStream, filename string) BinlogEvent {
+ length := 8 + // position
+ len(filename)
+ data := make([]byte, length)
+ binary.LittleEndian.PutUint64(data[0:8], 4)
+ copy(data[8:], filename)
+
+ ev := s.Packetize(f, eRotateEvent, FlagLogEventArtificial, data)
+ return NewMysql56BinlogEvent(ev)
+}
+
+// NewHeartbeatEvent returns a HeartbeatEvent.
+// see https://dev.mysql.com/doc/internals/en/heartbeat-event.html
+func NewHeartbeatEvent(f BinlogFormat, s *FakeBinlogStream) BinlogEvent {
+ ev := s.Packetize(f, eHeartbeatEvent, 0, []byte{})
+ return NewMysql56BinlogEvent(ev)
+}
+
+// NewHeartbeatEvent returns a HeartbeatEvent.
+// see https://dev.mysql.com/doc/internals/en/heartbeat-event.html
+func NewHeartbeatEventWithLogFile(f BinlogFormat, s *FakeBinlogStream, filename string) BinlogEvent {
+ length := len(filename)
+ data := make([]byte, length)
+ copy(data, filename)
+
+ ev := s.Packetize(f, eHeartbeatEvent, 0, data)
return NewMysql56BinlogEvent(ev)
}
@@ -296,9 +339,9 @@ func NewTableMapEvent(f BinlogFormat, s *FakeBinlogStream, tableID uint64, tm *T
1 + // table name length
len(tm.Name) +
1 + // [00]
- 1 + // column-count FIXME(alainjobart) len enc
+ lenEncIntSize(uint64(len(tm.Types))) + // column-count len enc
len(tm.Types) +
- 1 + // lenenc-str column-meta-def FIXME(alainjobart) len enc
+ lenEncIntSize(uint64(metadataLength)) + // lenenc-str column-meta-def
metadataLength +
len(tm.CanBeNull.data)
data := make([]byte, length)
@@ -320,15 +363,10 @@ func NewTableMapEvent(f BinlogFormat, s *FakeBinlogStream, tableID uint64, tm *T
data[pos] = 0
pos++
- data[pos] = byte(len(tm.Types)) // FIXME(alainjobart) lenenc
- pos++
-
+ pos = writeLenEncInt(data, pos, uint64(len(tm.Types)))
pos += copy(data[pos:], tm.Types)
- // Per-column meta data. Starting with len-enc length.
- // FIXME(alainjobart) lenenc
- data[pos] = byte(metadataLength)
- pos++
+ pos = writeLenEncInt(data, pos, uint64(metadataLength))
for c, typ := range tm.Types {
pos = metadataWrite(data, pos, typ, tm.Metadata[c])
}
@@ -366,10 +404,20 @@ func newRowsEvent(f BinlogFormat, s *FakeBinlogStream, typ byte, tableID uint64,
panic("Not implemented, post_header_length==6")
}
+ hasIdentify := typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 ||
+ typ == eDeleteRowsEventV1 || typ == eDeleteRowsEventV2
+ hasData := typ == eWriteRowsEventV1 || typ == eWriteRowsEventV2 ||
+ typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2
+
+ rowLen := rows.DataColumns.Count()
+ if hasIdentify {
+ rowLen = rows.IdentifyColumns.Count()
+ }
+
length := 6 + // table id
2 + // flags
2 + // extra data length, no extra data.
- 1 + // num columns FIXME(alainjobart) len enc
+ lenEncIntSize(uint64(rowLen)) + // num columns
len(rows.IdentifyColumns.data) + // only > 0 for Update & Delete
len(rows.DataColumns.data) // only > 0 for Write & Update
for _, row := range rows.Rows {
@@ -380,11 +428,6 @@ func newRowsEvent(f BinlogFormat, s *FakeBinlogStream, typ byte, tableID uint64,
}
data := make([]byte, length)
- hasIdentify := typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 ||
- typ == eDeleteRowsEventV1 || typ == eDeleteRowsEventV2
- hasData := typ == eWriteRowsEventV1 || typ == eWriteRowsEventV2 ||
- typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2
-
data[0] = byte(tableID)
data[1] = byte(tableID >> 8)
data[2] = byte(tableID >> 16)
@@ -396,12 +439,7 @@ func newRowsEvent(f BinlogFormat, s *FakeBinlogStream, typ byte, tableID uint64,
data[8] = 0x02
data[9] = 0x00
- if hasIdentify {
- data[10] = byte(rows.IdentifyColumns.Count()) // FIXME(alainjobart) len
- } else {
- data[10] = byte(rows.DataColumns.Count()) // FIXME(alainjobart) len
- }
- pos := 11
+ pos := writeLenEncInt(data, 10, uint64(rowLen))
if hasIdentify {
pos += copy(data[pos:], rows.IdentifyColumns.data)
diff --git a/go/mysql/binlog_event_make_test.go b/go/mysql/binlog_event_make_test.go
index ef30fc59374..df9bc9d2d3f 100644
--- a/go/mysql/binlog_event_make_test.go
+++ b/go/mysql/binlog_event_make_test.go
@@ -20,6 +20,9 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
@@ -31,38 +34,25 @@ func TestFormatDescriptionEvent(t *testing.T) {
s := NewFakeBinlogStream()
event := NewFormatDescriptionEvent(f, s)
- if !event.IsValid() {
- t.Fatalf("IsValid() returned false")
- }
- if !event.IsFormatDescription() {
- t.Fatalf("IsFormatDescription returned false")
- }
+ require.True(t, event.IsValid(), "IsValid() returned false")
+ require.True(t, event.IsFormatDescription(), "IsFormatDescription returned false")
+
gotF, err := event.Format()
- if err != nil {
- t.Fatalf("Format failed: %v", err)
- }
- if !reflect.DeepEqual(gotF, f) {
- t.Fatalf("Parsed BinlogFormat doesn't match, got:\n%v\nexpected:\n%v", gotF, f)
- }
+ require.NoError(t, err, "Format failed: %v", err)
+ require.True(t, reflect.DeepEqual(gotF, f), "Parsed BinlogFormat doesn't match, got:\n%v\nexpected:\n%v", gotF, f)
// MariaDB
f = NewMariaDBBinlogFormat()
s = NewFakeBinlogStream()
event = NewFormatDescriptionEvent(f, s)
- if !event.IsValid() {
- t.Fatalf("IsValid() returned false")
- }
- if !event.IsFormatDescription() {
- t.Fatalf("IsFormatDescription returned false")
- }
+ require.True(t, event.IsValid(), "IsValid() returned false")
+ require.True(t, event.IsFormatDescription(), "IsFormatDescription returned false")
+
gotF, err = event.Format()
- if err != nil {
- t.Fatalf("Format failed: %v", err)
- }
- if !reflect.DeepEqual(gotF, f) {
- t.Fatalf("Parsed BinlogFormat doesn't match, got:\n%v\nexpected:\n%v", gotF, f)
- }
+ require.NoError(t, err, "Format failed: %v", err)
+ require.True(t, reflect.DeepEqual(gotF, f), "Parsed BinlogFormat doesn't match, got:\n%v\nexpected:\n%v", gotF, f)
+
}
func TestQueryEvent(t *testing.T) {
@@ -79,24 +69,16 @@ func TestQueryEvent(t *testing.T) {
},
}
event := NewQueryEvent(f, s, q)
- if !event.IsValid() {
- t.Fatalf("NewQueryEvent returned an invalid event")
- }
- if !event.IsQuery() {
- t.Fatalf("NewQueryEvent returned a non-query event: %v", event)
- }
+ require.True(t, event.IsValid(), "NewQueryEvent returned an invalid event")
+ require.True(t, event.IsQuery(), "NewQueryEvent returned a non-query event: %v", event)
+
event, _, err := event.StripChecksum(f)
- if err != nil {
- t.Fatalf("StripChecksum failed: %v", err)
- }
+ require.NoError(t, err, "StripChecksum failed: %v", err)
gotQ, err := event.Query(f)
- if err != nil {
- t.Fatalf("event.Query() failed: %v", err)
- }
- if !reflect.DeepEqual(gotQ, q) {
- t.Fatalf("event.Query() returned %v was expecting %v", gotQ, q)
- }
+ require.NoError(t, err, "event.Query() failed: %v", err)
+ require.True(t, reflect.DeepEqual(gotQ, q), "event.Query() returned %v was expecting %v", gotQ, q)
+
}
func TestXIDEvent(t *testing.T) {
@@ -104,12 +86,9 @@ func TestXIDEvent(t *testing.T) {
s := NewFakeBinlogStream()
event := NewXIDEvent(f, s)
- if !event.IsValid() {
- t.Fatalf("NewXIDEvent().IsValid() is false")
- }
- if !event.IsXID() {
- t.Fatalf("NewXIDEvent().IsXID() is false")
- }
+ require.True(t, event.IsValid(), "NewXIDEvent().IsValid() is false")
+ require.True(t, event.IsXID(), "NewXIDEvent().IsXID() is false")
+
}
func TestIntVarEvent(t *testing.T) {
@@ -117,28 +96,21 @@ func TestIntVarEvent(t *testing.T) {
s := NewFakeBinlogStream()
event := NewIntVarEvent(f, s, IntVarLastInsertID, 0x123456789abcdef0)
- if !event.IsValid() {
- t.Fatalf("NewIntVarEvent().IsValid() is false")
- }
- if !event.IsIntVar() {
- t.Fatalf("NewIntVarEvent().IsIntVar() is false")
- }
+ require.True(t, event.IsValid(), "NewIntVarEvent().IsValid() is false")
+ require.True(t, event.IsIntVar(), "NewIntVarEvent().IsIntVar() is false")
+
name, value, err := event.IntVar(f)
if name != IntVarLastInsertID || value != 0x123456789abcdef0 || err != nil {
t.Fatalf("IntVar() returned %v/%v/%v", name, value, err)
}
event = NewIntVarEvent(f, s, IntVarInvalidInt, 0x123456789abcdef0)
- if !event.IsValid() {
- t.Fatalf("NewIntVarEvent().IsValid() is false")
- }
- if !event.IsIntVar() {
- t.Fatalf("NewIntVarEvent().IsIntVar() is false")
- }
+ require.True(t, event.IsValid(), "NewIntVarEvent().IsValid() is false")
+ require.True(t, event.IsIntVar(), "NewIntVarEvent().IsIntVar() is false")
+
name, value, err = event.IntVar(f)
- if err == nil {
- t.Fatalf("IntVar(invalid) returned %v/%v/%v", name, value, err)
- }
+ require.Error(t, err, "IntVar(invalid) returned %v/%v/%v", name, value, err)
+
}
func TestInvalidEvents(t *testing.T) {
@@ -153,24 +125,18 @@ func TestInvalidEvents(t *testing.T) {
// InvalidFormatDescriptionEvent
event = NewInvalidFormatDescriptionEvent(f, s)
- if !event.IsValid() {
- t.Fatalf("NewInvalidFormatDescriptionEvent().IsValid() is false")
- }
- if !event.IsFormatDescription() {
- t.Fatalf("NewInvalidFormatDescriptionEvent().IsFormatDescription() is false")
- }
+ require.True(t, event.IsValid(), "NewInvalidFormatDescriptionEvent().IsValid() is false")
+ require.True(t, event.IsFormatDescription(), "NewInvalidFormatDescriptionEvent().IsFormatDescription() is false")
+
if _, err := event.Format(); err == nil {
t.Fatalf("NewInvalidFormatDescriptionEvent().Format() returned err=nil")
}
// InvalidQueryEvent
event = NewInvalidQueryEvent(f, s)
- if !event.IsValid() {
- t.Fatalf("NewInvalidQueryEvent().IsValid() is false")
- }
- if !event.IsQuery() {
- t.Fatalf("NewInvalidQueryEvent().IsQuery() is false")
- }
+ require.True(t, event.IsValid(), "NewInvalidQueryEvent().IsValid() is false")
+ require.True(t, event.IsQuery(), "NewInvalidQueryEvent().IsQuery() is false")
+
if _, err := event.Query(f); err == nil {
t.Fatalf("NewInvalidQueryEvent().Query() returned err=nil")
}
@@ -183,56 +149,38 @@ func TestMariadDBGTIDEVent(t *testing.T) {
// With built-in begin.
event := NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, true)
- if !event.IsValid() {
- t.Fatalf("NewMariaDBGTIDEvent().IsValid() is false")
- }
- if !event.IsGTID() {
- t.Fatalf("NewMariaDBGTIDEvent().IsGTID() if false")
- }
+ require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false")
+ require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false")
+
event, _, err := event.StripChecksum(f)
- if err != nil {
- t.Fatalf("StripChecksum failed: %v", err)
- }
+ require.NoError(t, err, "StripChecksum failed: %v", err)
gtid, hasBegin, err := event.GTID(f)
- if err != nil {
- t.Fatalf("NewMariaDBGTIDEvent().GTID() returned error: %v", err)
- }
- if !hasBegin {
- t.Fatalf("NewMariaDBGTIDEvent() didn't store hasBegin properly.")
- }
+ require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err)
+ require.True(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.")
+
mgtid, ok := gtid.(MariadbGTID)
- if !ok {
- t.Fatalf("NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID")
- }
+ require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID")
+
if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 {
t.Fatalf("NewMariaDBGTIDEvent().GTID() returned invalid GITD: %v", mgtid)
}
// Without built-in begin.
event = NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, false)
- if !event.IsValid() {
- t.Fatalf("NewMariaDBGTIDEvent().IsValid() is false")
- }
- if !event.IsGTID() {
- t.Fatalf("NewMariaDBGTIDEvent().IsGTID() if false")
- }
+ require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false")
+ require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false")
+
event, _, err = event.StripChecksum(f)
- if err != nil {
- t.Fatalf("StripChecksum failed: %v", err)
- }
+ require.NoError(t, err, "StripChecksum failed: %v", err)
gtid, hasBegin, err = event.GTID(f)
- if err != nil {
- t.Fatalf("NewMariaDBGTIDEvent().GTID() returned error: %v", err)
- }
- if hasBegin {
- t.Fatalf("NewMariaDBGTIDEvent() didn't store hasBegin properly.")
- }
+ require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err)
+ require.False(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.")
+
mgtid, ok = gtid.(MariadbGTID)
- if !ok {
- t.Fatalf("NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID")
- }
+ require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID")
+
if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 {
t.Fatalf("NewMariaDBGTIDEvent().GTID() returned invalid GITD: %v", mgtid)
}
@@ -278,29 +226,61 @@ func TestTableMapEvent(t *testing.T) {
tm.CanBeNull.Set(9, true)
event := NewTableMapEvent(f, s, 0x102030405060, tm)
- if !event.IsValid() {
- t.Fatalf("NewTableMapEvent().IsValid() is false")
- }
- if !event.IsTableMap() {
- t.Fatalf("NewTableMapEvent().IsTableMap() if false")
- }
+ require.True(t, event.IsValid(), "NewTableMapEvent().IsValid() is false")
+ require.True(t, event.IsTableMap(), "NewTableMapEvent().IsTableMap() if false")
event, _, err := event.StripChecksum(f)
- if err != nil {
- t.Fatalf("StripChecksum failed: %v", err)
- }
+ require.NoError(t, err, "StripChecksum failed: %v", err)
tableID := event.TableID(f)
- if tableID != 0x102030405060 {
- t.Fatalf("NewTableMapEvent().ID returned %x", tableID)
- }
+ require.Equal(t, uint64(0x102030405060), tableID, "NewTableMapEvent().ID returned %x", tableID)
+
gotTm, err := event.TableMap(f)
- if err != nil {
- t.Fatalf("NewTableMapEvent().TableMapEvent() returned error: %v", err)
+ require.NoError(t, err, "NewTableMapEvent().TableMapEvent() returned error: %v", err)
+ require.True(t, reflect.DeepEqual(gotTm, tm), "NewTableMapEvent().TableMapEvent() got TableMap:\n%v\nexpected:\n%v", gotTm, tm)
+
+}
+
+func TestLargeTableMapEvent(t *testing.T) {
+ f := NewMySQL56BinlogFormat()
+ s := NewFakeBinlogStream()
+
+ colLen := 256
+ types := make([]byte, 0, colLen)
+ metadata := make([]uint16, 0, colLen)
+
+ for i := 0; i < colLen; i++ {
+ types = append(types, TypeLongLong)
+ metadata = append(metadata, 0)
}
- if !reflect.DeepEqual(gotTm, tm) {
- t.Fatalf("NewTableMapEvent().TableMapEvent() got TableMap:\n%v\nexpected:\n%v", gotTm, tm)
+
+ tm := &TableMap{
+ Flags: 0x8090,
+ Database: "my_database",
+ Name: "my_table",
+ Types: types,
+ CanBeNull: NewServerBitmap(colLen),
+ Metadata: metadata,
}
+ tm.CanBeNull.Set(1, true)
+ tm.CanBeNull.Set(2, true)
+ tm.CanBeNull.Set(5, true)
+ tm.CanBeNull.Set(9, true)
+
+ event := NewTableMapEvent(f, s, 0x102030405060, tm)
+ require.True(t, event.IsValid(), "NewTableMapEvent().IsValid() is false")
+ require.True(t, event.IsTableMap(), "NewTableMapEvent().IsTableMap() if false")
+
+ event, _, err := event.StripChecksum(f)
+ require.NoError(t, err, "StripChecksum failed: %v", err)
+
+ tableID := event.TableID(f)
+ require.Equal(t, uint64(0x102030405060), tableID, "NewTableMapEvent().ID returned %x", tableID)
+
+ gotTm, err := event.TableMap(f)
+ require.NoError(t, err, "NewTableMapEvent().TableMapEvent() returned error: %v", err)
+ require.True(t, reflect.DeepEqual(gotTm, tm), "NewTableMapEvent().TableMapEvent() got TableMap:\n%v\nexpected:\n%v", gotTm, tm)
+
}
func TestRowsEvent(t *testing.T) {
@@ -374,27 +354,145 @@ func TestRowsEvent(t *testing.T) {
}
event := NewUpdateRowsEvent(f, s, 0x102030405060, rows)
- if !event.IsValid() {
- t.Fatalf("NewRowsEvent().IsValid() is false")
+ require.True(t, event.IsValid(), "NewRowsEvent().IsValid() is false")
+ require.True(t, event.IsUpdateRows(), "NewRowsEvent().IsUpdateRows() if false")
+
+ event, _, err := event.StripChecksum(f)
+ require.NoError(t, err, "StripChecksum failed: %v", err)
+
+ tableID = event.TableID(f)
+ require.Equal(t, uint64(0x102030405060), tableID, "NewRowsEvent().ID returned %x", tableID)
+
+ gotRows, err := event.Rows(f, tm)
+ require.NoError(t, err, "NewRowsEvent().Rows() returned error: %v", err)
+ require.True(t, reflect.DeepEqual(gotRows, rows), "NewRowsEvent().Rows() got Rows:\n%v\nexpected:\n%v", gotRows, rows)
+
+ assert.NotZero(t, event.Timestamp())
+}
+
+func TestHeartbeatEvent(t *testing.T) {
+ // MySQL 5.6
+ f := NewMySQL56BinlogFormat()
+ s := NewFakeBinlogStream()
+ event := NewHeartbeatEvent(f, s)
+ require.NotNil(t, event)
+ assert.True(t, event.IsHeartbeat())
+ assert.Zero(t, event.Timestamp())
+}
+
+func TestRotateRotateEvent(t *testing.T) {
+ // MySQL 5.6
+ f := NewMySQL56BinlogFormat()
+ s := NewFakeBinlogStream()
+ event := NewRotateEvent(f, s, 456, "mysql-bin.000123")
+ require.NotNil(t, event)
+ assert.True(t, event.IsRotate())
+ nextFile, pos, err := event.NextLogFile(f)
+ assert.NoError(t, err)
+ assert.Equal(t, 456, int(pos))
+ assert.Equal(t, "mysql-bin.000123", nextFile)
+}
+
+func TestFakeRotateEvent(t *testing.T) {
+ // MySQL 5.6
+ f := NewMySQL56BinlogFormat()
+ s := NewFakeBinlogStream()
+ event := NewFakeRotateEvent(f, s, "mysql-bin.000123")
+ require.NotNil(t, event)
+ assert.True(t, event.IsRotate())
+ nextFile, pos, err := event.NextLogFile(f)
+ assert.NoError(t, err)
+ assert.Equal(t, 4, int(pos))
+ assert.Equal(t, "mysql-bin.000123", nextFile)
+}
+func TestLargeRowsEvent(t *testing.T) {
+ f := NewMySQL56BinlogFormat()
+ s := NewFakeBinlogStream()
+
+ /*
+ Reason for nolint
+ Used in line 384 to 387
+ tableID = event.ID(f)
+ if tableID != 0x102030405060 {
+ t.Fatalf("NewRowsEvent().ID returned %x", tableID)
+ }
+ */
+ tableID := uint64(0x102030405060) //nolint
+
+ colLen := 256
+ types := make([]byte, 0, colLen)
+ metadata := make([]uint16, 0, colLen)
+
+ for i := 0; i < colLen; i++ {
+ types = append(types, TypeLong)
+ metadata = append(metadata, 0)
}
- if !event.IsUpdateRows() {
- t.Fatalf("NewRowsEvent().IsUpdateRows() if false")
+
+ tm := &TableMap{
+ Flags: 0x8090,
+ Database: "my_database",
+ Name: "my_table",
+ Types: types,
+ CanBeNull: NewServerBitmap(colLen),
+ Metadata: metadata,
}
+ tm.CanBeNull.Set(1, true)
- event, _, err := event.StripChecksum(f)
- if err != nil {
- t.Fatalf("StripChecksum failed: %v", err)
+ identify := make([]byte, 0, colLen*4)
+ data := make([]byte, 0, colLen*4)
+ for i := 0; i < colLen; i++ {
+ identify = append(identify, 0x10, 0x20, 0x30, 0x40)
+ data = append(data, 0x10, 0x20, 0x30, 0x40)
}
- tableID = event.TableID(f)
- if tableID != 0x102030405060 {
- t.Fatalf("NewRowsEvent().ID returned %x", tableID)
+ // Do an update packet with all fields set.
+ rows := Rows{
+ Flags: 0x1234,
+ IdentifyColumns: NewServerBitmap(colLen),
+ DataColumns: NewServerBitmap(colLen),
+ Rows: []Row{
+ {
+ NullIdentifyColumns: NewServerBitmap(colLen),
+ NullColumns: NewServerBitmap(colLen),
+ Identify: identify,
+ Data: data,
+ },
+ },
}
- gotRows, err := event.Rows(f, tm)
- if err != nil {
- t.Fatalf("NewRowsEvent().Rows() returned error: %v", err)
+
+ // All rows are included, none are NULL.
+ for i := 0; i < colLen; i++ {
+ rows.IdentifyColumns.Set(i, true)
+ rows.DataColumns.Set(i, true)
}
- if !reflect.DeepEqual(gotRows, rows) {
- t.Fatalf("NewRowsEvent().Rows() got Rows:\n%v\nexpected:\n%v", gotRows, rows)
+
+ // Test the Rows we just created, to be sure.
+ // 1076895760 is 0x40302010.
+ identifies, _ := rows.StringIdentifiesForTests(tm, 0)
+ expected := make([]string, 0, colLen)
+ for i := 0; i < colLen; i++ {
+ expected = append(expected, "1076895760")
+ }
+ if !reflect.DeepEqual(identifies, expected) {
+ t.Fatalf("bad Rows identify, got %v expected %v", identifies, expected)
+ }
+ values, _ := rows.StringValuesForTests(tm, 0)
+ if !reflect.DeepEqual(values, expected) {
+ t.Fatalf("bad Rows data, got %v expected %v", values, expected)
}
+
+ event := NewUpdateRowsEvent(f, s, 0x102030405060, rows)
+ require.True(t, event.IsValid(), "NewRowsEvent().IsValid() is false")
+ require.True(t, event.IsUpdateRows(), "NewRowsEvent().IsUpdateRows() if false")
+
+ event, _, err := event.StripChecksum(f)
+ require.NoError(t, err, "StripChecksum failed: %v", err)
+
+ tableID = event.TableID(f)
+ require.Equal(t, uint64(0x102030405060), tableID, "NewRowsEvent().ID returned %x", tableID)
+
+ gotRows, err := event.Rows(f, tm)
+ require.NoError(t, err, "NewRowsEvent().Rows() returned error: %v", err)
+ require.True(t, reflect.DeepEqual(gotRows, rows), "NewRowsEvent().Rows() got Rows:\n%v\nexpected:\n%v", gotRows, rows)
+
}
diff --git a/go/mysql/binlog_event_mariadb_test.go b/go/mysql/binlog_event_mariadb_test.go
index 1b564f5d6e5..1464da0e573 100644
--- a/go/mysql/binlog_event_mariadb_test.go
+++ b/go/mysql/binlog_event_mariadb_test.go
@@ -21,6 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
// sample event data
@@ -100,15 +101,10 @@ func TestMariadbStandaloneBinlogEventGTID(t *testing.T) {
input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbStandaloneGTIDEvent)}
want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 9}
got, hasBegin, err := input.GTID(f)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if hasBegin {
- t.Errorf("unexpected hasBegin")
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("%#v.GTID() = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.False(t, hasBegin, "unexpected hasBegin")
+ assert.True(t, reflect.DeepEqual(got, want), "%#v.GTID() = %#v, want %#v", input, got, want)
+
}
func TestMariadbBinlogEventGTID(t *testing.T) {
@@ -121,15 +117,10 @@ func TestMariadbBinlogEventGTID(t *testing.T) {
input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbBeginGTIDEvent)}
want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 10}
got, hasBegin, err := input.GTID(f)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !hasBegin {
- t.Errorf("unexpected !hasBegin")
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("%#v.GTID() = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, hasBegin, "unexpected !hasBegin")
+ assert.True(t, reflect.DeepEqual(got, want), "%#v.GTID() = %#v, want %#v", input, got, want)
+
}
func TestMariadbBinlogEventFormat(t *testing.T) {
@@ -142,12 +133,9 @@ func TestMariadbBinlogEventFormat(t *testing.T) {
HeaderSizes: mariadbFormatEvent[76 : len(mariadbFormatEvent)-5],
}
got, err := input.Format()
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("%#v.Format() = %v, want %v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, reflect.DeepEqual(got, want), "%#v.Format() = %v, want %v", input, got, want)
+
}
func TestMariadbBinlogEventChecksumFormat(t *testing.T) {
@@ -160,27 +148,21 @@ func TestMariadbBinlogEventChecksumFormat(t *testing.T) {
HeaderSizes: mariadbChecksumFormatEvent[76 : len(mariadbChecksumFormatEvent)-5],
}
got, err := input.Format()
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("%#v.Format() = %v, want %v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, reflect.DeepEqual(got, want), "%#v.Format() = %v, want %v", input, got, want)
+
}
func TestMariadbBinlogEventStripChecksum(t *testing.T) {
f, err := (mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbChecksumFormatEvent)}).Format()
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
+ require.NoError(t, err, "unexpected error: %v", err)
input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbChecksumQueryEvent)}
wantEvent := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbChecksumStrippedQueryEvent)}
wantChecksum := []byte{0xce, 0x49, 0x7a, 0x53}
gotEvent, gotChecksum, err := input.StripChecksum(f)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
+ require.NoError(t, err, "unexpected error: %v", err)
+
if !reflect.DeepEqual(gotEvent, wantEvent) || !reflect.DeepEqual(gotChecksum, wantChecksum) {
t.Errorf("%#v.StripChecksum() = (%v, %v), want (%v, %v)", input, gotEvent, gotChecksum, wantEvent, wantChecksum)
}
@@ -188,16 +170,13 @@ func TestMariadbBinlogEventStripChecksum(t *testing.T) {
func TestMariadbBinlogEventStripChecksumNone(t *testing.T) {
f, err := (mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbFormatEvent)}).Format()
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
+ require.NoError(t, err, "unexpected error: %v", err)
input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbStandaloneGTIDEvent)}
want := input
gotEvent, gotChecksum, err := input.StripChecksum(f)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
+ require.NoError(t, err, "unexpected error: %v", err)
+
if !reflect.DeepEqual(gotEvent, want) || gotChecksum != nil {
t.Errorf("%#v.StripChecksum() = (%v, %v), want (%v, nil)", input, gotEvent, gotChecksum, want)
}
diff --git a/go/mysql/binlog_event_mysql56_test.go b/go/mysql/binlog_event_mysql56_test.go
index a1373bdfedc..d1cb16499e7 100644
--- a/go/mysql/binlog_event_mysql56_test.go
+++ b/go/mysql/binlog_event_mysql56_test.go
@@ -21,6 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
// Sample event data for MySQL 5.6.
@@ -46,14 +47,10 @@ func TestMysql56IsGTID(t *testing.T) {
func TestMysql56StripChecksum(t *testing.T) {
format, err := mysql56FormatEvent.Format()
- if err != nil {
- t.Fatalf("Format() error: %v", err)
- }
+ require.NoError(t, err, "Format() error: %v", err)
stripped, gotChecksum, err := mysql56QueryEvent.StripChecksum(format)
- if err != nil {
- t.Fatalf("StripChecksum() error: %v", err)
- }
+ require.NoError(t, err, "StripChecksum() error: %v", err)
// Check checksum.
if want := []byte{0x92, 0x12, 0x79, 0xc3}; !reflect.DeepEqual(gotChecksum, want) {
@@ -64,9 +61,8 @@ func TestMysql56StripChecksum(t *testing.T) {
// Query length is defined as "the rest of the bytes after offset X",
// so the query will be wrong if the checksum is not stripped.
gotQuery, err := stripped.Query(format)
- if err != nil {
- t.Fatalf("Query() error: %v", err)
- }
+ require.NoError(t, err, "Query() error: %v", err)
+
if want := "insert into test_table (msg) values ('hello')"; string(gotQuery.SQL) != want {
t.Errorf("query = %#v, want %#v", string(gotQuery.SQL), want)
}
@@ -74,28 +70,18 @@ func TestMysql56StripChecksum(t *testing.T) {
func TestMysql56GTID(t *testing.T) {
format, err := mysql56FormatEvent.Format()
- if err != nil {
- t.Fatalf("Format() error: %v", err)
- }
+ require.NoError(t, err, "Format() error: %v", err)
+
input, _, err := mysql56GTIDEvent.StripChecksum(format)
- if err != nil {
- t.Fatalf("StripChecksum() error: %v", err)
- }
- if !input.IsGTID() {
- t.Fatalf("IsGTID() = false, want true")
- }
+ require.NoError(t, err, "StripChecksum() error: %v", err)
+ require.True(t, input.IsGTID(), "IsGTID() = false, want true")
want, _ := parseMysql56GTID("439192bd-f37c-11e4-bbeb-0242ac11035a:4")
got, hasBegin, err := input.GTID(format)
- if err != nil {
- t.Fatalf("GTID() error: %v", err)
- }
- if hasBegin {
- t.Errorf("GTID() returned hasBegin")
- }
- if got != want {
- t.Errorf("GTID() = %#v, want %#v", got, want)
- }
+ require.NoError(t, err, "GTID() error: %v", err)
+ assert.False(t, hasBegin, "GTID() returned hasBegin")
+ assert.Equal(t, want, got, "GTID() = %#v, want %#v", got, want)
+
}
func TestMysql56ParseGTID(t *testing.T) {
@@ -106,12 +92,9 @@ func TestMysql56ParseGTID(t *testing.T) {
}
got, err := parseMysql56GTID(input)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want)
- }
+ require.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want)
+
}
func TestMysql56ParsePosition(t *testing.T) {
@@ -124,12 +107,9 @@ func TestMysql56ParsePosition(t *testing.T) {
want := Position{GTIDSet: set}
got, err := ParsePosition(Mysql56FlavorID, input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !got.Equal(want) {
- t.Errorf("(&mysql56{}).ParsePosition(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, got.Equal(want), "(&mysql56{}).ParsePosition(%#v) = %#v, want %#v", input, got, want)
+
}
func TestMysql56SemiSyncAck(t *testing.T) {
diff --git a/go/mysql/binlog_event_rbr.go b/go/mysql/binlog_event_rbr.go
index ed415f27c01..4c38f317a10 100644
--- a/go/mysql/binlog_event_rbr.go
+++ b/go/mysql/binlog_event_rbr.go
@@ -71,21 +71,25 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) {
result.Name = string(data[pos+1 : pos+1+l])
pos += 1 + l + 1
- // FIXME(alainjobart) this is varlength encoded.
- columnCount := int(data[pos])
- pos++
+ columnCount, read, ok := readLenEncInt(data, pos)
+ if !ok {
+ return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "expected column count at position %v (data=%v)", pos, data)
+ }
+ pos = read
- result.Types = data[pos : pos+columnCount]
- pos += columnCount
+ result.Types = data[pos : pos+int(columnCount)]
+ pos += int(columnCount)
- // FIXME(alainjobart) this is a var-len-string.
- l = int(data[pos])
- pos++
+ metaLen, read, ok := readLenEncInt(data, pos)
+ if !ok {
+ return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "expected metadata length at position %v (data=%v)", pos, data)
+ }
+ pos = read
// Allocate and parse / copy Metadata.
result.Metadata = make([]uint16, columnCount)
- expectedEnd := pos + l
- for c := 0; c < columnCount; c++ {
+ expectedEnd := pos + int(metaLen)
+ for c := uint64(0); c < columnCount; c++ {
var err error
result.Metadata[c], pos, err = metadataRead(data, pos, result.Types[c])
if err != nil {
@@ -97,7 +101,7 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) {
}
// A bit array that says if each column can be NULL.
- result.CanBeNull, _ = newBitmap(data, pos, columnCount)
+ result.CanBeNull, _ = newBitmap(data, pos, int(columnCount))
return result, nil
}
@@ -997,22 +1001,24 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
pos += int(extraDataLength)
}
- // FIXME(alainjobart) this is var len encoded.
- columnCount := int(data[pos])
- pos++
+ columnCount, read, ok := readLenEncInt(data, pos)
+ if !ok {
+ return result, vterrors.Errorf(vtrpc.Code_INTERNAL, "expected column count at position %v (data=%v)", pos, data)
+ }
+ pos = read
numIdentifyColumns := 0
numDataColumns := 0
if hasIdentify {
// Bitmap of the columns used for identify.
- result.IdentifyColumns, pos = newBitmap(data, pos, columnCount)
+ result.IdentifyColumns, pos = newBitmap(data, pos, int(columnCount))
numIdentifyColumns = result.IdentifyColumns.BitCount()
}
if hasData {
// Bitmap of columns that are present.
- result.DataColumns, pos = newBitmap(data, pos, columnCount)
+ result.DataColumns, pos = newBitmap(data, pos, int(columnCount))
numDataColumns = result.DataColumns.BitCount()
}
@@ -1027,7 +1033,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
// Get the identify values.
startPos := pos
valueIndex := 0
- for c := 0; c < columnCount; c++ {
+ for c := 0; c < int(columnCount); c++ {
if !result.IdentifyColumns.Bit(c) {
// This column is not represented.
continue
@@ -1057,7 +1063,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
// Get the values.
startPos := pos
valueIndex := 0
- for c := 0; c < columnCount; c++ {
+ for c := 0; c < int(columnCount); c++ {
if !result.DataColumns.Bit(c) {
// This column is not represented.
continue
diff --git a/go/mysql/binlog_event_test.go b/go/mysql/binlog_event_test.go
index af276396930..c45b8285a86 100644
--- a/go/mysql/binlog_event_test.go
+++ b/go/mysql/binlog_event_test.go
@@ -20,6 +20,8 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
+
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
@@ -35,9 +37,8 @@ func TestQueryString(t *testing.T) {
}
want := `{Database: "test_database", Charset:`
got := input.String()
- if !strings.HasPrefix(got, want) {
- t.Errorf("%#v.String() = %#v, want %#v", input, got, want)
- }
+ assert.True(t, strings.HasPrefix(got, want), "%#v.String() = %#v, want %#v", input, got, want)
+
}
func TestQueryStringNilCharset(t *testing.T) {
diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go
index ddc1c6e379b..9ac11282bdc 100644
--- a/go/mysql/client_test.go
+++ b/go/mysql/client_test.go
@@ -475,7 +475,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) {
fmt.Printf("Error: %s", err)
- assert.Contains(t, err.Error(), "cannot send HandshakeResponse41: x509:")
+ assert.Contains(t, err.Error(), "cannot send HandshakeResponse41: tls:")
// Now setup proper CA that is valid to verify
params.SslCa = path.Join(root, "ca-cert.pem")
diff --git a/go/mysql/collations/8bit.go b/go/mysql/collations/8bit.go
index 3f14ae8be33..dcce12abff8 100644
--- a/go/mysql/collations/8bit.go
+++ b/go/mysql/collations/8bit.go
@@ -36,10 +36,10 @@ type simpletables struct {
// take up a lot of binary space.
// Uncomment these fields and pass `-full8bit` to `makemysqldata` to generate
// these tables.
- // tolower *[256]byte
- // toupper *[256]byte
- // ctype *[256]byte
- sort *[256]byte
+ tolower *[256]byte
+ toupper *[256]byte
+ ctype *[256]byte
+ sort *[256]byte
}
type Collation_8bit_bin struct {
@@ -113,6 +113,24 @@ func (c *Collation_8bit_bin) Wildcard(pat []byte, matchOne rune, matchMany rune,
return newEightbitWildcardMatcher(&sortOrderIdentity, c.Collate, pat, matchOne, matchMany, escape)
}
+func (c *Collation_8bit_bin) ToLower(dst, src []byte) []byte {
+ lowerTable := c.simpletables.tolower
+
+ for _, c := range src {
+ dst = append(dst, lowerTable[c])
+ }
+ return dst
+}
+
+func (c *Collation_8bit_bin) ToUpper(dst, src []byte) []byte {
+ upperTable := c.simpletables.toupper
+
+ for _, c := range src {
+ dst = append(dst, upperTable[c])
+ }
+ return dst
+}
+
type Collation_8bit_simple_ci struct {
id ID
name string
@@ -224,6 +242,24 @@ func weightStringPadingSimple(padChar byte, dst []byte, numCodepoints int, padTo
return dst
}
+func (c *Collation_8bit_simple_ci) ToLower(dst, src []byte) []byte {
+ lowerTable := c.simpletables.tolower
+
+ for _, c := range src {
+ dst = append(dst, lowerTable[c])
+ }
+ return dst
+}
+
+func (c *Collation_8bit_simple_ci) ToUpper(dst, src []byte) []byte {
+ upperTable := c.simpletables.toupper
+
+ for _, c := range src {
+ dst = append(dst, upperTable[c])
+ }
+ return dst
+}
+
type Collation_binary struct{}
func (c *Collation_binary) Init() {}
@@ -283,3 +319,13 @@ func (c *Collation_binary) WeightStringLen(numBytes int) int {
func (c *Collation_binary) Wildcard(pat []byte, matchOne rune, matchMany rune, escape rune) WildcardPattern {
return newEightbitWildcardMatcher(&sortOrderIdentity, c.Collate, pat, matchOne, matchMany, escape)
}
+
+func (c *Collation_binary) ToLower(dst, raw []byte) []byte {
+ dst = append(dst, raw...)
+ return dst
+}
+
+func (c *Collation_binary) ToUpper(dst, raw []byte) []byte {
+ dst = append(dst, raw...)
+ return dst
+}
diff --git a/go/mysql/collations/coercion.go b/go/mysql/collations/coercion.go
index 3087975f239..edd1a6289ea 100644
--- a/go/mysql/collations/coercion.go
+++ b/go/mysql/collations/coercion.go
@@ -208,9 +208,16 @@ func (env *Environment) MergeCollations(left, right TypedCollation, opt Coercion
if leftColl == nil || rightColl == nil {
return TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation)
}
+
leftCS := leftColl.Charset()
rightCS := rightColl.Charset()
+ if left.Coercibility == CoerceExplicit && right.Coercibility == CoerceExplicit {
+ if left.Collation != right.Collation {
+ goto cannotCoerce
+ }
+ }
+
if leftCS.Name() == rightCS.Name() {
switch {
case left.Coercibility < right.Coercibility:
diff --git a/go/mysql/collations/collation.go b/go/mysql/collations/collation.go
index 651821381e8..a343b29adc2 100644
--- a/go/mysql/collations/collation.go
+++ b/go/mysql/collations/collation.go
@@ -22,6 +22,13 @@ import (
"vitess.io/vitess/go/mysql/collations/internal/charset"
)
+// CaseAwareCollation implements lowercase and uppercase conventions for collations.
+type CaseAwareCollation interface {
+ Collation
+ ToUpper(dst []byte, src []byte) []byte
+ ToLower(dst []byte, src []byte) []byte
+}
+
// ID is a numeric identifier for a collation. These identifiers are defined by MySQL, not by Vitess.
type ID uint16
@@ -188,3 +195,8 @@ func Validate(collation Collation, input []byte) bool {
func Convert(dst []byte, dstCollation Collation, src []byte, srcCollation Collation) ([]byte, error) {
return charset.Convert(dst, dstCollation.Charset(), src, srcCollation.Charset())
}
+
+// Length returns the number of codepoints in the input based on the given collation
+func Length(collation Collation, input []byte) int {
+ return charset.Length(collation.Charset(), input)
+}
diff --git a/go/mysql/collations/env.go b/go/mysql/collations/env.go
index a0873fa0670..db95b2d5ad3 100644
--- a/go/mysql/collations/env.go
+++ b/go/mysql/collations/env.go
@@ -232,6 +232,33 @@ func (env *Environment) CharsetAlias(charset string) (alias string, ok bool) {
return
}
+// CollationAlias returns the internal collaction name for the given charset.
+// For now, this maps all `utf8` to `utf8mb3` collation names; in future versions of MySQL,
+// this mapping will change, so it's important to use this helper so that
+// Vitess code has a consistent mapping for the active collations environment.
+func (env *Environment) CollationAlias(collation string) (string, bool) {
+ col := env.LookupByName(collation)
+ if col == nil {
+ return collation, false
+ }
+ allCols, ok := globalVersionInfo[col.ID()]
+ if !ok {
+ return collation, false
+ }
+ if len(allCols.alias) == 1 {
+ return collation, false
+ }
+ for _, alias := range allCols.alias {
+ for source, dest := range env.version.charsetAliases() {
+ if strings.HasPrefix(collation, fmt.Sprintf("%s_", source)) &&
+ strings.HasPrefix(alias.name, fmt.Sprintf("%s_", dest)) {
+ return alias.name, true
+ }
+ }
+ }
+ return collation, false
+}
+
// DefaultConnectionCharset is the default charset that Vitess will use when negotiating a
// charset in a MySQL connection handshake. Note that in this context, a 'charset' is equivalent
// to a Collation ID, with the exception that it can only fit in 1 byte.
diff --git a/go/mysql/collations/golden_test.go b/go/mysql/collations/golden_test.go
index 9d38e690da3..57321e4818d 100644
--- a/go/mysql/collations/golden_test.go
+++ b/go/mysql/collations/golden_test.go
@@ -25,6 +25,9 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/mysql/collations/internal/charset"
"vitess.io/vitess/go/mysql/collations/internal/testutil"
)
@@ -52,10 +55,8 @@ func TestGoldenWeights(t *testing.T) {
}
result := coll.WeightString(nil, input, 0)
- if !bytes.Equal(expected, result) {
- t.Errorf("mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v",
- coll.Name(), string(goldenCase.Text), input, expected, result)
- }
+ assert.True(t, bytes.Equal(expected, result), "mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v", coll.Name(), string(goldenCase.Text), input, expected, result)
+
}
})
}
@@ -78,9 +79,8 @@ func TestCollationsForLanguage(t *testing.T) {
}
for lang := range testutil.KnownLanguages {
- if len(langCounts[lang]) == 0 {
- t.Errorf("no collations found for %q", lang)
- }
+ assert.NotEqual(t, 0, len(langCounts[lang]), "no collations found for %q", lang)
+
t.Logf("%s: %v", lang, langCounts[lang])
}
}
@@ -191,12 +191,9 @@ func TestAllCollationsByCharset(t *testing.T) {
// this doesn't work yet
continue
}
- if cset.Default == nil {
- t.Fatalf("charset %s has no default", csname)
- }
- if cset.Binary == nil {
- t.Fatalf("charset %s has no binary", csname)
- }
+ require.NotNil(t, cset.Default, "charset %s has no default", csname)
+ require.NotNil(t, cset.Binary, "charset %s has no binary", csname)
+
}
for charset, expected := range tc.defaults {
diff --git a/go/mysql/collations/integration/charset_test.go b/go/mysql/collations/integration/charset_test.go
index 2384e4b3077..96ce9dab0a1 100644
--- a/go/mysql/collations/integration/charset_test.go
+++ b/go/mysql/collations/integration/charset_test.go
@@ -21,6 +21,8 @@ import (
"testing"
"unicode/utf8"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/internal/charset"
"vitess.io/vitess/go/mysql/collations/remote"
@@ -85,9 +87,8 @@ func TestCJKStress(t *testing.T) {
t.Helper()
ours, _ := charset.ConvertFromUTF8(nil, local, block)
theirs, err := charset.ConvertFromUTF8(nil, remote, block)
- if err != nil {
- t.Fatalf("remote transcoding failed: %v", err)
- }
+ require.NoError(t, err, "remote transcoding failed: %v", err)
+
return ours, theirs
}
@@ -95,9 +96,8 @@ func TestCJKStress(t *testing.T) {
t.Helper()
ours, _ := charset.Convert(nil, charset.Charset_utf8mb4{}, block, local)
theirs, err := charset.Convert(nil, remoteUtf8mb4, block, remote)
- if err != nil {
- t.Fatalf("remote transcoding failed: %v", err)
- }
+ require.NoError(t, err, "remote transcoding failed: %v", err)
+
return ours, theirs
}
@@ -111,11 +111,8 @@ func TestCJKStress(t *testing.T) {
for _, cp := range string(block) {
input := string(cp)
ours, theirs := convert([]byte(input))
- if !bytes.Equal(ours, theirs) {
- t.Fatalf("%s: bad conversion for %q (U+%04X). ours: %#v, theirs: %#v",
- local.Name(), input, cp, ours, theirs,
- )
- }
+ require.True(t, bytes.Equal(ours, theirs), "%s: bad conversion for %q (U+%04X). ours: %#v, theirs: %#v", local.Name(), input, cp, ours, theirs)
+
}
panic("???")
}
@@ -127,11 +124,8 @@ func TestCJKStress(t *testing.T) {
ours, _ := charset.ConvertFromUTF8(nil, local, []byte(input))
ours2, theirs2 := unconvert(ours)
- if !bytes.Equal(ours2, theirs2) {
- t.Fatalf("%s: bad return conversion for %q (U+%04X) %#v. ours: %#v, theirs: %#v",
- local.Name(), input, cp, ours, ours2, theirs2,
- )
- }
+ require.True(t, bytes.Equal(ours2, theirs2), "%s: bad return conversion for %q (U+%04X) %#v. ours: %#v, theirs: %#v", local.Name(), input, cp, ours, ours2, theirs2)
+
}
panic("???")
}
diff --git a/go/mysql/collations/integration/coercion_test.go b/go/mysql/collations/integration/coercion_test.go
index 486c0c3dc53..e7257e5b953 100644
--- a/go/mysql/collations/integration/coercion_test.go
+++ b/go/mysql/collations/integration/coercion_test.go
@@ -23,6 +23,7 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql/collations"
@@ -59,12 +60,8 @@ func (tc *testConcat) Expression() string {
func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 collations.Coercion) {
localCollation := collations.Local().LookupByID(local.Collation)
- if localCollation.Name() != remote.Collation.Name() {
- t.Errorf("bad collation resolved: local is %s, remote is %s", localCollation.Name(), remote.Collation.Name())
- }
- if local.Coercibility != remote.Coercibility {
- t.Errorf("bad coercibility resolved: local is %d, remote is %d", local.Coercibility, remote.Coercibility)
- }
+ assert.Equal(t, remote.Collation.Name(), localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remote.Collation.Name())
+ assert.Equal(t, remote.Coercibility, local.Coercibility, "bad coercibility resolved: local is %d, remote is %d", local.Coercibility, remote.Coercibility)
leftText, err := coercion1(nil, tc.left.Text)
if err != nil {
@@ -84,14 +81,10 @@ func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local col
rEBytes, err := remote.Expr.ToBytes()
require.NoError(t, err)
- if !bytes.Equal(concat.Bytes(), rEBytes) {
- t.Errorf("failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v",
- tc.left.Text, tc.left.Collation.Name(),
- tc.right.Text, tc.right.Collation.Name(),
- leftText, rightText, localCollation.Name(),
- concat.Bytes(), rEBytes,
- )
- }
+ assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, tc.left.Collation.Name(),
+ tc.right.Text, tc.right.Collation.Name(), leftText, rightText, localCollation.Name(),
+ concat.Bytes(), rEBytes)
+
}
type testComparison struct {
@@ -122,10 +115,8 @@ func (tc *testComparison) Test(t *testing.T, remote *RemoteCoercionResult, local
require.NoError(t, err)
remoteEquals := rEBytes[0] == '1'
localEquals := localCollation.Collate(leftText, rightText, false) == 0
- if remoteEquals != localEquals {
- t.Errorf("failed to collate %#v = %#v with collation %s (expected %v, got %v)",
- leftText, rightText, localCollation.Name(), remoteEquals, localEquals)
- }
+ assert.Equal(t, localEquals, remoteEquals, "failed to collate %#v = %#v with collation %s (expected %v, got %v)", leftText, rightText, localCollation.Name(), remoteEquals, localEquals)
+
}
func TestComparisonSemantics(t *testing.T) {
@@ -135,6 +126,10 @@ func TestComparisonSemantics(t *testing.T) {
conn := mysqlconn(t)
defer conn.Close()
+ if v, err := conn.ServerVersionAtLeast(8, 0, 31); err != nil || !v {
+ t.Skipf("The behavior of Coercion Semantics is not correct before 8.0.31")
+ }
+
for _, coll := range collations.Local().AllCollations() {
text := verifyTranscoding(t, coll, remote.NewCollation(conn, coll.Name()), []byte(BaseString))
testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll})
@@ -195,16 +190,14 @@ func TestComparisonSemantics(t *testing.T) {
resultRemote, errRemote := conn.ExecuteFetch(query, 1, false)
if errRemote != nil {
- if !strings.Contains(errRemote.Error(), "Illegal mix of collations") {
- t.Fatalf("query %s failed: %v", query, errRemote)
- }
+ require.True(t, strings.Contains(errRemote.Error(), "Illegal mix of collations"), "query %s failed: %v", query, errRemote)
+
if errLocal == nil {
t.Errorf("expected %s vs %s to fail coercion: %v", collA.Collation.Name(), collB.Collation.Name(), errRemote)
continue
}
- if !strings.HasPrefix(normalizeCollationInError(errRemote.Error()), normalizeCollationInError(errLocal.Error())) {
- t.Fatalf("bad error message: expected %q, got %q", errRemote, errLocal)
- }
+ require.True(t, strings.HasPrefix(normalizeCollationInError(errRemote.Error()), normalizeCollationInError(errLocal.Error())), "bad error message: expected %q, got %q", errRemote, errLocal)
+
continue
}
diff --git a/go/mysql/collations/integration/collations_test.go b/go/mysql/collations/integration/collations_test.go
index d9cc6f11c3f..32ffb81a498 100644
--- a/go/mysql/collations/integration/collations_test.go
+++ b/go/mysql/collations/integration/collations_test.go
@@ -116,9 +116,7 @@ func parseWeightString(b []byte) []byte {
func (u *uca900CollationTest) Test(t *testing.T, result *sqltypes.Result) {
coll := collationEnv.LookupByName(u.collation)
- if coll == nil {
- t.Fatalf("unknown collation %q", u.collation)
- }
+ require.NotNil(t, coll, "unknown collation %q", u.collation)
var checked, errors int
for _, row := range result.Rows {
@@ -231,10 +229,8 @@ func TestCollationWithSpace(t *testing.T) {
for _, size := range []int{0, codepoints, codepoints + 1, codepoints + 2, 20, 32} {
localWeight := local.WeightString(nil, []byte(ExampleString), size)
remoteWeight := remote.WeightString(nil, []byte(ExampleString), size)
- if !bytes.Equal(localWeight, remoteWeight) {
- t.Fatalf("mismatch at len=%d\ninput: %#v\nexpected: %#v\nactual: %#v",
- size, []byte(ExampleString), remoteWeight, localWeight)
- }
+ require.True(t, bytes.Equal(localWeight, remoteWeight), "mismatch at len=%d\ninput: %#v\nexpected: %#v\nactual: %#v", size, []byte(ExampleString), remoteWeight, localWeight)
+
}
})
}
diff --git a/go/mysql/collations/integration/helpers_test.go b/go/mysql/collations/integration/helpers_test.go
index cc67dd82048..d185168d9d1 100644
--- a/go/mysql/collations/integration/helpers_test.go
+++ b/go/mysql/collations/integration/helpers_test.go
@@ -24,6 +24,9 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/internal/charset"
@@ -55,10 +58,7 @@ func testRemoteWeights(t *testing.T, golden io.Writer, cases []testweight) {
if err := remote.LastError(); err != nil {
t.Fatalf("remote collation failed: %v", err)
}
-
- if !bytes.Equal(localResult, remoteResult) {
- t.Errorf("expected WEIGHT_STRING(%#v) = %#v (got %#v)", tc.input, remoteResult, localResult)
- }
+ assert.True(t, bytes.Equal(localResult, remoteResult), "expected WEIGHT_STRING(%#v) = %#v (got %#v)", tc.input, remoteResult, localResult)
if golden != nil {
fmt.Fprintf(golden, "{\n\tcollation: %q,\n\texpected: %#v,\n},\n", tc.collation, remoteResult)
@@ -91,9 +91,8 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) {
if err := remote.LastError(); err != nil {
t.Fatalf("remote collation failed: %v", err)
}
- if localResult != remoteResult {
- t.Errorf("expected STRCMP(%q, %q) = %d (got %d)", string(tc.left), string(tc.right), remoteResult, localResult)
- }
+ assert.Equal(t, remoteResult, localResult, "expected STRCMP(%q, %q) = %d (got %d)", string(tc.left), string(tc.right), remoteResult, localResult)
+
if golden != nil {
fmt.Fprintf(golden, "{\n\tcollation: %q,\n\tleft: %#v,\n\tright: %#v,\n\texpected: %d,\n},\n",
tc.collation, tc.left, tc.right, remoteResult)
@@ -104,16 +103,12 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) {
func verifyTranscoding(t *testing.T, local collations.Collation, remote *remote.Collation, text []byte) []byte {
transRemote, err := charset.ConvertFromUTF8(nil, remote.Charset(), text)
- if err != nil {
- t.Fatalf("remote transcoding failed: %v", err)
- }
+ require.NoError(t, err, "remote transcoding failed: %v", err)
transLocal, _ := charset.ConvertFromUTF8(nil, local.Charset(), text)
- if !bytes.Equal(transLocal, transRemote) {
- t.Fatalf("transcoding mismatch with %s (%d, charset: %s)\ninput:\n%s\nremote:\n%s\nlocal:\n%s\n",
- local.Name(), local.ID(), local.Charset().Name(),
- hex.Dump(text), hex.Dump(transRemote), hex.Dump(transLocal))
- }
+ require.True(t, bytes.Equal(transLocal, transRemote), "transcoding mismatch with %s (%d, charset: %s)\ninput:\n%s\nremote:\n%s\nlocal:\n%s\n", local.Name(), local.ID(), local.Charset().Name(),
+ hex.Dump(text), hex.Dump(transRemote), hex.Dump(transLocal))
+
return transLocal
}
@@ -143,9 +138,8 @@ func verifyWeightString(t *testing.T, local collations.Collation, remote *remote
func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
res, err := conn.ExecuteFetch(query, -1, true)
- if err != nil {
- t.Fatalf("failed to execute %q: %v", query, err)
- }
+ require.NoError(t, err, "failed to execute %q: %v", query, err)
+
return res
}
diff --git a/go/mysql/collations/internal/charset/eightbit/8bit.go b/go/mysql/collations/internal/charset/eightbit/8bit.go
index c49b1515e62..f63015cd8fc 100644
--- a/go/mysql/collations/internal/charset/eightbit/8bit.go
+++ b/go/mysql/collations/internal/charset/eightbit/8bit.go
@@ -73,3 +73,7 @@ func (e *Charset_8bit) EncodeRune(dst []byte, r rune) int {
}
return -1
}
+
+func (Charset_8bit) Length(src []byte) int {
+ return len(src)
+}
diff --git a/go/mysql/collations/internal/charset/eightbit/binary.go b/go/mysql/collations/internal/charset/eightbit/binary.go
index b22e67293ab..7309b6d7529 100644
--- a/go/mysql/collations/internal/charset/eightbit/binary.go
+++ b/go/mysql/collations/internal/charset/eightbit/binary.go
@@ -54,3 +54,7 @@ func (c Charset_binary) DecodeRune(bytes []byte) (rune, int) {
func (c Charset_binary) Convert(_, in []byte, _ types.Charset) ([]byte, error) {
return in, nil
}
+
+func (Charset_binary) Length(src []byte) int {
+ return len(src)
+}
diff --git a/go/mysql/collations/internal/charset/eightbit/latin1.go b/go/mysql/collations/internal/charset/eightbit/latin1.go
index 157e9e3f003..c321443f6b9 100644
--- a/go/mysql/collations/internal/charset/eightbit/latin1.go
+++ b/go/mysql/collations/internal/charset/eightbit/latin1.go
@@ -222,3 +222,7 @@ func (Charset_latin1) DecodeRune(src []byte) (rune, int) {
}
return rune(tounicode_latin1[src[0]]), 1
}
+
+func (Charset_latin1) Length(src []byte) int {
+ return len(src)
+}
diff --git a/go/mysql/collations/internal/charset/helpers.go b/go/mysql/collations/internal/charset/helpers.go
index 775556af06d..6dee09e77bc 100644
--- a/go/mysql/collations/internal/charset/helpers.go
+++ b/go/mysql/collations/internal/charset/helpers.go
@@ -44,3 +44,16 @@ func Validate(charset Charset, input []byte) bool {
}
return true
}
+
+func Length(charset Charset, input []byte) int {
+ if charset, ok := charset.(interface{ Length([]byte) int }); ok {
+ return charset.Length(input)
+ }
+ var count int
+ for len(input) > 0 {
+ _, size := charset.DecodeRune(input)
+ input = input[size:]
+ count++
+ }
+ return count
+}
diff --git a/go/mysql/collations/internal/charset/unicode/utf16.go b/go/mysql/collations/internal/charset/unicode/utf16.go
index a53994f0b1d..f5cbe9965da 100644
--- a/go/mysql/collations/internal/charset/unicode/utf16.go
+++ b/go/mysql/collations/internal/charset/unicode/utf16.go
@@ -185,3 +185,11 @@ func (Charset_ucs2) DecodeRune(p []byte) (rune, int) {
func (Charset_ucs2) SupportsSupplementaryChars() bool {
return false
}
+
+func (Charset_ucs2) Length(src []byte) int {
+ cnt := len(src)
+ if cnt%2 != 0 {
+ return cnt/2 + 1
+ }
+ return cnt / 2
+}
diff --git a/go/mysql/collations/internal/charset/unicode/utf32.go b/go/mysql/collations/internal/charset/unicode/utf32.go
index 0cf1d208f7d..77c749f03ed 100644
--- a/go/mysql/collations/internal/charset/unicode/utf32.go
+++ b/go/mysql/collations/internal/charset/unicode/utf32.go
@@ -57,3 +57,11 @@ func (Charset_utf32) DecodeRune(p []byte) (rune, int) {
func (Charset_utf32) SupportsSupplementaryChars() bool {
return true
}
+
+func (Charset_utf32) CharLen(src []byte) int {
+ cnt := len(src)
+ if cnt%4 != 0 {
+ return cnt/4 + 1
+ }
+ return cnt / 4
+}
diff --git a/go/mysql/collations/internal/charset/unicode/utf8.go b/go/mysql/collations/internal/charset/unicode/utf8.go
index e33df1fcbe9..0b8a9655650 100644
--- a/go/mysql/collations/internal/charset/unicode/utf8.go
+++ b/go/mysql/collations/internal/charset/unicode/utf8.go
@@ -177,6 +177,10 @@ func (Charset_utf8mb3) SupportsSupplementaryChars() bool {
return false
}
+func (Charset_utf8mb3) Length(src []byte) int {
+ return utf8.RuneCount(src)
+}
+
type Charset_utf8mb4 struct{}
func (Charset_utf8mb4) Name() string {
@@ -207,3 +211,7 @@ func (Charset_utf8mb4) SupportsSupplementaryChars() bool {
func (Charset_utf8mb4) Validate(p []byte) bool {
return utf8.Valid(p)
}
+
+func (Charset_utf8mb4) Length(src []byte) int {
+ return utf8.RuneCount(src)
+}
diff --git a/go/mysql/collations/internal/uca/iter_fast_900.go b/go/mysql/collations/internal/uca/iter_fast_900.go
index 1d2aafe0f22..cbe32cfdb70 100644
--- a/go/mysql/collations/internal/uca/iter_fast_900.go
+++ b/go/mysql/collations/internal/uca/iter_fast_900.go
@@ -66,7 +66,7 @@ func (it *FastIterator900) FastForward32(it2 *FastIterator900) int {
p1 := it.input
p2 := it2.input
- var w1, w2 uint32
+ var w1, w2 uint16
for len(p1) >= 4 && len(p2) >= 4 {
dword1 := *(*uint32)(unsafe.Pointer(&p1[0]))
@@ -75,17 +75,20 @@ func (it *FastIterator900) FastForward32(it2 *FastIterator900) int {
if nonascii == 0 {
if dword1 != dword2 {
+ // Use the weight string fast tables for quick weight comparisons;
+ // see (*FastIterator900).NextWeightBlock64 for a description of
+ // the table format
table := it.fastTable
- if w1, w2 = table[p1[0]], table[p2[0]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[0]]), uint16(table[p2[0]]); w1 != w2 {
goto mismatch
}
- if w1, w2 = table[p1[1]], table[p2[1]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[1]]), uint16(table[p2[1]]); w1 != w2 {
goto mismatch
}
- if w1, w2 = table[p1[2]], table[p2[2]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[2]]), uint16(table[p2[2]]); w1 != w2 {
goto mismatch
}
- if w1, w2 = table[p1[3]], table[p2[3]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[3]]), uint16(table[p2[3]]); w1 != w2 {
goto mismatch
}
}
@@ -114,7 +117,8 @@ mismatch:
it.unicode++
return 0
}
- return int(w1) - int(w2)
+ // The weights must be byte-swapped before comparison because they're stored in big endian
+ return int(bits.ReverseBytes16(w1)) - int(bits.ReverseBytes16(w2))
}
// NextWeightBlock64 takes a byte slice of 16 bytes and fills it with the next
diff --git a/go/mysql/collations/local.go b/go/mysql/collations/local.go
index 988de156973..c0d3c10da09 100644
--- a/go/mysql/collations/local.go
+++ b/go/mysql/collations/local.go
@@ -35,14 +35,7 @@ func Local() *Environment {
if !flag.Parsed() {
panic("collations.Local() called too early")
}
- if mySQLVersion := servenv.MySQLServerVersion(); mySQLVersion == "" {
- // The default server version used by vtgate is 5.7.9
- // NOTE: this should be changed along with the effective default
- // for the vtgate mysql_server_version flag.
- defaultEnv = fetchCacheEnvironment(collverMySQL57)
- } else {
- defaultEnv = NewEnvironment(mySQLVersion)
- }
+ defaultEnv = NewEnvironment(servenv.MySQLServerVersion())
})
return defaultEnv
}
diff --git a/go/mysql/collations/mysqldata.go b/go/mysql/collations/mysqldata.go
index 636e4ef60b3..57887f515b2 100644
--- a/go/mysql/collations/mysqldata.go
+++ b/go/mysql/collations/mysqldata.go
@@ -8,6 +8,63 @@ import (
uca "vitess.io/vitess/go/mysql/collations/internal/uca"
)
+var ctype_dec8_swedish_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
+var tolower_dec8_swedish_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_dec8_swedish_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff,
+}
+
var sortorder_dec8_swedish_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -64,6 +121,63 @@ var tounicode_dec8_swedish_ci = [...]uint16{
var fromunicode_dec8_swedish_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa8, 0xa5, 0x0, 0xa7, 0x0, 0xa9, 0xaa, 0xab, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0xb5, 0xb6, 0xb7, 0x0, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0x0, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0x0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0x0, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x0, 0x0, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0x0, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x0, 0x0, 0xfd}}, {From: 0x152, To: 0x178, Range: []uint8{0xd7, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdd}}}
+var ctype_cp850_general_ci = [...]uint8{
+ 0x00, 0x20, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x20, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x30, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01,
+ 0x01, 0x01, 0x02, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x02, 0x10, 0x01, 0x10,
+ 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x02, 0x01, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x02, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01,
+ 0x10, 0x01, 0x02, 0x01, 0x01, 0x02, 0x01, 0x10, 0x02, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+}
+
+var tolower_cp850_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86,
+ 0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_cp850_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x9a, 0x90, 0x41, 0x8e, 0x41, 0x8f, 0x80, 0x45, 0x45, 0x45, 0x49, 0x49, 0x49, 0x8e, 0x8f,
+ 0x90, 0x92, 0x92, 0x4f, 0x99, 0x4f, 0x55, 0x55, 0x59, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0x41, 0x49, 0x4f, 0x55, 0xa5, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_cp850_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -120,6 +234,25 @@ var tounicode_cp850_general_ci = [...]uint16{
var fromunicode_cp850_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xad, 0xbd, 0x9c, 0xcf, 0xbe, 0xdd, 0xf5, 0xf9, 0xb8, 0xa6, 0xae, 0xaa, 0xf0, 0xa9, 0xee, 0xf8, 0xf1, 0xfd, 0xfc, 0xef, 0xe6, 0xf4, 0xfa, 0xf7, 0xfb, 0xa7, 0xaf, 0xac, 0xab, 0xf3, 0xa8, 0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, 0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, 0xd1, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0x9e, 0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0xed, 0xe8, 0xe1, 0x85, 0xa0, 0x83, 0xc6, 0x84, 0x86, 0x91, 0x87, 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, 0xd0, 0xa4, 0x95, 0xa2, 0x93, 0xe4, 0x94, 0xf6, 0x9b, 0x97, 0xa3, 0x96, 0x81, 0xec, 0xe7, 0x98}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0x0, 0x0, 0xc9, 0x0, 0x0, 0xbb, 0x0, 0x0, 0xc8, 0x0, 0x0, 0xbc, 0x0, 0x0, 0xcc, 0x0, 0x0, 0xb9, 0x0, 0x0, 0xcb, 0x0, 0x0, 0xca, 0x0, 0x0, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, {From: 0x131, To: 0x192, Range: []uint8{0xd5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, {From: 0x2017, To: 0x2017, Range: []uint8{0xf2}}}
+var ctype_latin1_german1_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x10, 0x00, 0x10, 0x02, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x10, 0x01, 0x00, 0x01,
+ 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x02, 0x10, 0x02, 0x00, 0x02,
+ 0x01, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
var sortorder_latin1_german1_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -139,6 +272,63 @@ var sortorder_latin1_german1_ci = [...]uint8{
0xd0, 0x4e, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0xf7, 0x4f, 0x55, 0x55, 0x55, 0x55, 0x59, 0xde, 0xff,
}
+var ctype_hp8_english_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x20, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x02, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x02, 0x10, 0x02,
+ 0x02, 0x01, 0x10, 0x10, 0x01, 0x02, 0x10, 0x10, 0x02, 0x01, 0x10, 0x01, 0x01, 0x01, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x20, 0x20, 0x20, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+}
+
+var tolower_hp8_english_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xc8, 0xc0, 0xc9, 0xc1, 0xcd, 0xd1, 0xdd, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xcb, 0xc3, 0xaf,
+ 0xb0, 0xb2, 0xb2, 0xb3, 0xb5, 0xb5, 0xb7, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd4, 0xd1, 0xd6, 0xd7, 0xd4, 0xd5, 0xd6, 0xd7, 0xcc, 0xd9, 0xce, 0xcf, 0xc5, 0xdd, 0xde, 0xc2,
+ 0xc4, 0xe2, 0xe2, 0xe4, 0xe4, 0xd5, 0xd9, 0xc6, 0xca, 0xea, 0xea, 0xec, 0xec, 0xc7, 0xef, 0xef,
+ 0xf1, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_hp8_english_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb1, 0xb3, 0xb4, 0xb4, 0xb6, 0xb6, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xa2, 0xa4, 0xdf, 0xae, 0xe0, 0xdc, 0xe7, 0xed, 0xa1, 0xa3, 0xe8, 0xad, 0xd8, 0xa5, 0xda, 0xdb,
+ 0xd0, 0xa6, 0xd2, 0xd3, 0xd0, 0xe5, 0xd2, 0xd3, 0xd8, 0xe6, 0xda, 0xdb, 0xdc, 0xa7, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe1, 0xe3, 0xe3, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xe9, 0xeb, 0xeb, 0xed, 0xee, 0xee,
+ 0xf0, 0xf0, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_hp8_english_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -195,6 +385,63 @@ var tounicode_hp8_english_ci = [...]uint16{
var fromunicode_hp8_english_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xb8, 0xbf, 0xbb, 0xba, 0xbc, 0x0, 0xbd, 0xab, 0x0, 0xf9, 0xfb, 0x0, 0x0, 0x0, 0xb0, 0xb3, 0xfe, 0x0, 0x0, 0xa8, 0xf3, 0xf4, 0xf2, 0x0, 0x0, 0xfa, 0xfd, 0xf7, 0xf8, 0xf5, 0xb9, 0xa1, 0xe0, 0xa2, 0xe1, 0xd8, 0xd0, 0xd3, 0xb4, 0xa3, 0xdc, 0xa4, 0xa5, 0xe6, 0xe5, 0xa6, 0xa7, 0xe3, 0xb6, 0xe8, 0xe7, 0xdf, 0xe9, 0xda, 0x0, 0xd2, 0xad, 0xed, 0xae, 0xdb, 0xb1, 0xf0, 0xde, 0xc8, 0xc4, 0xc0, 0xe2, 0xcc, 0xd4, 0xd7, 0xb5, 0xc9, 0xc5, 0xc1, 0xcd, 0xd9, 0xd5, 0xd1, 0xdd, 0xe4, 0xb7, 0xca, 0xc6, 0xc2, 0xea, 0xce, 0x0, 0xd6, 0xcb, 0xc7, 0xc3, 0xcf, 0xb2, 0xf1, 0xef}}, {From: 0x160, To: 0x192, Range: []uint8{0xeb, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xee, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbe}}, {From: 0x2c6, To: 0x2dc, Range: []uint8{0xaa, 0x0, 0x0, 0x0, 0x0, 0xa9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xac}}, {From: 0x2014, To: 0x20a4, Range: []uint8{0xf6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaf}}, {From: 0x25a0, To: 0x25a0, Range: []uint8{0xfc}}}
+var ctype_koi8r_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x02, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+}
+
+var tolower_koi8r_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xa3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+}
+
+var toupper_koi8r_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xb3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_koi8r_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -251,6 +498,63 @@ var tounicode_koi8r_general_ci = [...]uint16{
var fromunicode_koi8r_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9c, 0x0, 0x9d, 0x0, 0x0, 0x0, 0x0, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, {From: 0x401, To: 0x451, Range: []uint8{0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe, 0xfb, 0xfd, 0xff, 0xf9, 0xf8, 0xfc, 0xe0, 0xf1, 0xc1, 0xc2, 0xd7, 0xc7, 0xc4, 0xc5, 0xd6, 0xda, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd2, 0xd3, 0xd4, 0xd5, 0xc6, 0xc8, 0xc3, 0xde, 0xdb, 0xdd, 0xdf, 0xd9, 0xd8, 0xdc, 0xc0, 0xd1, 0x0, 0xa3}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0x80, 0x0, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x86, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x87, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0xa1, 0xa2, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x8c, 0x0, 0x0, 0x0, 0x8d, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x8f, 0x90, 0x91, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x94}}, {From: 0x2219, To: 0x2265, Range: []uint8{0x95, 0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x98, 0x99}}, {From: 0x2320, To: 0x2321, Range: []uint8{0x93, 0x9b}}}
+var ctype_latin2_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x48, 0x01, 0x10, 0x01, 0x10, 0x01, 0x01, 0x10, 0x10, 0x01, 0x01, 0x01, 0x01, 0x10, 0x01,
+ 0x01, 0x10, 0x02, 0x10, 0x02, 0x10, 0x02, 0x02, 0x10, 0x10, 0x02, 0x02, 0x02, 0x02, 0x10, 0x02,
+ 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
+var tolower_latin2_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xb1, 0xa2, 0xb3, 0xa4, 0xb5, 0xb6, 0xa7, 0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xad, 0xbe, 0xbf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_latin2_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xa1, 0xb2, 0xa3, 0xb4, 0xa5, 0xa6, 0xb7, 0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xbd, 0xae, 0xaf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff,
+}
+
var sortorder_latin2_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -307,6 +611,63 @@ var tounicode_latin2_general_ci = [...]uint16{
var fromunicode_latin2_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0x0, 0xa4, 0x0, 0x0, 0xa7, 0xa8, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0xb0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0xb8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0xc2, 0x0, 0xc4, 0x0, 0x0, 0xc7, 0x0, 0xc9, 0x0, 0xcb, 0x0, 0xcd, 0xce, 0x0, 0x0, 0x0, 0x0, 0xd3, 0xd4, 0x0, 0xd6, 0xd7, 0x0, 0x0, 0xda, 0x0, 0xdc, 0xdd, 0x0, 0xdf, 0x0, 0xe1, 0xe2, 0x0, 0xe4, 0x0, 0x0, 0xe7, 0x0, 0xe9, 0x0, 0xeb, 0x0, 0xed, 0xee, 0x0, 0x0, 0x0, 0x0, 0xf3, 0xf4, 0x0, 0xf6, 0xf7, 0x0, 0x0, 0xfa, 0x0, 0xfc, 0xfd}}, {From: 0x102, To: 0x17e, Range: []uint8{0xc3, 0xe3, 0xa1, 0xb1, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0xcf, 0xef, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0xe5, 0x0, 0x0, 0xa5, 0xb5, 0x0, 0x0, 0xa3, 0xb3, 0xd1, 0xf1, 0x0, 0x0, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd5, 0xf5, 0x0, 0x0, 0xc0, 0xe0, 0x0, 0x0, 0xd8, 0xf8, 0xa6, 0xb6, 0x0, 0x0, 0xaa, 0xba, 0xa9, 0xb9, 0xde, 0xfe, 0xab, 0xbb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xac, 0xbc, 0xaf, 0xbf, 0xae, 0xbe}}, {From: 0x2c7, To: 0x2dd, Range: []uint8{0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0xff, 0x0, 0xb2, 0x0, 0xbd}}}
+var ctype_swe7_swedish_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x01, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x10, 0x01, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+}
+
+var tolower_swe7_swedish_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_swe7_swedish_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_swe7_swedish_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -363,7 +724,45 @@ var tounicode_swe7_swedish_ci = [...]uint16{
var fromunicode_swe7_swedish_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x0, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5b, 0x5d, 0x0, 0x0, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x7d, 0x0, 0x0, 0x0, 0x60, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7e}}}
-var sortorder_ascii_general_ci = [...]uint8{
+var ctype_ascii_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+}
+
+var tolower_ascii_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_ascii_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
@@ -419,6 +818,63 @@ var tounicode_ascii_general_ci = [...]uint16{
var fromunicode_ascii_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0x7f, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f}}}
+var ctype_cp1251_bulgarian_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x01, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01,
+ 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x02, 0x02, 0x02,
+ 0x02, 0x00, 0x01, 0x02, 0x01, 0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x01, 0x02, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x02, 0x01, 0x02,
+ 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
+var tolower_cp1251_bulgarian_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x90, 0x83, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa2, 0xa2, 0xbc, 0xa4, 0xb4, 0xa6, 0xa7, 0xb8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf,
+ 0xb0, 0xb1, 0xb3, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_cp1251_bulgarian_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x81, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x80, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x8a, 0x9b, 0x8c, 0x9d, 0x8e, 0x8f,
+ 0xa0, 0xa1, 0xa1, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb2, 0xa5, 0xb5, 0xb6, 0xb7, 0xa8, 0xb9, 0xaa, 0xbb, 0xa3, 0xbd, 0xbd, 0xaf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+}
+
var sortorder_cp1251_bulgarian_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -494,6 +950,25 @@ var sortorder_latin1_danish_ci = [...]uint8{
0x44, 0x4e, 0x4f, 0x4f, 0x4f, 0x4f, 0x5c, 0xf7, 0x5c, 0x55, 0x55, 0x55, 0x59, 0x59, 0xde, 0xff,
}
+var ctype_hebrew_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00, 0x20, 0x20,
+}
+
var sortorder_hebrew_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -550,6 +1025,63 @@ var tounicode_hebrew_general_ci = [...]uint16{
var fromunicode_hebrew_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba}}, {From: 0x5d0, To: 0x5ea, Range: []uint8{0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa}}, {From: 0x200e, To: 0x203e, Range: []uint8{0xfd, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaf}}}
+var ctype_latin7_estonian_cs = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x01, 0x20, 0x10, 0x20, 0x10, 0x10, 0x00, 0x00, 0x20, 0x10, 0x20, 0x10, 0x20, 0x10, 0x10,
+ 0x10, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x20, 0x00, 0x20, 0x10, 0x20, 0x10, 0x10,
+ 0x20, 0x48, 0x20, 0x10, 0x10, 0x10, 0x20, 0x10, 0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x01, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
+var tolower_latin7_estonian_cs = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xb8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_latin7_estonian_cs = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xa8, 0xb9, 0xaa, 0xbb, 0xbc, 0xbd, 0xbe, 0xaf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff,
+}
+
var sortorder_latin7_estonian_cs = [...]uint8{
0x00, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
@@ -625,6 +1157,63 @@ var sortorder_latin2_hungarian_ci = [...]uint8{
0xff, 0x62, 0x63, 0x64, 0x66, 0x67, 0x67, 0xff, 0x6d, 0x77, 0x75, 0x78, 0x78, 0x7e, 0x74, 0xff,
}
+var ctype_koi8u_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x02, 0x02, 0x10, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10, 0x10, 0x02, 0x10,
+ 0x10, 0x10, 0x10, 0x10, 0x01, 0x01, 0x10, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x10,
+ 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+}
+
+var tolower_koi8u_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x20, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0xa3, 0xa4, 0x20, 0xa6, 0xa7, 0x20, 0x20, 0x20, 0x20, 0x20, 0xad, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0xa3, 0xa4, 0x20, 0xa6, 0xa7, 0x20, 0x20, 0x20, 0x20, 0x20, 0xad, 0x20, 0x20,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+}
+
+var toupper_koi8u_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x20, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0xb3, 0xb4, 0x20, 0xb6, 0xb7, 0x20, 0x20, 0x20, 0x20, 0x20, 0xbd, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0xb3, 0xb4, 0x20, 0xb6, 0xb7, 0x20, 0x20, 0x20, 0x20, 0x20, 0xbd, 0x20, 0x20,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_koi8u_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -719,6 +1308,63 @@ var sortorder_gb2312_chinese_ci = [...]uint8{
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
}
+var ctype_greek_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x48, 0x10, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00, 0x10, 0x10, 0x10, 0x00,
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x10, 0x01, 0x01, 0x01, 0x10, 0x01, 0x10, 0x01,
+ 0x01, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
+var tolower_greek_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xdc, 0xb7, 0xdd, 0xde, 0xdf, 0xbb, 0xfc, 0xbd, 0xfd, 0xfe,
+ 0xc0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xd2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_greek_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xda, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xc1, 0xc5, 0xc7, 0xc9,
+ 0xdb, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd3, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xcf, 0xd5, 0xd9, 0xff,
+}
+
var sortorder_greek_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -775,6 +1421,63 @@ var tounicode_greek_general_ci = [...]uint16{
var fromunicode_greek_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xbd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0xa3, 0x0, 0x0, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0x0, 0x0, 0xb7, 0x0, 0x0, 0x0, 0xbb, 0x0, 0xbd}}, {From: 0x384, To: 0x3ce, Range: []uint8{0xb4, 0xb5, 0xb6, 0x0, 0xb8, 0xb9, 0xba, 0x0, 0xbc, 0x0, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0x0, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe}}, {From: 0x2bc, To: 0x2bd, Range: []uint8{0xa2, 0xa1}}, {From: 0x2015, To: 0x2015, Range: []uint8{0xaf}}}
+var ctype_cp1250_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x20, 0x20, 0x10, 0x20, 0x10, 0x10, 0x10, 0x10, 0x20, 0x10, 0x01, 0x10, 0x01, 0x01, 0x01,
+ 0x01, 0x20, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x20, 0x10, 0x02, 0x10, 0x02, 0x02, 0x02,
+ 0x02, 0x48, 0x10, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x01, 0x10, 0x10, 0x10, 0x02, 0x10, 0x10, 0x10, 0x10, 0x10, 0x02, 0x02, 0x10, 0x01, 0x10, 0x02,
+ 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
+var tolower_cp1250_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x9a, 0x8b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xb3, 0xa4, 0xb9, 0xa6, 0xa7, 0xa8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xbf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbe, 0xbd, 0xbe, 0xbf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_cp1250_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x8a, 0x9b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xa3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xa5, 0xaa, 0xbb, 0xbc, 0xbd, 0xbc, 0xaf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff,
+}
+
var sortorder_cp1250_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -850,6 +1553,44 @@ var sortorder_latin2_croatian_ci = [...]uint8{
0x4a, 0x57, 0x57, 0x59, 0x59, 0x59, 0x59, 0xfe, 0x5d, 0x64, 0x64, 0x64, 0x64, 0x69, 0x62, 0xff,
}
+var ctype_cp1257_lithuanian_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+}
+
+var toupper_cp1257_lithuanian_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xba, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xa8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xaf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff,
+}
+
var sortorder_cp1257_lithuanian_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -906,6 +1647,44 @@ var tounicode_cp1257_lithuanian_ci = [...]uint16{
var fromunicode_cp1257_lithuanian_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0x0, 0xa6, 0xa7, 0x8d, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x9d, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0x8f, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4, 0xc5, 0xaf, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd3, 0x0, 0xd5, 0xd6, 0xd7, 0xa8, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0xe4, 0xe5, 0xbf, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0xf5, 0xf6, 0xf7, 0xb8, 0x0, 0x0, 0x0, 0xfc}}, {From: 0x100, To: 0x17e, Range: []uint8{0xc2, 0xe2, 0x0, 0x0, 0xc0, 0xe0, 0xc3, 0xe3, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0x0, 0x0, 0x0, 0x0, 0xc7, 0xe7, 0x0, 0x0, 0xcb, 0xeb, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xee, 0x0, 0x0, 0xc1, 0xe1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xed, 0x0, 0x0, 0x0, 0xcf, 0xef, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd4, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0xba, 0x0, 0x0, 0xda, 0xfa, 0x0, 0x0, 0x0, 0x0, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd8, 0xf8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe}}, {From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, {From: 0x2c7, To: 0x2db, Range: []uint8{0x8e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x9e}}, {From: 0x2122, To: 0x2122, Range: []uint8{0x99}}}
+var tolower_latin5_turkish_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0xfd, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x69, 0xfe, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_latin5_turkish_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0xdd, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x49, 0xde, 0xff,
+}
+
var sortorder_latin5_turkish_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -962,6 +1741,63 @@ var tounicode_latin5_turkish_ci = [...]uint16{
var fromunicode_latin5_turkish_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0x0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x0, 0x0, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x0, 0x0, 0xff}}, {From: 0x11e, To: 0x15f, Range: []uint8{0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdd, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xde, 0xfe}}}
+var ctype_armscii8_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x10,
+}
+
+var tolower_armscii8_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xb8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb3, 0xb3, 0xb5, 0xb5, 0xb7, 0xb7, 0xb9, 0xb9, 0xbb, 0xbb, 0xbd, 0xbd, 0xbf, 0xbf,
+ 0xc1, 0xc1, 0xc3, 0xc3, 0xc5, 0xc5, 0xc7, 0xc7, 0xc9, 0xc9, 0xcb, 0xcb, 0xcd, 0xcd, 0xcf, 0xcf,
+ 0xd1, 0xd1, 0xd3, 0xd3, 0xd5, 0xd5, 0xd7, 0xd7, 0xd9, 0xd9, 0xdb, 0xdb, 0xdd, 0xdd, 0xdf, 0xdf,
+ 0xe1, 0xe1, 0xe3, 0xe3, 0xe5, 0xe5, 0xe7, 0xe7, 0xe9, 0xe9, 0xeb, 0xeb, 0xed, 0xed, 0xef, 0xef,
+ 0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, 0xf7, 0xf9, 0xf9, 0xfb, 0xfb, 0xfd, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_armscii8_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb2, 0xb4, 0xb4, 0xb6, 0xb6, 0xb8, 0xb8, 0xba, 0xba, 0xbc, 0xbc, 0xbe, 0xbe,
+ 0xc0, 0xc0, 0xc2, 0xc2, 0xc4, 0xc4, 0xc6, 0xc6, 0xc8, 0xc8, 0xca, 0xca, 0xcc, 0xcc, 0xce, 0xce,
+ 0xd0, 0xd0, 0xd2, 0xd2, 0xd4, 0xd4, 0xd6, 0xd6, 0xd8, 0xd8, 0xda, 0xda, 0xdc, 0xdc, 0xde, 0xde,
+ 0xe0, 0xe0, 0xe2, 0xe2, 0xe4, 0xe4, 0xe6, 0xe6, 0xe8, 0xe8, 0xea, 0xea, 0xec, 0xec, 0xee, 0xee,
+ 0xf0, 0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfa, 0xfc, 0xfc, 0xfe, 0xff,
+}
+
var tounicode_armscii8_general_ci = [...]uint16{
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
@@ -999,6 +1835,63 @@ var tounicode_armscii8_general_ci = [...]uint16{
var fromunicode_armscii8_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xbb, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x0, 0x0, 0x0, 0xa7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6}}, {From: 0x531, To: 0x589, Range: []uint8{0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xaf, 0xaa, 0xb1, 0xad, 0x0, 0xb3, 0xb5, 0xb7, 0xb9, 0xbb, 0xbd, 0xbf, 0xc1, 0xc3, 0xc5, 0xc7, 0xc9, 0xcb, 0xcd, 0xcf, 0xd1, 0xd3, 0xd5, 0xd7, 0xd9, 0xdb, 0xdd, 0xdf, 0xe1, 0xe3, 0xe5, 0xe7, 0xe9, 0xeb, 0xed, 0xef, 0xf1, 0xf3, 0xf5, 0xf7, 0xf9, 0xfb, 0xfd, 0x0, 0x0, 0xa3}}, {From: 0x2014, To: 0x2026, Range: []uint8{0xa8, 0x0, 0x0, 0x0, 0x0, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xae}}, {From: 0x2741, To: 0x2741, Range: []uint8{0xa1}}}
+var ctype_cp866_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+}
+
+var tolower_cp866_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x86, 0x87, 0x88, 0x89, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0x86, 0x87, 0x88, 0x89, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_cp866_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xf0, 0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_cp866_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -1055,6 +1948,63 @@ var tounicode_cp866_general_ci = [...]uint16{
var fromunicode_cp866_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xb7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf8, 0x0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0xfa}}, {From: 0x401, To: 0x45e, Range: []uint8{0xf0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf6, 0x0, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0x0, 0x0, 0xf3, 0x0, 0x0, 0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0xde, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, {From: 0x2219, To: 0x221a, Range: []uint8{0xf9, 0xfb}}, {From: 0x207f, To: 0x207f, Range: []uint8{0xfc}}}
+var ctype_keybcs2_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x01, 0x02, 0x82, 0x02, 0x02, 0x01, 0x01, 0x02, 0x82, 0x81, 0x01, 0x01, 0x02, 0x02, 0x01,
+ 0x01, 0x81, 0x02, 0x01, 0x02, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x02, 0x01, 0x02, 0x01, 0x02, 0x00, 0x02, 0x01, 0x01, 0x01, 0x02, 0x00, 0x02, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+}
+
+var tolower_keybcs2_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x87, 0x81, 0x82, 0x83, 0x84, 0x83, 0x86, 0x87, 0x88, 0x88, 0x8d, 0xa1, 0x8c, 0x8d, 0x84, 0xa0,
+ 0x82, 0x91, 0x91, 0x93, 0x94, 0xa2, 0x96, 0xa3, 0x98, 0x94, 0x81, 0x9b, 0x8c, 0x98, 0xa9, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0x96, 0x93, 0x9b, 0xa9, 0xaa, 0xaa, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xed, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_keybcs2_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x68, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x87, 0x9a, 0x90, 0x85, 0x8e, 0x85, 0x86, 0x80, 0x89, 0x89, 0x8a, 0x8b, 0x9c, 0x8a, 0x8e, 0x8f,
+ 0x90, 0x92, 0x92, 0xa7, 0x99, 0x95, 0xa6, 0x97, 0x9d, 0x99, 0x9a, 0xa8, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0x8f, 0x8b, 0x95, 0x97, 0xa5, 0xa5, 0xa6, 0xa7, 0xa8, 0x9e, 0xab, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xe8, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_keybcs2_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -1111,6 +2061,63 @@ var tounicode_keybcs2_general_ci = [...]uint16{
var fromunicode_keybcs2_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xad, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xae, 0x0, 0x0, 0x0, 0x0, 0xf8, 0xf1, 0xfd, 0x0, 0x0, 0xe6, 0x0, 0xfa, 0x0, 0x0, 0x0, 0xaf, 0xac, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x0, 0x90, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x95, 0xa7, 0x0, 0x99, 0x0, 0x0, 0x0, 0x97, 0x0, 0x9a, 0x9d, 0x0, 0xe1, 0x0, 0xa0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x93, 0x0, 0x94, 0xf6, 0x0, 0x0, 0xa3, 0x0, 0x81, 0x98}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0xde, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, {From: 0x10c, To: 0x17e, Range: []uint8{0x80, 0x87, 0x85, 0x83, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x8d, 0x0, 0x0, 0x9c, 0x8c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa5, 0xa4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xab, 0xaa, 0x0, 0x0, 0x9e, 0xa9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9b, 0xa8, 0x0, 0x0, 0x86, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6, 0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x92, 0x91}}, {From: 0x393, To: 0x3c6, Range: []uint8{0xe2, 0x0, 0x0, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe4, 0x0, 0x0, 0xe8, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x0, 0x0, 0xeb, 0xee, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe3, 0x0, 0x0, 0xe5, 0xe7, 0x0, 0xed}}, {From: 0x2219, To: 0x2265, Range: []uint8{0xf9, 0xfb, 0x0, 0x0, 0x0, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xef, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0xf3, 0xf2}}, {From: 0x2320, To: 0x2321, Range: []uint8{0xf4, 0xf5}}, {From: 0x207f, To: 0x207f, Range: []uint8{0xfc}}}
+var ctype_macce_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x01, 0x02, 0x02, 0x01, 0x02, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x02, 0x01, 0x02, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02,
+ 0x02, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x02,
+ 0x01, 0x02, 0x01, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x01, 0x02,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x02, 0x01, 0x00, 0x00, 0x02,
+ 0x01, 0x02, 0x01, 0x00, 0x00, 0x02, 0x01, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
+ 0x01, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x02, 0x01, 0x01, 0x02, 0x01,
+}
+
+var tolower_macce_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x54, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x54, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x8a, 0x82, 0x82, 0x8e, 0x88, 0x9a, 0x9f, 0x87, 0x88, 0x8b, 0x8a, 0x8b, 0x8d, 0x8d, 0x8e, 0x90,
+ 0x90, 0x93, 0x92, 0x93, 0x95, 0x95, 0x98, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9e, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xab, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xb0,
+ 0xb0, 0xb4, 0xb2, 0xb3, 0xb4, 0xfa, 0xb6, 0xb7, 0xb8, 0xba, 0xba, 0xbc, 0xbc, 0xbe, 0xbe, 0xc0,
+ 0xc0, 0xc4, 0xc2, 0xc3, 0xc4, 0xcb, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xce, 0x9b, 0xce, 0xd8,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xda, 0xda, 0xde, 0xdc, 0xdd, 0xde, 0xe0,
+ 0xe0, 0xe4, 0xe2, 0xe3, 0xe4, 0xe6, 0xe6, 0x87, 0xe9, 0xe9, 0x92, 0xec, 0xec, 0xf0, 0x97, 0x99,
+ 0xf0, 0xf3, 0x9c, 0xf3, 0xf5, 0xf5, 0xf7, 0xf7, 0xf9, 0xf9, 0xfa, 0xfd, 0xb8, 0xfd, 0xae, 0xff,
+}
+
+var toupper_macce_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x74, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x74, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x81, 0x83, 0x84, 0x85, 0x86, 0xe7, 0x84, 0x89, 0x80, 0x89, 0x8c, 0x8c, 0x83, 0x8f,
+ 0x8f, 0x91, 0xea, 0x91, 0x94, 0x94, 0x96, 0xee, 0x96, 0xef, 0x85, 0xcd, 0xf2, 0x9d, 0x9d, 0x86,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xa2, 0xac, 0xad, 0xfe, 0xaf,
+ 0xaf, 0xb1, 0xb2, 0xb3, 0xb1, 0xb5, 0xb6, 0xb7, 0xfc, 0xb9, 0xb9, 0xbb, 0xbb, 0xbd, 0xbd, 0xbf,
+ 0xbf, 0xc1, 0xc2, 0xc3, 0xc1, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xc5, 0xcc, 0xcd, 0xcc, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xcf, 0xd9, 0xd9, 0xdb, 0xdc, 0xdd, 0xdb, 0xdf,
+ 0xdf, 0xe1, 0xe2, 0xe3, 0xe1, 0xe5, 0xe5, 0xe7, 0xe8, 0xe8, 0xea, 0xeb, 0xeb, 0xed, 0xee, 0xef,
+ 0xed, 0xf1, 0xf2, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xb5, 0xfb, 0xfc, 0xfb, 0xfe, 0xff,
+}
+
var sortorder_macce_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -1167,6 +2174,63 @@ var tounicode_macce_general_ci = [...]uint16{
var fromunicode_macce_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0x0, 0x0, 0xa3, 0x0, 0x0, 0x0, 0xa4, 0xac, 0xa9, 0x0, 0xc7, 0xc2, 0x0, 0xa8, 0x0, 0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe7, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x0, 0x0, 0xee, 0xef, 0xcd, 0x85, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x86, 0xf8, 0x0, 0xa7, 0x0, 0x87, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x99, 0x9b, 0x9a, 0xd6, 0x0, 0x0, 0x9c, 0x0, 0x9f, 0xf9}}, {From: 0x100, To: 0x17e, Range: []uint8{0x81, 0x82, 0x0, 0x0, 0x84, 0x88, 0x8c, 0x8d, 0x0, 0x0, 0x0, 0x0, 0x89, 0x8b, 0x91, 0x93, 0x0, 0x0, 0x94, 0x95, 0x0, 0x0, 0x96, 0x98, 0xa2, 0xab, 0x9d, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe, 0xae, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb1, 0xb4, 0x0, 0x0, 0xaf, 0xb0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb5, 0xfa, 0x0, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x0, 0x0, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, 0xcb, 0x0, 0x0, 0x0, 0xcf, 0xd8, 0x0, 0x0, 0xcc, 0xce, 0x0, 0x0, 0xd9, 0xda, 0xdf, 0xe0, 0xdb, 0xde, 0xe5, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe4, 0x0, 0x0, 0xe8, 0xe9, 0x0, 0x0, 0x0, 0x0, 0xed, 0xf0, 0x0, 0x0, 0xf1, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec}}, {From: 0x2013, To: 0x203a, Range: []uint8{0xd0, 0xd1, 0x0, 0x0, 0x0, 0xd4, 0xd5, 0xe2, 0x0, 0xd2, 0xd3, 0xe3, 0x0, 0xa0, 0x0, 0xa5, 0x0, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd}}, {From: 0x2202, To: 0x2265, Range: []uint8{0xb6, 0x0, 0x0, 0x0, 0xc6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0x0, 0xb2, 0xb3}}, {From: 0x2c7, To: 0x2c7, Range: []uint8{0xff}}, {From: 0x2122, To: 0x2122, Range: []uint8{0xaa}}, {From: 0x25ca, To: 0x25ca, Range: []uint8{0xd7}}}
+var ctype_macroman_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x20, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x01, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+}
+
+var tolower_macroman_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x8a, 0x8c, 0x8d, 0x8e, 0x96, 0x9a, 0x9f, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xbe, 0xbf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0x88, 0x8b, 0x9b, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd8, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0x89, 0x90, 0x87, 0x91, 0x8f, 0x92, 0x94, 0x95, 0x93, 0x97, 0x99,
+ 0xf0, 0x98, 0x9c, 0x9e, 0x9d, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_macroman_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0xe7, 0xcb, 0xe5, 0x80, 0xcc, 0x81, 0x82, 0x83, 0xe9,
+ 0xe6, 0xe8, 0xea, 0xed, 0xeb, 0xec, 0x84, 0xee, 0xf1, 0xef, 0x85, 0xcd, 0xf2, 0xf4, 0xf3, 0x86,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xae, 0xaf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd9, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_macroman_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -1223,6 +2287,63 @@ var tounicode_macroman_general_ci = [...]uint16{
var fromunicode_macroman_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xc1, 0xa2, 0xa3, 0x0, 0xb4, 0x0, 0xa4, 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x0, 0xa8, 0xf8, 0xa1, 0xb1, 0x0, 0x0, 0xab, 0xb5, 0xa6, 0xe1, 0xfc, 0x0, 0xbc, 0xc8, 0x0, 0x0, 0x0, 0xc0, 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, 0x0, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x0, 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x0, 0x0, 0xa7, 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, 0x0, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x0, 0x0, 0xd8}}, {From: 0x2013, To: 0x20ac, Range: []uint8{0xd0, 0xd1, 0x0, 0x0, 0x0, 0xd4, 0xd5, 0xe2, 0x0, 0xd2, 0xd3, 0xe3, 0x0, 0xa0, 0xe0, 0xa5, 0x0, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb}}, {From: 0x2202, To: 0x2265, Range: []uint8{0xb6, 0x0, 0x0, 0x0, 0xc6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb8, 0x0, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0xb0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0x0, 0xb2, 0xb3}}, {From: 0x2c6, To: 0x2dd, Range: []uint8{0xf6, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd}}, {From: 0x131, To: 0x192, Range: []uint8{0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xcf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4}}, {From: 0x3a9, To: 0x3c0, Range: []uint8{0xbd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb9}}, {From: 0xfb01, To: 0xfb02, Range: []uint8{0xde, 0xdf}}, {From: 0x2122, To: 0x2122, Range: []uint8{0xaa}}, {From: 0x25ca, To: 0x25ca, Range: []uint8{0xd7}}, {From: 0xf8ff, To: 0xf8ff, Range: []uint8{0xf0}}}
+var ctype_cp852_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x00, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x01, 0x01, 0x02, 0x01, 0x00,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x00, 0x02, 0x01, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
+ 0x00, 0x01, 0x02, 0x01, 0x01, 0x02, 0x02, 0x01, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x02, 0x00,
+}
+
+var tolower_cp852_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8b, 0x8b, 0x8c, 0xab, 0x84, 0x86,
+ 0x82, 0x92, 0x92, 0x93, 0x94, 0x96, 0x96, 0x98, 0x98, 0x94, 0x81, 0x9c, 0x9c, 0x88, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa5, 0xa5, 0xa7, 0xa7, 0xa9, 0xa9, 0xaa, 0xab, 0x9f, 0xb8, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xa0, 0x83, 0xd8, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc7, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd0, 0xd4, 0x89, 0xd4, 0xe5, 0xa1, 0x8c, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xee, 0x85, 0xdf,
+ 0xa2, 0xe1, 0x93, 0xe4, 0xe4, 0xe5, 0xe7, 0xe7, 0xea, 0xa3, 0xe8, 0xfb, 0xec, 0xec, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_cp852_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x9a, 0x90, 0xb6, 0x8e, 0xde, 0x8f, 0x80, 0x9d, 0xd3, 0x8a, 0x8a, 0xd7, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x91, 0xe2, 0x99, 0x95, 0x95, 0x97, 0x97, 0x99, 0x9a, 0x9b, 0x9b, 0x9d, 0x9e, 0xac,
+ 0xb5, 0xd6, 0xe0, 0xe9, 0xa4, 0xa4, 0xa6, 0xa6, 0xa8, 0xa8, 0xaa, 0x8d, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xad, 0xb9, 0xba, 0xbb, 0xbc, 0xbe, 0xbd, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc6, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd1, 0xd1, 0xd2, 0xd3, 0xd2, 0xd5, 0xd6, 0xd7, 0xb7, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe3, 0xd5, 0xe6, 0xe6, 0xe8, 0xe9, 0xe8, 0xeb, 0xed, 0xed, 0xdd, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xeb, 0xfc, 0xfc, 0xfe, 0xff,
+}
+
var sortorder_cp852_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -1412,6 +2533,63 @@ var sortorder_cp1251_general_cs = [...]uint8{
0xae, 0xb0, 0xb2, 0xb6, 0xba, 0xbc, 0xbe, 0xc0, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2,
}
+var ctype_cp1256_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x00,
+ 0x00, 0x00, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x03,
+ 0x00, 0x03, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,
+ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x00, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x02, 0x03, 0x02, 0x03, 0x03, 0x03, 0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x02,
+ 0x02, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x03, 0x00, 0x03, 0x02, 0x03, 0x02, 0x02, 0x00, 0x00,
+}
+
+var tolower_cp1256_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x54, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x54, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x9c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
+var toupper_cp1256_general_ci = [...]uint8{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x74, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5f, 0x5e, 0x5f,
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x74, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7f, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x8c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+}
+
var sortorder_cp1256_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -1487,6 +2665,25 @@ var sortorder_cp1257_general_ci = [...]uint8{
0x97, 0x7d, 0x7d, 0x83, 0x83, 0x83, 0x83, 0xc3, 0xa0, 0x75, 0x97, 0xa0, 0xa0, 0xb0, 0xb0, 0xff,
}
+var ctype_geostd8_general_ci = [...]uint8{
+ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x48, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x10, 0x10, 0x10,
+ 0x10, 0x10, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x10, 0x10, 0x10, 0x10,
+ 0x20, 0x00, 0x00, 0x10, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
+ 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+}
+
var sortorder_geostd8_general_ci = [...]uint8{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
@@ -2506,7 +3703,10 @@ func init() {
id: 0x3,
name: "dec8_swedish_ci",
simpletables: simpletables{
- sort: &sortorder_dec8_swedish_ci,
+ ctype: &ctype_dec8_swedish_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ sort: &sortorder_dec8_swedish_ci,
},
charset: &charset.Charset_8bit{
Name_: "dec8",
@@ -2518,7 +3718,10 @@ func init() {
id: 0x4,
name: "cp850_general_ci",
simpletables: simpletables{
- sort: &sortorder_cp850_general_ci,
+ ctype: &ctype_cp850_general_ci,
+ tolower: &tolower_cp850_general_ci,
+ toupper: &toupper_cp850_general_ci,
+ sort: &sortorder_cp850_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp850",
@@ -2530,7 +3733,10 @@ func init() {
id: 0x5,
name: "latin1_german1_ci",
simpletables: simpletables{
- sort: &sortorder_latin1_german1_ci,
+ ctype: &ctype_latin1_german1_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ sort: &sortorder_latin1_german1_ci,
},
charset: charset.Charset_latin1{},
})
@@ -2538,7 +3744,10 @@ func init() {
id: 0x6,
name: "hp8_english_ci",
simpletables: simpletables{
- sort: &sortorder_hp8_english_ci,
+ ctype: &ctype_hp8_english_ci,
+ tolower: &tolower_hp8_english_ci,
+ toupper: &toupper_hp8_english_ci,
+ sort: &sortorder_hp8_english_ci,
},
charset: &charset.Charset_8bit{
Name_: "hp8",
@@ -2550,7 +3759,10 @@ func init() {
id: 0x7,
name: "koi8r_general_ci",
simpletables: simpletables{
- sort: &sortorder_koi8r_general_ci,
+ ctype: &ctype_koi8r_general_ci,
+ tolower: &tolower_koi8r_general_ci,
+ toupper: &toupper_koi8r_general_ci,
+ sort: &sortorder_koi8r_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "koi8r",
@@ -2562,7 +3774,10 @@ func init() {
id: 0x8,
name: "latin1_swedish_ci",
simpletables: simpletables{
- sort: &sortorder_dec8_swedish_ci,
+ ctype: &ctype_latin1_german1_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ sort: &sortorder_dec8_swedish_ci,
},
charset: charset.Charset_latin1{},
})
@@ -2570,7 +3785,10 @@ func init() {
id: 0x9,
name: "latin2_general_ci",
simpletables: simpletables{
- sort: &sortorder_latin2_general_ci,
+ ctype: &ctype_latin2_general_ci,
+ tolower: &tolower_latin2_general_ci,
+ toupper: &toupper_latin2_general_ci,
+ sort: &sortorder_latin2_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "latin2",
@@ -2582,7 +3800,10 @@ func init() {
id: 0xa,
name: "swe7_swedish_ci",
simpletables: simpletables{
- sort: &sortorder_swe7_swedish_ci,
+ ctype: &ctype_swe7_swedish_ci,
+ tolower: &tolower_swe7_swedish_ci,
+ toupper: &toupper_swe7_swedish_ci,
+ sort: &sortorder_swe7_swedish_ci,
},
charset: &charset.Charset_8bit{
Name_: "swe7",
@@ -2594,7 +3815,10 @@ func init() {
id: 0xb,
name: "ascii_general_ci",
simpletables: simpletables{
- sort: &sortorder_ascii_general_ci,
+ ctype: &ctype_ascii_general_ci,
+ tolower: &tolower_ascii_general_ci,
+ toupper: &toupper_ascii_general_ci,
+ sort: &toupper_ascii_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "ascii",
@@ -2605,20 +3829,23 @@ func init() {
register(&Collation_multibyte{
id: 0xc,
name: "ujis_japanese_ci",
- sort: &sortorder_ascii_general_ci,
+ sort: &toupper_ascii_general_ci,
charset: charset.Charset_ujis{},
})
register(&Collation_multibyte{
id: 0xd,
name: "sjis_japanese_ci",
- sort: &sortorder_ascii_general_ci,
+ sort: &toupper_ascii_general_ci,
charset: charset.Charset_sjis{},
})
register(&Collation_8bit_simple_ci{
id: 0xe,
name: "cp1251_bulgarian_ci",
simpletables: simpletables{
- sort: &sortorder_cp1251_bulgarian_ci,
+ ctype: &ctype_cp1251_bulgarian_ci,
+ tolower: &tolower_cp1251_bulgarian_ci,
+ toupper: &toupper_cp1251_bulgarian_ci,
+ sort: &sortorder_cp1251_bulgarian_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1251",
@@ -2630,7 +3857,10 @@ func init() {
id: 0xf,
name: "latin1_danish_ci",
simpletables: simpletables{
- sort: &sortorder_latin1_danish_ci,
+ ctype: &ctype_latin1_german1_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ sort: &sortorder_latin1_danish_ci,
},
charset: charset.Charset_latin1{},
})
@@ -2638,7 +3868,10 @@ func init() {
id: 0x10,
name: "hebrew_general_ci",
simpletables: simpletables{
- sort: &sortorder_hebrew_general_ci,
+ ctype: &ctype_hebrew_general_ci,
+ tolower: &tolower_ascii_general_ci,
+ toupper: &toupper_ascii_general_ci,
+ sort: &sortorder_hebrew_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "hebrew",
@@ -2649,14 +3882,17 @@ func init() {
register(&Collation_multibyte{
id: 0x13,
name: "euckr_korean_ci",
- sort: &sortorder_ascii_general_ci,
+ sort: &toupper_ascii_general_ci,
charset: charset.Charset_euckr{},
})
register(&Collation_8bit_simple_ci{
id: 0x14,
name: "latin7_estonian_cs",
simpletables: simpletables{
- sort: &sortorder_latin7_estonian_cs,
+ ctype: &ctype_latin7_estonian_cs,
+ tolower: &tolower_latin7_estonian_cs,
+ toupper: &toupper_latin7_estonian_cs,
+ sort: &sortorder_latin7_estonian_cs,
},
charset: &charset.Charset_8bit{
Name_: "latin7",
@@ -2668,7 +3904,10 @@ func init() {
id: 0x15,
name: "latin2_hungarian_ci",
simpletables: simpletables{
- sort: &sortorder_latin2_hungarian_ci,
+ ctype: &ctype_latin2_general_ci,
+ tolower: &tolower_latin2_general_ci,
+ toupper: &toupper_latin2_general_ci,
+ sort: &sortorder_latin2_hungarian_ci,
},
charset: &charset.Charset_8bit{
Name_: "latin2",
@@ -2680,7 +3919,10 @@ func init() {
id: 0x16,
name: "koi8u_general_ci",
simpletables: simpletables{
- sort: &sortorder_koi8u_general_ci,
+ ctype: &ctype_koi8u_general_ci,
+ tolower: &tolower_koi8u_general_ci,
+ toupper: &toupper_koi8u_general_ci,
+ sort: &sortorder_koi8u_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "koi8u",
@@ -2692,7 +3934,10 @@ func init() {
id: 0x17,
name: "cp1251_ukrainian_ci",
simpletables: simpletables{
- sort: &sortorder_cp1251_ukrainian_ci,
+ ctype: &ctype_cp1251_bulgarian_ci,
+ tolower: &tolower_cp1251_bulgarian_ci,
+ toupper: &toupper_cp1251_bulgarian_ci,
+ sort: &sortorder_cp1251_ukrainian_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1251",
@@ -2710,7 +3955,10 @@ func init() {
id: 0x19,
name: "greek_general_ci",
simpletables: simpletables{
- sort: &sortorder_greek_general_ci,
+ ctype: &ctype_greek_general_ci,
+ tolower: &tolower_greek_general_ci,
+ toupper: &toupper_greek_general_ci,
+ sort: &sortorder_greek_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "greek",
@@ -2722,7 +3970,10 @@ func init() {
id: 0x1a,
name: "cp1250_general_ci",
simpletables: simpletables{
- sort: &sortorder_cp1250_general_ci,
+ ctype: &ctype_cp1250_general_ci,
+ tolower: &tolower_cp1250_general_ci,
+ toupper: &toupper_cp1250_general_ci,
+ sort: &sortorder_cp1250_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1250",
@@ -2734,7 +3985,10 @@ func init() {
id: 0x1b,
name: "latin2_croatian_ci",
simpletables: simpletables{
- sort: &sortorder_latin2_croatian_ci,
+ ctype: &ctype_latin2_general_ci,
+ tolower: &tolower_latin2_general_ci,
+ toupper: &toupper_latin2_general_ci,
+ sort: &sortorder_latin2_croatian_ci,
},
charset: &charset.Charset_8bit{
Name_: "latin2",
@@ -2746,7 +4000,10 @@ func init() {
id: 0x1d,
name: "cp1257_lithuanian_ci",
simpletables: simpletables{
- sort: &sortorder_cp1257_lithuanian_ci,
+ ctype: &ctype_cp1257_lithuanian_ci,
+ tolower: &tolower_latin7_estonian_cs,
+ toupper: &toupper_cp1257_lithuanian_ci,
+ sort: &sortorder_cp1257_lithuanian_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1257",
@@ -2758,7 +4015,10 @@ func init() {
id: 0x1e,
name: "latin5_turkish_ci",
simpletables: simpletables{
- sort: &sortorder_latin5_turkish_ci,
+ ctype: &ctype_dec8_swedish_ci,
+ tolower: &tolower_latin5_turkish_ci,
+ toupper: &toupper_latin5_turkish_ci,
+ sort: &sortorder_latin5_turkish_ci,
},
charset: &charset.Charset_8bit{
Name_: "latin5",
@@ -2770,7 +4030,10 @@ func init() {
id: 0x20,
name: "armscii8_general_ci",
simpletables: simpletables{
- sort: &sortorder_ascii_general_ci,
+ ctype: &ctype_armscii8_general_ci,
+ tolower: &tolower_armscii8_general_ci,
+ toupper: &toupper_armscii8_general_ci,
+ sort: &toupper_ascii_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "armscii8",
@@ -2794,7 +4057,10 @@ func init() {
id: 0x24,
name: "cp866_general_ci",
simpletables: simpletables{
- sort: &sortorder_cp866_general_ci,
+ ctype: &ctype_cp866_general_ci,
+ tolower: &tolower_cp866_general_ci,
+ toupper: &toupper_cp866_general_ci,
+ sort: &sortorder_cp866_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp866",
@@ -2806,7 +4072,10 @@ func init() {
id: 0x25,
name: "keybcs2_general_ci",
simpletables: simpletables{
- sort: &sortorder_keybcs2_general_ci,
+ ctype: &ctype_keybcs2_general_ci,
+ tolower: &tolower_keybcs2_general_ci,
+ toupper: &toupper_keybcs2_general_ci,
+ sort: &sortorder_keybcs2_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "keybcs2",
@@ -2818,7 +4087,10 @@ func init() {
id: 0x26,
name: "macce_general_ci",
simpletables: simpletables{
- sort: &sortorder_macce_general_ci,
+ ctype: &ctype_macce_general_ci,
+ tolower: &tolower_macce_general_ci,
+ toupper: &toupper_macce_general_ci,
+ sort: &sortorder_macce_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "macce",
@@ -2830,7 +4102,10 @@ func init() {
id: 0x27,
name: "macroman_general_ci",
simpletables: simpletables{
- sort: &sortorder_macroman_general_ci,
+ ctype: &ctype_macroman_general_ci,
+ tolower: &tolower_macroman_general_ci,
+ toupper: &toupper_macroman_general_ci,
+ sort: &sortorder_macroman_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "macroman",
@@ -2842,7 +4117,10 @@ func init() {
id: 0x28,
name: "cp852_general_ci",
simpletables: simpletables{
- sort: &sortorder_cp852_general_ci,
+ ctype: &ctype_cp852_general_ci,
+ tolower: &tolower_cp852_general_ci,
+ toupper: &toupper_cp852_general_ci,
+ sort: &sortorder_cp852_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp852",
@@ -2854,7 +4132,10 @@ func init() {
id: 0x29,
name: "latin7_general_ci",
simpletables: simpletables{
- sort: &sortorder_latin7_general_ci,
+ ctype: &ctype_latin7_estonian_cs,
+ tolower: &tolower_latin7_estonian_cs,
+ toupper: &toupper_latin7_estonian_cs,
+ sort: &sortorder_latin7_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "latin7",
@@ -2866,7 +4147,10 @@ func init() {
id: 0x2a,
name: "latin7_general_cs",
simpletables: simpletables{
- sort: &sortorder_latin7_general_cs,
+ ctype: &ctype_latin7_estonian_cs,
+ tolower: &tolower_latin7_estonian_cs,
+ toupper: &toupper_latin7_estonian_cs,
+ sort: &sortorder_latin7_general_cs,
},
charset: &charset.Charset_8bit{
Name_: "latin7",
@@ -2875,9 +4159,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x2b,
- name: "macce_bin",
- simpletables: simpletables{},
+ id: 0x2b,
+ name: "macce_bin",
+ simpletables: simpletables{
+ ctype: &ctype_macce_general_ci,
+ tolower: &tolower_macce_general_ci,
+ toupper: &toupper_macce_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "macce",
ToUnicode: &tounicode_macce_general_ci,
@@ -2888,7 +4176,10 @@ func init() {
id: 0x2c,
name: "cp1250_croatian_ci",
simpletables: simpletables{
- sort: &sortorder_cp1250_croatian_ci,
+ ctype: &ctype_cp1250_general_ci,
+ tolower: &tolower_cp1250_general_ci,
+ toupper: &toupper_cp1250_general_ci,
+ sort: &sortorder_cp1250_croatian_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1250",
@@ -2908,16 +4199,23 @@ func init() {
charset: charset.Charset_utf8mb4{},
})
register(&Collation_8bit_bin{
- id: 0x2f,
- name: "latin1_bin",
- simpletables: simpletables{},
- charset: charset.Charset_latin1{},
+ id: 0x2f,
+ name: "latin1_bin",
+ simpletables: simpletables{
+ ctype: &ctype_latin1_german1_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ },
+ charset: charset.Charset_latin1{},
})
register(&Collation_8bit_simple_ci{
id: 0x30,
name: "latin1_general_ci",
simpletables: simpletables{
- sort: &sortorder_latin1_general_ci,
+ ctype: &ctype_latin1_german1_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ sort: &sortorder_latin1_general_ci,
},
charset: charset.Charset_latin1{},
})
@@ -2925,14 +4223,21 @@ func init() {
id: 0x31,
name: "latin1_general_cs",
simpletables: simpletables{
- sort: &sortorder_latin1_general_cs,
+ ctype: &ctype_latin1_german1_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ sort: &sortorder_latin1_general_cs,
},
charset: charset.Charset_latin1{},
})
register(&Collation_8bit_bin{
- id: 0x32,
- name: "cp1251_bin",
- simpletables: simpletables{},
+ id: 0x32,
+ name: "cp1251_bin",
+ simpletables: simpletables{
+ ctype: &ctype_cp1251_bulgarian_ci,
+ tolower: &tolower_cp1251_bulgarian_ci,
+ toupper: &toupper_cp1251_bulgarian_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "cp1251",
ToUnicode: &tounicode_cp1251_bulgarian_ci,
@@ -2943,7 +4248,10 @@ func init() {
id: 0x33,
name: "cp1251_general_ci",
simpletables: simpletables{
- sort: &sortorder_cp1251_general_ci,
+ ctype: &ctype_cp1251_bulgarian_ci,
+ tolower: &tolower_cp1251_bulgarian_ci,
+ toupper: &toupper_cp1251_bulgarian_ci,
+ sort: &sortorder_cp1251_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1251",
@@ -2955,7 +4263,10 @@ func init() {
id: 0x34,
name: "cp1251_general_cs",
simpletables: simpletables{
- sort: &sortorder_cp1251_general_cs,
+ ctype: &ctype_cp1251_bulgarian_ci,
+ tolower: &tolower_cp1251_bulgarian_ci,
+ toupper: &toupper_cp1251_bulgarian_ci,
+ sort: &sortorder_cp1251_general_cs,
},
charset: &charset.Charset_8bit{
Name_: "cp1251",
@@ -2964,9 +4275,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x35,
- name: "macroman_bin",
- simpletables: simpletables{},
+ id: 0x35,
+ name: "macroman_bin",
+ simpletables: simpletables{
+ ctype: &ctype_macroman_general_ci,
+ tolower: &tolower_macroman_general_ci,
+ toupper: &toupper_macroman_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "macroman",
ToUnicode: &tounicode_macroman_general_ci,
@@ -2994,7 +4309,10 @@ func init() {
id: 0x39,
name: "cp1256_general_ci",
simpletables: simpletables{
- sort: &sortorder_cp1256_general_ci,
+ ctype: &ctype_cp1256_general_ci,
+ tolower: &tolower_cp1256_general_ci,
+ toupper: &toupper_cp1256_general_ci,
+ sort: &sortorder_cp1256_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1256",
@@ -3003,9 +4321,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x3a,
- name: "cp1257_bin",
- simpletables: simpletables{},
+ id: 0x3a,
+ name: "cp1257_bin",
+ simpletables: simpletables{
+ ctype: &ctype_cp1257_lithuanian_ci,
+ tolower: &tolower_latin7_estonian_cs,
+ toupper: &toupper_cp1257_lithuanian_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "cp1257",
ToUnicode: &tounicode_cp1257_lithuanian_ci,
@@ -3016,7 +4338,10 @@ func init() {
id: 0x3b,
name: "cp1257_general_ci",
simpletables: simpletables{
- sort: &sortorder_cp1257_general_ci,
+ ctype: &ctype_cp1257_lithuanian_ci,
+ tolower: &tolower_latin7_estonian_cs,
+ toupper: &toupper_cp1257_lithuanian_ci,
+ sort: &sortorder_cp1257_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1257",
@@ -3041,9 +4366,13 @@ func init() {
charset: charset.Charset_utf16le{},
})
register(&Collation_8bit_bin{
- id: 0x40,
- name: "armscii8_bin",
- simpletables: simpletables{},
+ id: 0x40,
+ name: "armscii8_bin",
+ simpletables: simpletables{
+ ctype: &ctype_armscii8_general_ci,
+ tolower: &tolower_armscii8_general_ci,
+ toupper: &toupper_armscii8_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "armscii8",
ToUnicode: &tounicode_armscii8_general_ci,
@@ -3051,9 +4380,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x41,
- name: "ascii_bin",
- simpletables: simpletables{},
+ id: 0x41,
+ name: "ascii_bin",
+ simpletables: simpletables{
+ ctype: &ctype_ascii_general_ci,
+ tolower: &tolower_ascii_general_ci,
+ toupper: &toupper_ascii_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "ascii",
ToUnicode: &tounicode_ascii_general_ci,
@@ -3061,9 +4394,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x42,
- name: "cp1250_bin",
- simpletables: simpletables{},
+ id: 0x42,
+ name: "cp1250_bin",
+ simpletables: simpletables{
+ ctype: &ctype_cp1250_general_ci,
+ tolower: &tolower_cp1250_general_ci,
+ toupper: &toupper_cp1250_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "cp1250",
ToUnicode: &tounicode_cp1250_general_ci,
@@ -3071,9 +4408,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x43,
- name: "cp1256_bin",
- simpletables: simpletables{},
+ id: 0x43,
+ name: "cp1256_bin",
+ simpletables: simpletables{
+ ctype: &ctype_cp1256_general_ci,
+ tolower: &tolower_cp1256_general_ci,
+ toupper: &toupper_cp1256_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "cp1256",
ToUnicode: &tounicode_cp1256_general_ci,
@@ -3081,9 +4422,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x44,
- name: "cp866_bin",
- simpletables: simpletables{},
+ id: 0x44,
+ name: "cp866_bin",
+ simpletables: simpletables{
+ ctype: &ctype_cp866_general_ci,
+ tolower: &tolower_cp866_general_ci,
+ toupper: &toupper_cp866_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "cp866",
ToUnicode: &tounicode_cp866_general_ci,
@@ -3091,9 +4436,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x45,
- name: "dec8_bin",
- simpletables: simpletables{},
+ id: 0x45,
+ name: "dec8_bin",
+ simpletables: simpletables{
+ ctype: &ctype_dec8_swedish_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "dec8",
ToUnicode: &tounicode_dec8_swedish_ci,
@@ -3101,9 +4450,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x46,
- name: "greek_bin",
- simpletables: simpletables{},
+ id: 0x46,
+ name: "greek_bin",
+ simpletables: simpletables{
+ ctype: &ctype_greek_general_ci,
+ tolower: &tolower_greek_general_ci,
+ toupper: &toupper_greek_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "greek",
ToUnicode: &tounicode_greek_general_ci,
@@ -3111,9 +4464,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x47,
- name: "hebrew_bin",
- simpletables: simpletables{},
+ id: 0x47,
+ name: "hebrew_bin",
+ simpletables: simpletables{
+ ctype: &ctype_hebrew_general_ci,
+ tolower: &tolower_ascii_general_ci,
+ toupper: &toupper_ascii_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "hebrew",
ToUnicode: &tounicode_hebrew_general_ci,
@@ -3121,9 +4478,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x48,
- name: "hp8_bin",
- simpletables: simpletables{},
+ id: 0x48,
+ name: "hp8_bin",
+ simpletables: simpletables{
+ ctype: &ctype_hp8_english_ci,
+ tolower: &tolower_hp8_english_ci,
+ toupper: &toupper_hp8_english_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "hp8",
ToUnicode: &tounicode_hp8_english_ci,
@@ -3131,9 +4492,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x49,
- name: "keybcs2_bin",
- simpletables: simpletables{},
+ id: 0x49,
+ name: "keybcs2_bin",
+ simpletables: simpletables{
+ ctype: &ctype_keybcs2_general_ci,
+ tolower: &tolower_keybcs2_general_ci,
+ toupper: &toupper_keybcs2_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "keybcs2",
ToUnicode: &tounicode_keybcs2_general_ci,
@@ -3141,9 +4506,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x4a,
- name: "koi8r_bin",
- simpletables: simpletables{},
+ id: 0x4a,
+ name: "koi8r_bin",
+ simpletables: simpletables{
+ ctype: &ctype_koi8r_general_ci,
+ tolower: &tolower_koi8r_general_ci,
+ toupper: &toupper_koi8r_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "koi8r",
ToUnicode: &tounicode_koi8r_general_ci,
@@ -3151,9 +4520,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x4b,
- name: "koi8u_bin",
- simpletables: simpletables{},
+ id: 0x4b,
+ name: "koi8u_bin",
+ simpletables: simpletables{
+ ctype: &ctype_koi8u_general_ci,
+ tolower: &tolower_koi8u_general_ci,
+ toupper: &toupper_koi8u_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "koi8u",
ToUnicode: &tounicode_koi8u_general_ci,
@@ -3161,9 +4534,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x4d,
- name: "latin2_bin",
- simpletables: simpletables{},
+ id: 0x4d,
+ name: "latin2_bin",
+ simpletables: simpletables{
+ ctype: &ctype_latin2_general_ci,
+ tolower: &tolower_latin2_general_ci,
+ toupper: &toupper_latin2_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "latin2",
ToUnicode: &tounicode_latin2_general_ci,
@@ -3171,9 +4548,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x4e,
- name: "latin5_bin",
- simpletables: simpletables{},
+ id: 0x4e,
+ name: "latin5_bin",
+ simpletables: simpletables{
+ ctype: &ctype_dec8_swedish_ci,
+ tolower: &tolower_latin5_turkish_ci,
+ toupper: &toupper_latin5_turkish_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "latin5",
ToUnicode: &tounicode_latin5_turkish_ci,
@@ -3181,9 +4562,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x4f,
- name: "latin7_bin",
- simpletables: simpletables{},
+ id: 0x4f,
+ name: "latin7_bin",
+ simpletables: simpletables{
+ ctype: &ctype_latin7_estonian_cs,
+ tolower: &tolower_latin7_estonian_cs,
+ toupper: &toupper_latin7_estonian_cs,
+ },
charset: &charset.Charset_8bit{
Name_: "latin7",
ToUnicode: &tounicode_latin7_estonian_cs,
@@ -3191,9 +4576,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x50,
- name: "cp850_bin",
- simpletables: simpletables{},
+ id: 0x50,
+ name: "cp850_bin",
+ simpletables: simpletables{
+ ctype: &ctype_cp850_general_ci,
+ tolower: &tolower_cp850_general_ci,
+ toupper: &toupper_cp850_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "cp850",
ToUnicode: &tounicode_cp850_general_ci,
@@ -3201,9 +4590,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x51,
- name: "cp852_bin",
- simpletables: simpletables{},
+ id: 0x51,
+ name: "cp852_bin",
+ simpletables: simpletables{
+ ctype: &ctype_cp852_general_ci,
+ tolower: &tolower_cp852_general_ci,
+ toupper: &toupper_cp852_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "cp852",
ToUnicode: &tounicode_cp852_general_ci,
@@ -3211,9 +4604,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x52,
- name: "swe7_bin",
- simpletables: simpletables{},
+ id: 0x52,
+ name: "swe7_bin",
+ simpletables: simpletables{
+ ctype: &ctype_swe7_swedish_ci,
+ tolower: &tolower_swe7_swedish_ci,
+ toupper: &toupper_swe7_swedish_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "swe7",
ToUnicode: &tounicode_swe7_swedish_ci,
@@ -3254,7 +4651,10 @@ func init() {
id: 0x5c,
name: "geostd8_general_ci",
simpletables: simpletables{
- sort: &sortorder_geostd8_general_ci,
+ ctype: &ctype_geostd8_general_ci,
+ tolower: &tolower_ascii_general_ci,
+ toupper: &toupper_ascii_general_ci,
+ sort: &sortorder_geostd8_general_ci,
},
charset: &charset.Charset_8bit{
Name_: "geostd8",
@@ -3263,9 +4663,13 @@ func init() {
},
})
register(&Collation_8bit_bin{
- id: 0x5d,
- name: "geostd8_bin",
- simpletables: simpletables{},
+ id: 0x5d,
+ name: "geostd8_bin",
+ simpletables: simpletables{
+ ctype: &ctype_geostd8_general_ci,
+ tolower: &tolower_ascii_general_ci,
+ toupper: &toupper_ascii_general_ci,
+ },
charset: &charset.Charset_8bit{
Name_: "geostd8",
ToUnicode: &tounicode_geostd8_general_ci,
@@ -3276,14 +4680,17 @@ func init() {
id: 0x5e,
name: "latin1_spanish_ci",
simpletables: simpletables{
- sort: &sortorder_latin1_spanish_ci,
+ ctype: &ctype_latin1_german1_ci,
+ tolower: &tolower_dec8_swedish_ci,
+ toupper: &toupper_dec8_swedish_ci,
+ sort: &sortorder_latin1_spanish_ci,
},
charset: charset.Charset_latin1{},
})
register(&Collation_multibyte{
id: 0x5f,
name: "cp932_japanese_ci",
- sort: &sortorder_ascii_general_ci,
+ sort: &toupper_ascii_general_ci,
charset: charset.Charset_cp932{},
})
register(&Collation_multibyte{
@@ -3294,7 +4701,7 @@ func init() {
register(&Collation_multibyte{
id: 0x61,
name: "eucjpms_japanese_ci",
- sort: &sortorder_ascii_general_ci,
+ sort: &toupper_ascii_general_ci,
charset: charset.Charset_eucjpms{},
})
register(&Collation_multibyte{
@@ -3306,7 +4713,10 @@ func init() {
id: 0x63,
name: "cp1250_polish_ci",
simpletables: simpletables{
- sort: &sortorder_cp1250_polish_ci,
+ ctype: &ctype_cp1250_general_ci,
+ tolower: &tolower_cp1250_general_ci,
+ toupper: &toupper_cp1250_general_ci,
+ sort: &sortorder_cp1250_polish_ci,
},
charset: &charset.Charset_8bit{
Name_: "cp1250",
diff --git a/go/mysql/collations/mysqlucadata.go b/go/mysql/collations/mysqlucadata.go
index 5cae2f40b1b..ae8e2d48642 100644
--- a/go/mysql/collations/mysqlucadata.go
+++ b/go/mysql/collations/mysqlucadata.go
@@ -158,7 +158,6 @@ var weightTable_uca900_page2FA = weightsUCA_embed(282368, 1792)
var weightTable_uca900_pageE00 = weightsUCA_embed(284160, 1792)
var weightTable_uca900_pageE01 = weightsUCA_embed(285952, 1792)
var weightTable_uca900 = []*[]uint16{
-
&weightTable_uca900_page000, &weightTable_uca900_page001, &weightTable_uca900_page002, &weightTable_uca900_page003, &weightTable_uca900_page004, &weightTable_uca900_page005, &weightTable_uca900_page006, &weightTable_uca900_page007, &weightTable_uca900_page008, &weightTable_uca900_page009, &weightTable_uca900_page00A, &weightTable_uca900_page00B, &weightTable_uca900_page00C, &weightTable_uca900_page00D, &weightTable_uca900_page00E, &weightTable_uca900_page00F, &weightTable_uca900_page010, &weightTable_uca900_page011, &weightTable_uca900_page012, &weightTable_uca900_page013, &weightTable_uca900_page014, &weightTable_uca900_page015, &weightTable_uca900_page016, &weightTable_uca900_page017, &weightTable_uca900_page018, &weightTable_uca900_page019, &weightTable_uca900_page01A, &weightTable_uca900_page01B, &weightTable_uca900_page01C, &weightTable_uca900_page01D, &weightTable_uca900_page01E, &weightTable_uca900_page01F,
&weightTable_uca900_page020, &weightTable_uca900_page021, &weightTable_uca900_page022, &weightTable_uca900_page023, &weightTable_uca900_page024, &weightTable_uca900_page025, &weightTable_uca900_page026, &weightTable_uca900_page027, &weightTable_uca900_page028, &weightTable_uca900_page029, &weightTable_uca900_page02A, &weightTable_uca900_page02B, &weightTable_uca900_page02C, &weightTable_uca900_page02D, &weightTable_uca900_page02E, &weightTable_uca900_page02F, &weightTable_uca900_page030, &weightTable_uca900_page031, &weightTable_uca900_page032, &weightTable_uca900_page033, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, &weightTable_uca900_page04D, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
@@ -381,7 +380,6 @@ var weightTable_uca900_ja_page09E = weightsUCA_embed(435456, 1792)
var weightTable_uca900_ja_page09F = weightsUCA_embed(437248, 1792)
var weightTable_uca900_ja_page0FF = weightsUCA_embed(439040, 1792)
var weightTable_uca900_ja = []*[]uint16{
-
&weightTable_uca900_page000, &weightTable_uca900_page001, &weightTable_uca900_page002, &weightTable_uca900_page003, &weightTable_uca900_page004, &weightTable_uca900_page005, &weightTable_uca900_page006, &weightTable_uca900_page007, &weightTable_uca900_page008, &weightTable_uca900_page009, &weightTable_uca900_page00A, &weightTable_uca900_page00B, &weightTable_uca900_page00C, &weightTable_uca900_page00D, &weightTable_uca900_page00E, &weightTable_uca900_page00F, &weightTable_uca900_page010, &weightTable_uca900_page011, &weightTable_uca900_page012, &weightTable_uca900_page013, &weightTable_uca900_page014, &weightTable_uca900_page015, &weightTable_uca900_page016, &weightTable_uca900_page017, &weightTable_uca900_page018, &weightTable_uca900_page019, &weightTable_uca900_page01A, &weightTable_uca900_page01B, &weightTable_uca900_page01C, &weightTable_uca900_page01D, &weightTable_uca900_page01E, &weightTable_uca900_page01F,
&weightTable_uca900_page020, &weightTable_uca900_page021, &weightTable_uca900_page022, &weightTable_uca900_page023, &weightTable_uca900_page024, &weightTable_uca900_page025, &weightTable_uca900_page026, &weightTable_uca900_page027, &weightTable_uca900_page028, &weightTable_uca900_page029, &weightTable_uca900_page02A, &weightTable_uca900_page02B, &weightTable_uca900_page02C, &weightTable_uca900_page02D, &weightTable_uca900_page02E, &weightTable_uca900_page02F, &weightTable_uca900_ja_page030, &weightTable_uca900_ja_page031, &weightTable_uca900_page032, &weightTable_uca900_page033, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, &weightTable_uca900_page04D, &weightTable_uca900_ja_page04E, &weightTable_uca900_ja_page04F, &weightTable_uca900_ja_page050, &weightTable_uca900_ja_page051, &weightTable_uca900_ja_page052, &weightTable_uca900_ja_page053, &weightTable_uca900_ja_page054, &weightTable_uca900_ja_page055, &weightTable_uca900_ja_page056, &weightTable_uca900_ja_page057, &weightTable_uca900_ja_page058, &weightTable_uca900_ja_page059, &weightTable_uca900_ja_page05A, &weightTable_uca900_ja_page05B, &weightTable_uca900_ja_page05C, &weightTable_uca900_ja_page05D, &weightTable_uca900_ja_page05E, &weightTable_uca900_ja_page05F,
@@ -948,7 +946,6 @@ var weightTable_uca900_zh_page2FA = weightsUCA_embed(1170432, 1792)
var weightTable_uca900_zh_pageE00 = weightsUCA_embed(1172224, 1792)
var weightTable_uca900_zh_pageE01 = weightsUCA_embed(1174016, 1792)
var weightTable_uca900_zh = []*[]uint16{
-
&weightTable_uca900_zh_page000, &weightTable_uca900_zh_page001, &weightTable_uca900_zh_page002, &weightTable_uca900_zh_page003, &weightTable_uca900_zh_page004, &weightTable_uca900_zh_page005, &weightTable_uca900_zh_page006, &weightTable_uca900_zh_page007, &weightTable_uca900_zh_page008, &weightTable_uca900_zh_page009, &weightTable_uca900_zh_page00A, &weightTable_uca900_zh_page00B, &weightTable_uca900_zh_page00C, &weightTable_uca900_zh_page00D, &weightTable_uca900_zh_page00E, &weightTable_uca900_zh_page00F, &weightTable_uca900_zh_page010, &weightTable_uca900_zh_page011, &weightTable_uca900_zh_page012, &weightTable_uca900_zh_page013, &weightTable_uca900_zh_page014, &weightTable_uca900_zh_page015, &weightTable_uca900_zh_page016, &weightTable_uca900_zh_page017, &weightTable_uca900_zh_page018, &weightTable_uca900_zh_page019, &weightTable_uca900_zh_page01A, &weightTable_uca900_zh_page01B, &weightTable_uca900_zh_page01C, &weightTable_uca900_zh_page01D, &weightTable_uca900_zh_page01E, &weightTable_uca900_zh_page01F,
&weightTable_uca900_zh_page020, &weightTable_uca900_zh_page021, &weightTable_uca900_page022, &weightTable_uca900_zh_page023, &weightTable_uca900_zh_page024, &weightTable_uca900_page025, &weightTable_uca900_page026, &weightTable_uca900_page027, &weightTable_uca900_page028, &weightTable_uca900_page029, &weightTable_uca900_page02A, &weightTable_uca900_zh_page02B, &weightTable_uca900_zh_page02C, &weightTable_uca900_zh_page02D, &weightTable_uca900_zh_page02E, &weightTable_uca900_zh_page02F, &weightTable_uca900_zh_page030, &weightTable_uca900_zh_page031, &weightTable_uca900_zh_page032, &weightTable_uca900_zh_page033, &weightTable_uca900_zh_page034, &weightTable_uca900_zh_page035, &weightTable_uca900_zh_page036, &weightTable_uca900_zh_page037, &weightTable_uca900_zh_page038, &weightTable_uca900_zh_page039, &weightTable_uca900_zh_page03A, &weightTable_uca900_zh_page03B, &weightTable_uca900_zh_page03C, &weightTable_uca900_zh_page03D, &weightTable_uca900_zh_page03E, &weightTable_uca900_zh_page03F,
&weightTable_uca900_zh_page040, &weightTable_uca900_zh_page041, &weightTable_uca900_zh_page042, &weightTable_uca900_zh_page043, &weightTable_uca900_zh_page044, &weightTable_uca900_zh_page045, &weightTable_uca900_zh_page046, &weightTable_uca900_zh_page047, &weightTable_uca900_zh_page048, &weightTable_uca900_zh_page049, &weightTable_uca900_zh_page04A, &weightTable_uca900_zh_page04B, &weightTable_uca900_zh_page04C, &weightTable_uca900_zh_page04D, &weightTable_uca900_zh_page04E, &weightTable_uca900_zh_page04F, &weightTable_uca900_zh_page050, &weightTable_uca900_zh_page051, &weightTable_uca900_zh_page052, &weightTable_uca900_zh_page053, &weightTable_uca900_zh_page054, &weightTable_uca900_zh_page055, &weightTable_uca900_zh_page056, &weightTable_uca900_zh_page057, &weightTable_uca900_zh_page058, &weightTable_uca900_zh_page059, &weightTable_uca900_zh_page05A, &weightTable_uca900_zh_page05B, &weightTable_uca900_zh_page05C, &weightTable_uca900_zh_page05D, &weightTable_uca900_zh_page05E, &weightTable_uca900_zh_page05F,
@@ -1145,7 +1142,6 @@ var weightTable_uca400_page0FD = weightsUCA_embed(1208376, 2049)
var weightTable_uca400_page0FE = weightsUCA_embed(1210425, 513)
var weightTable_uca400_page0FF = weightsUCA_embed(1210938, 513)
var weightTable_uca400 = []*[]uint16{
-
&weightTable_uca400_page000, &weightTable_uca400_page001, &weightTable_uca400_page002, &weightTable_uca400_page003, &weightTable_uca400_page004, &weightTable_uca400_page005, &weightTable_uca400_page006, &weightTable_uca400_page007, nil, &weightTable_uca400_page009, &weightTable_uca400_page00A, &weightTable_uca400_page00B, &weightTable_uca400_page00C, &weightTable_uca400_page00D, &weightTable_uca400_page00E, &weightTable_uca400_page00F, &weightTable_uca400_page010, &weightTable_uca400_page011, &weightTable_uca400_page012, &weightTable_uca400_page013, &weightTable_uca400_page014, &weightTable_uca400_page015, &weightTable_uca400_page016, &weightTable_uca400_page017, &weightTable_uca400_page018, &weightTable_uca400_page019, nil, nil, nil, &weightTable_uca400_page01D, &weightTable_uca400_page01E, &weightTable_uca400_page01F,
&weightTable_uca400_page020, &weightTable_uca400_page021, &weightTable_uca400_page022, &weightTable_uca400_page023, &weightTable_uca400_page024, &weightTable_uca400_page025, &weightTable_uca400_page026, &weightTable_uca400_page027, &weightTable_uca400_page028, &weightTable_uca400_page029, &weightTable_uca400_page02A, &weightTable_uca400_page02B, nil, nil, &weightTable_uca400_page02E, &weightTable_uca400_page02F, &weightTable_uca400_page030, &weightTable_uca400_page031, &weightTable_uca400_page032, &weightTable_uca400_page033, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, &weightTable_uca400_page04D, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
@@ -1265,7 +1261,6 @@ var weightTable_uca520_page2FA = weightsUCA_embed(1270950, 513)
var weightTable_uca520_pageE00 = weightsUCA_embed(1271463, 513)
var weightTable_uca520_pageE01 = weightsUCA_embed(1271976, 513)
var weightTable_uca520 = []*[]uint16{
-
&weightTable_uca520_page000, &weightTable_uca520_page001, &weightTable_uca520_page002, &weightTable_uca520_page003, &weightTable_uca520_page004, &weightTable_uca520_page005, &weightTable_uca520_page006, &weightTable_uca520_page007, &weightTable_uca520_page008, &weightTable_uca520_page009, &weightTable_uca520_page00A, &weightTable_uca520_page00B, &weightTable_uca520_page00C, &weightTable_uca520_page00D, &weightTable_uca520_page00E, &weightTable_uca520_page00F, &weightTable_uca520_page010, &weightTable_uca520_page011, &weightTable_uca520_page012, &weightTable_uca520_page013, &weightTable_uca520_page014, &weightTable_uca520_page015, &weightTable_uca520_page016, &weightTable_uca520_page017, &weightTable_uca520_page018, &weightTable_uca520_page019, &weightTable_uca520_page01A, &weightTable_uca520_page01B, &weightTable_uca520_page01C, &weightTable_uca520_page01D, &weightTable_uca520_page01E, &weightTable_uca520_page01F,
&weightTable_uca520_page020, &weightTable_uca520_page021, &weightTable_uca520_page022, &weightTable_uca520_page023, &weightTable_uca520_page024, &weightTable_uca520_page025, &weightTable_uca520_page026, &weightTable_uca520_page027, &weightTable_uca520_page028, &weightTable_uca520_page029, &weightTable_uca520_page02A, &weightTable_uca520_page02B, &weightTable_uca520_page02C, &weightTable_uca520_page02D, &weightTable_uca520_page02E, &weightTable_uca520_page02F, &weightTable_uca520_page030, &weightTable_uca520_page031, &weightTable_uca520_page032, &weightTable_uca520_page033, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, &weightTable_uca520_page04D, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
diff --git a/go/mysql/collations/uca.go b/go/mysql/collations/uca.go
index be5f9cb47c4..23d7beafe45 100644
--- a/go/mysql/collations/uca.go
+++ b/go/mysql/collations/uca.go
@@ -17,6 +17,7 @@ limitations under the License.
package collations
import (
+ "bytes"
"math/bits"
"sync"
"unsafe"
@@ -225,6 +226,16 @@ func (c *Collation_utf8mb4_uca_0900) Wildcard(pat []byte, matchOne rune, matchMa
return newUnicodeWildcardMatcher(charset.Charset_utf8mb4{}, c.uca.WeightsEqual, c.Collate, pat, matchOne, matchMany, escape)
}
+func (c *Collation_utf8mb4_uca_0900) ToLower(dst, src []byte) []byte {
+ dst = append(dst, bytes.ToLower(src)...)
+ return dst
+}
+
+func (c *Collation_utf8mb4_uca_0900) ToUpper(dst, src []byte) []byte {
+ dst = append(dst, bytes.ToUpper(src)...)
+ return dst
+}
+
type Collation_utf8mb4_0900_bin struct{}
func (c *Collation_utf8mb4_0900_bin) Init() {}
@@ -274,6 +285,16 @@ func (c *Collation_utf8mb4_0900_bin) Wildcard(pat []byte, matchOne rune, matchMa
return newUnicodeWildcardMatcher(charset.Charset_utf8mb4{}, equals, c.Collate, pat, matchOne, matchMany, escape)
}
+func (c *Collation_utf8mb4_0900_bin) ToLower(dst, src []byte) []byte {
+ dst = append(dst, bytes.ToLower(src)...)
+ return dst
+}
+
+func (c *Collation_utf8mb4_0900_bin) ToUpper(dst, src []byte) []byte {
+ dst = append(dst, bytes.ToUpper(src)...)
+ return dst
+}
+
type Collation_uca_legacy struct {
name string
id ID
diff --git a/go/mysql/collations/uca_contraction_test.go b/go/mysql/collations/uca_contraction_test.go
index cfc1cf795fb..f178b70009e 100644
--- a/go/mysql/collations/uca_contraction_test.go
+++ b/go/mysql/collations/uca_contraction_test.go
@@ -26,6 +26,8 @@ import (
"testing"
"unicode/utf8"
+ "github.com/stretchr/testify/assert"
+
"vitess.io/vitess/go/mysql/collations/internal/charset"
"vitess.io/vitess/go/mysql/collations/internal/uca"
)
@@ -96,15 +98,10 @@ func findContractedCollations(t testing.TB, unique bool) (result []CollationWith
}
func testMatch(t *testing.T, name string, cnt uca.Contraction, result []uint16, remainder []byte, skip int) {
- if !reflect.DeepEqual(cnt.Weights, result) {
- t.Errorf("%s didn't match: expected %#v, got %#v", name, cnt.Weights, result)
- }
- if len(remainder) != 0 {
- t.Errorf("%s bad remainder: %#v", name, remainder)
- }
- if skip != len(cnt.Path) {
- t.Errorf("%s bad skipped length %d for %#v", name, skip, cnt.Path)
- }
+ assert.True(t, reflect.DeepEqual(cnt.Weights, result), "%s didn't match: expected %#v, got %#v", name, cnt.Weights, result)
+ assert.Equal(t, 0, len(remainder), "%s bad remainder: %#v", name, remainder)
+ assert.Equal(t, len(cnt.Path), skip, "%s bad skipped length %d for %#v", name, skip, cnt.Path)
+
}
func TestUCAContractions(t *testing.T) {
diff --git a/go/mysql/collations/uca_tables_test.go b/go/mysql/collations/uca_tables_test.go
index e7a8063e5d2..46d83a85ec1 100644
--- a/go/mysql/collations/uca_tables_test.go
+++ b/go/mysql/collations/uca_tables_test.go
@@ -26,6 +26,7 @@ import (
"unsafe"
"github.com/stretchr/testify/require"
+ "gotest.tools/assert"
"vitess.io/vitess/go/mysql/collations/internal/charset"
"vitess.io/vitess/go/mysql/collations/internal/uca"
@@ -57,9 +58,8 @@ func verifyAllCodepoints(t *testing.T, expected map[rune][]uint16, weights uca.W
for i := range vitessWeights {
a, b := mysqlWeights[i], vitessWeights[i]
- if a != b {
- t.Errorf("weight mismatch for U+%04X (collation entity %d): mysql=%v vitess=%v", cp, i+1, a, b)
- }
+ assert.Equal(t, b, a, "weight mismatch for U+%04X (collation entity %d): mysql=%v vitess=%v", cp, i+1, a, b)
+
}
}
}
diff --git a/go/mysql/collations/uca_test.go b/go/mysql/collations/uca_test.go
index ad1231dd39c..447a5236f6f 100644
--- a/go/mysql/collations/uca_test.go
+++ b/go/mysql/collations/uca_test.go
@@ -19,12 +19,17 @@ package collations
import (
"bytes"
"fmt"
+ "math/rand"
"sort"
"strings"
"sync"
"testing"
"unicode/utf8"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+
"vitess.io/vitess/go/mysql/collations/internal/charset"
)
@@ -53,9 +58,8 @@ func testcollation(t testing.TB, name string) Collation {
t.Helper()
testinit()
coll := testcollationMap[name]
- if coll == nil {
- t.Fatalf("missing collation: %s", name)
- }
+ require.NotNil(t, coll, "missing collation: %s", name)
+
return coll
}
@@ -81,9 +85,8 @@ func TestWeightsForSpace(t *testing.T) {
default:
continue
}
- if actual != expected {
- t.Errorf("expected Weight(' ') == 0x%X, got 0x%X", expected, actual)
- }
+ assert.Equal(t, expected, actual, "expected Weight(' ') == 0x%X, got 0x%X", expected, actual)
+
}
}
@@ -104,9 +107,8 @@ func TestKanaSensitivity(t *testing.T) {
t.Run(tc.collation, func(t *testing.T) {
collation := testcollation(t, tc.collation)
equal := collation.Collate([]byte(Kana1), []byte(Kana2), false) == 0
- if equal != tc.equal {
- t.Errorf("expected %q == %q to be %v", Kana1, Kana2, tc.equal)
- }
+ assert.Equal(t, tc.equal, equal, "expected %q == %q to be %v", Kana1, Kana2, tc.equal)
+
})
}
}
@@ -135,9 +137,8 @@ func TestContractions(t *testing.T) {
for _, in := range tc.inputs {
weightString := coll.WeightString(nil, []byte(in), 0)
- if !bytes.Equal(weightString, tc.expected) {
- t.Errorf("weight_string(%q) = %#v (expected %#v)", in, weightString, tc.expected)
- }
+ assert.True(t, bytes.Equal(weightString, tc.expected), "weight_string(%q) = %#v (expected %#v)", in, weightString, tc.expected)
+
}
})
}
@@ -157,9 +158,8 @@ func TestReplacementCharacter(t *testing.T) {
t.Run(tc.collation, func(t *testing.T) {
coll := testcollation(t, tc.collation)
weightString := coll.WeightString(nil, []byte(string(utf8.RuneError)), 0)
- if !bytes.Equal(weightString, tc.expected) {
- t.Errorf("weight_string(\\uFFFD) = %#v (expected %#v)", weightString, tc.expected)
- }
+ assert.True(t, bytes.Equal(weightString, tc.expected), "weight_string(\\uFFFD) = %#v (expected %#v)", weightString, tc.expected)
+
})
}
}
@@ -182,9 +182,8 @@ func TestIsPrefix(t *testing.T) {
right := string(input[:size])
cmp := coll.Collate([]byte(left), []byte(right), true)
- if cmp != 0 {
- t.Errorf("IsPrefix(%q, %q) = %d (expected 0)", left, right, cmp)
- }
+ assert.Equal(t, 0, cmp, "IsPrefix(%q, %q) = %d (expected 0)", left, right, cmp)
+
}
}
}
@@ -799,10 +798,8 @@ func TestCompareWithWeightString(t *testing.T) {
for _, tc := range cases {
left := collation.WeightString(nil, []byte(tc.left), 0)
right := collation.WeightString(nil, []byte(tc.right), 0)
+ assert.Equal(t, tc.equal, bytes.Equal(left, right), "expected %q / %v == %q / %v to be %v", tc.left, left, tc.right, right, tc.equal)
- if bytes.Equal(left, right) != tc.equal {
- t.Errorf("expected %q / %v == %q / %v to be %v", tc.left, left, tc.right, right, tc.equal)
- }
}
}
@@ -835,9 +832,8 @@ func TestFastIterators(t *testing.T) {
t.Run(tc.collation, func(t *testing.T) {
coll := testcollation(t, tc.collation)
result := coll.WeightString(nil, allASCIICharacters, 0)
- if !bytes.Equal(tc.expected, result) {
- t.Errorf("weight_string(%q) = %#v (expected %#v)", allASCIICharacters, result, tc.expected)
- }
+ assert.True(t, bytes.Equal(tc.expected, result), "weight_string(%q) = %#v (expected %#v)", allASCIICharacters, result, tc.expected)
+
})
}
}
@@ -905,9 +901,45 @@ func TestEqualities(t *testing.T) {
collation = &ConsistentCollation{Collation: collation, t: t}
cmp := collation.Collate([]byte(tc.left), []byte(tc.right), false)
- if (cmp == 0) != tc.equal {
- t.Errorf("expected %q == %q to be %v", tc.left, tc.right, tc.equal)
+ assert.Equal(t, tc.equal, (cmp == 0), "expected %q == %q to be %v", tc.left, tc.right, tc.equal)
+
+ }
+}
+
+func TestUCACollationOrder(t *testing.T) {
+ var sorted = []string{
+ "aaaa",
+ "bbbb",
+ "cccc",
+ "dddd",
+ "zzzz",
+ }
+
+ var collations = []string{
+ "utf8mb4_0900_ai_ci",
+ "utf8mb4_0900_as_cs",
+ }
+
+ for _, colname := range collations {
+ col := testcollation(t, colname)
+
+ for _, a := range sorted {
+ for _, b := range sorted {
+ want := strings.Compare(a, b) < 0
+ got := col.Collate([]byte(a), []byte(b), false) < 0
+ require.Equalf(t, want, got, "failed to compare %q vs %q", a, b)
+ }
}
+
+ ary := slices.Clone(sorted)
+ for i := range ary {
+ j := rand.Intn(i + 1)
+ ary[i], ary[j] = ary[j], ary[i]
+ }
+ slices.SortFunc(ary, func(a, b string) bool {
+ return col.Collate([]byte(a), []byte(b), false) < 0
+ })
+ require.Equal(t, sorted, ary)
}
}
diff --git a/go/mysql/collations/wildcard_test.go b/go/mysql/collations/wildcard_test.go
index d45a116e119..ef0430505dc 100644
--- a/go/mysql/collations/wildcard_test.go
+++ b/go/mysql/collations/wildcard_test.go
@@ -19,6 +19,8 @@ package collations
import (
"testing"
+ "github.com/stretchr/testify/assert"
+
"vitess.io/vitess/go/mysql/collations/internal/charset"
)
@@ -33,9 +35,8 @@ func testWildcardMatches(t *testing.T, collName string, chOne, chMany, chEsc run
for _, tc := range cases {
pat := coll.Wildcard([]byte(tc.pat), chOne, chMany, chEsc)
match := pat.Match([]byte(tc.in))
- if match != tc.match {
- t.Errorf("%q LIKE %q = %v (expected %v)", tc.in, tc.pat, match, tc.match)
- }
+ assert.Equal(t, tc.match, match, "%q LIKE %q = %v (expected %v)", tc.in, tc.pat, match, tc.match)
+
}
})
}
@@ -343,9 +344,8 @@ func TestWildcardMatches(t *testing.T) {
for _, tc := range wildcardTestCases {
wildcard := newUnicodeWildcardMatcher(charset.Charset_utf8mb4{}, identity, nil, []byte(tc.pat), '?', '*', '\\')
match := wildcard.Match([]byte(tc.in))
- if match != tc.match {
- t.Errorf("wildcard(%q, %q) = %v (expected %v)", tc.in, tc.pat, match, tc.match)
- }
+ assert.Equal(t, tc.match, match, "wildcard(%q, %q) = %v (expected %v)", tc.in, tc.pat, match, tc.match)
+
}
})
@@ -353,9 +353,8 @@ func TestWildcardMatches(t *testing.T) {
for _, tc := range wildcardTestCases {
wildcard := newEightbitWildcardMatcher(&sortOrderIdentity, nil, []byte(tc.pat), '?', '*', '\\')
match := wildcard.Match([]byte(tc.in))
- if match != tc.match {
- t.Errorf("wildcard(%q, %q) = %v (expected %v)", tc.in, tc.pat, match, tc.match)
- }
+ assert.Equal(t, tc.match, match, "wildcard(%q, %q) = %v (expected %v)", tc.in, tc.pat, match, tc.match)
+
}
})
diff --git a/go/mysql/conn.go b/go/mysql/conn.go
index a65f9d117d3..f13c3b2242f 100644
--- a/go/mysql/conn.go
+++ b/go/mysql/conn.go
@@ -818,6 +818,10 @@ func getLenEncInt(i uint64) []byte {
return data
}
+func (c *Conn) WriteErrorAndLog(format string, args ...interface{}) bool {
+ return c.writeErrorAndLog(ERUnknownComError, SSNetError, format, args...)
+}
+
func (c *Conn) writeErrorAndLog(errorCode uint16, sqlState string, format string, args ...any) bool {
if err := c.writeErrorPacket(errorCode, sqlState, format, args...); err != nil {
log.Errorf("Error writing error to %s: %v", c, err)
@@ -932,8 +936,12 @@ func (c *Conn) handleNextCommand(handler Handler) bool {
if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]) {
return false
}
+ case ComBinlogDump:
+ return c.handleComBinlogDump(handler, data)
case ComBinlogDumpGTID:
return c.handleComBinlogDumpGTID(handler, data)
+ case ComRegisterReplica:
+ return c.handleComRegisterReplica(handler, data)
default:
log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data)
c.recycleReadPacket()
@@ -945,8 +953,27 @@ func (c *Conn) handleNextCommand(handler Handler) bool {
return true
}
-func (c *Conn) handleComBinlogDumpGTID(handler Handler, data []byte) (kontinue bool) {
- defer c.recycleReadPacket()
+func (c *Conn) handleComRegisterReplica(handler Handler, data []byte) (kontinue bool) {
+ c.recycleReadPacket()
+
+ replicaHost, replicaPort, replicaUser, replicaPassword, err := c.parseComRegisterReplica(data)
+ if err != nil {
+ log.Errorf("conn %v: parseComRegisterReplica failed: %v", c.ID(), err)
+ return false
+ }
+ if err := handler.ComRegisterReplica(c, replicaHost, replicaPort, replicaUser, replicaPassword); err != nil {
+ c.writeErrorPacketFromError(err)
+ return false
+ }
+ if err := c.writeOKPacket(&PacketOK{}); err != nil {
+ c.writeErrorPacketFromError(err)
+ }
+ return true
+}
+
+func (c *Conn) handleComBinlogDump(handler Handler, data []byte) (kontinue bool) {
+ c.recycleReadPacket()
+ kontinue = true
c.startWriterBuffering()
defer func() {
@@ -956,14 +983,40 @@ func (c *Conn) handleComBinlogDumpGTID(handler Handler, data []byte) (kontinue b
}
}()
- _, _, position, err := c.parseComBinlogDumpGTID(data)
+ logfile, binlogPos, err := c.parseComBinlogDump(data)
if err != nil {
log.Errorf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err)
- kontinue = false
+ return false
+ }
+ if err := handler.ComBinlogDump(c, logfile, binlogPos); err != nil {
+ log.Error(err.Error())
+ return false
}
- handler.ComBinlogDumpGTID(c, position.GTIDSet)
+ return kontinue
+}
- return true
+func (c *Conn) handleComBinlogDumpGTID(handler Handler, data []byte) (kontinue bool) {
+ c.recycleReadPacket()
+ kontinue = true
+
+ c.startWriterBuffering()
+ defer func() {
+ if err := c.endWriterBuffering(); err != nil {
+ log.Errorf("conn %v: flush() failed: %v", c.ID(), err)
+ kontinue = false
+ }
+ }()
+
+ logFile, logPos, position, err := c.parseComBinlogDumpGTID(data)
+ if err != nil {
+ log.Errorf("conn %v: parseComBinlogDumpGTID failed: %v", c.ID(), err)
+ return false
+ }
+ if err := handler.ComBinlogDumpGTID(c, logFile, logPos, position.GTIDSet); err != nil {
+ log.Error(err.Error())
+ return false
+ }
+ return kontinue
}
func (c *Conn) handleComResetConnection(handler Handler) {
@@ -1483,12 +1536,9 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) {
// session tracking
if statusFlags&ServerSessionStateChanged == ServerSessionStateChanged {
length, ok := data.readLenEncInt()
- if !ok {
- return fail("invalid OK packet session state change length: %v", data)
- }
- // In case we have a zero length string, there's no additional information so
- // we can return the packet.
- if length == 0 {
+ if !ok || length == 0 {
+ // In case we have no more data or a zero length string, there's no additional information so
+ // we can return the packet.
return packetOK, nil
}
diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_flaky_test.go
index 327c25a0c29..e73e566dad3 100644
--- a/go/mysql/conn_flaky_test.go
+++ b/go/mysql/conn_flaky_test.go
@@ -46,9 +46,8 @@ import (
func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) {
// Create a listener.
listener, err := net.Listen("tcp", "127.0.0.1:")
- if err != nil {
- t.Fatalf("Listen failed: %v", err)
- }
+ require.NoError(t, err, "Listen failed: %v", err)
+
addr := listener.Addr().String()
listener.(*net.TCPListener).SetDeadline(time.Now().Add(10 * time.Second))
@@ -72,13 +71,8 @@ func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) {
}()
wg.Wait()
-
- if clientErr != nil {
- t.Fatalf("Dial failed: %v", clientErr)
- }
- if serverErr != nil {
- t.Fatalf("Accept failed: %v", serverErr)
- }
+ require.Nil(t, clientErr, "Dial failed: %v", clientErr)
+ require.Nil(t, serverErr, "Accept failed: %v", serverErr)
// Create a Conn on both sides.
cConn := newConn(clientConn)
@@ -366,6 +360,11 @@ func TestOkPackets(t *testing.T) {
expectedErr: "invalid OK packet warnings: &{[0 0 0 2 0] 0}",
}, {
dataIn: `
+00000000 FE 00 00 22 40 00 00 |.....|`,
+ dataOut: `00000000 00 00 00 22 40 00 00 00 04 03 02 00 00 |..."@........|`,
+ cc: CapabilityClientProtocol41 | CapabilityClientTransactions | CapabilityClientSessionTrack | CapabilityClientDeprecateEOF,
+ }, {
+ dataIn: `
00000000 00 00 00 02 40 00 00 00 2a 03 28 00 26 66 32 37 |....@...*.(.&f27|
00000010 66 36 39 37 31 2d 30 33 65 37 2d 31 31 65 62 2d |f6971-03e7-11eb-|
00000020 38 35 63 35 2d 39 38 61 66 36 35 61 36 64 63 34 |85c5-98af65a6dc4|
@@ -390,7 +389,7 @@ func TestOkPackets(t *testing.T) {
00000000 00 00 00 00 40 00 00 00 04 03 02 00 00 |....@........|`,
cc: CapabilityClientProtocol41 | CapabilityClientTransactions | CapabilityClientSessionTrack,
}, {
- dataIn: `0000 00 00 00 03 40 00 00 00 fc 56 04 03 |a.......@....V..|
+ dataIn: `0000 00 00 00 03 40 00 00 00 fc 56 04 03 |....@....V..|
0010 fc 47 04 00 fc 43 04 30 63 36 63 36 62 34 61 2d |.G...C.0c6c6b4a-|
0020 32 64 65 35 2d 31 31 65 64 2d 62 63 37 61 2d 61 |2de5-11ed-bc7a-a|
0030 38 61 31 35 39 38 33 64 35 62 64 3a 31 2d 34 2c |8a15983d5bd:1-4,|
@@ -532,6 +531,151 @@ func TestOkPackets(t *testing.T) {
00000420 35 39 38 33 64 35 62 64 3a 31 2d 39 2c 0a 64 65 |5983d5bd:1-9,.de|
00000430 30 64 63 37 38 30 2d 32 64 65 35 2d 31 31 65 64 |0dc780-2de5-11ed|
00000440 2d 62 31 62 31 2d 61 38 61 31 35 39 38 33 64 35 |-b1b1-a8a15983d5|
+00000450 62 64 3a 31 2d 37 |bd:1-7|`,
+ cc: CapabilityClientProtocol41 | CapabilityClientTransactions | CapabilityClientSessionTrack,
+ }, {
+ dataIn: `00000000 00 00 00 03 40 00 00 00 fc 56 04 05 |....@....V..|
+00000010 09 08 54 5f 52 5f 5f 5f 5f 5f 03 fc 47 04 00 fc |..T_R_____.?G..?|
+00000020 43 04 30 63 36 63 36 62 34 61 2d 32 64 65 35 2d |C.0c6c6b4a-2de5-|
+00000030 31 31 65 64 2d 62 63 37 61 2d 61 38 61 31 35 39 |11ed-bc7a-a8a159|
+00000040 38 33 64 35 62 64 3a 31 2d 34 2c 0a 31 33 65 62 |83d5bd:1-4,.13eb|
+00000050 66 38 32 38 2d 32 64 65 35 2d 31 31 65 64 2d 62 |f828-2de5-11ed-b|
+00000060 34 65 35 2d 61 38 61 31 35 39 38 33 64 35 62 64 |4e5-a8a15983d5bd|
+00000070 3a 31 2d 39 2c 0a 31 38 61 30 66 30 34 38 2d 32 |:1-9,.18a0f048-2|
+00000080 64 65 34 2d 31 31 65 64 2d 38 63 31 63 2d 61 38 |de4-11ed-8c1c-a8|
+00000090 61 31 35 39 38 33 64 35 62 64 3a 31 2d 33 2c 0a |a15983d5bd:1-3,.|
+000000a0 31 66 36 34 31 62 36 33 2d 32 64 65 35 2d 31 31 |1f641b63-2de5-11|
+000000b0 65 64 2d 61 35 31 62 2d 61 38 61 31 35 39 38 33 |ed-a51b-a8a15983|
+000000c0 64 35 62 64 3a 31 2d 39 2c 0a 32 63 36 35 36 35 |d5bd:1-9,.2c6565|
+000000d0 37 31 2d 32 64 65 35 2d 31 31 65 64 2d 61 34 37 |71-2de5-11ed-a47|
+000000e0 34 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 |4-a8a15983d5bd:1|
+000000f0 2d 35 2c 0a 33 32 32 61 34 32 35 34 2d 32 64 65 |-5,.322a4254-2de|
+00000100 35 2d 31 31 65 64 2d 61 65 64 31 2d 61 38 61 31 |5-11ed-aed1-a8a1|
+00000110 35 39 38 33 64 35 62 64 3a 31 2d 34 2c 0a 33 37 |5983d5bd:1-4,.37|
+00000120 63 35 64 30 34 31 2d 32 64 65 35 2d 31 31 65 64 |c5d041-2de5-11ed|
+00000130 2d 38 64 33 66 2d 61 38 61 31 35 39 38 33 64 35 |-8d3f-a8a15983d5|
+00000140 62 64 3a 31 2d 31 32 2c 0a 34 31 34 33 32 37 32 |bd:1-12,.4143272|
+00000150 33 2d 32 64 65 35 2d 31 31 65 64 2d 61 61 36 66 |3-2de5-11ed-aa6f|
+00000160 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 2d |-a8a15983d5bd:1-|
+00000170 37 2c 0a 34 39 38 38 38 35 36 66 2d 32 64 65 34 |7,.4988856f-2de4|
+00000180 2d 31 31 65 64 2d 39 37 31 36 2d 61 38 61 31 35 |-11ed-9716-a8a15|
+00000190 39 38 33 64 35 62 64 3a 31 2d 35 2c 0a 35 35 38 |983d5bd:1-5,.558|
+000001a0 36 61 64 34 65 2d 32 64 65 34 2d 31 31 65 64 2d |6ad4e-2de4-11ed-|
+000001b0 38 63 37 33 2d 61 38 61 31 35 39 38 33 64 35 62 |8c73-a8a15983d5b|
+000001c0 64 3a 31 2d 36 2c 0a 36 34 65 39 66 32 32 66 2d |d:1-6,.64e9f22f-|
+000001d0 32 64 65 34 2d 31 31 65 64 2d 39 62 65 31 2d 61 |2de4-11ed-9be1-a|
+000001e0 38 61 31 35 39 38 33 64 35 62 64 3a 31 2d 33 2c |8a15983d5bd:1-3,|
+000001f0 0a 36 62 31 36 34 37 30 65 2d 32 64 65 34 2d 31 |.6b16470e-2de4-1|
+00000200 31 65 64 2d 61 31 33 64 2d 61 38 61 31 35 39 38 |1ed-a13d-a8a1598|
+00000210 33 64 35 62 64 3a 31 2d 34 2c 0a 37 35 65 37 65 |3d5bd:1-4,.75e7e|
+00000220 32 38 65 2d 32 37 61 38 2d 31 31 65 64 2d 39 61 |28e-27a8-11ed-9a|
+00000230 30 36 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a |06-a8a15983d5bd:|
+00000240 31 2d 39 2c 0a 38 31 34 30 32 37 66 31 2d 32 64 |1-9,.814027f1-2d|
+00000250 65 34 2d 31 31 65 64 2d 39 65 33 63 2d 61 38 61 |e4-11ed-9e3c-a8a|
+00000260 31 35 39 38 33 64 35 62 64 3a 31 2d 34 2c 0a 38 |15983d5bd:1-4,.8|
+00000270 37 63 32 38 64 64 63 2d 32 64 65 34 2d 31 31 65 |7c28ddc-2de4-11e|
+00000280 64 2d 38 32 37 32 2d 61 38 61 31 35 39 38 33 64 |d-8272-a8a15983d|
+00000290 35 62 64 3a 31 2d 31 39 2c 0a 39 30 35 38 33 35 |5bd:1-19,.905835|
+000002a0 62 37 2d 32 64 65 35 2d 31 31 65 64 2d 61 32 39 |b7-2de5-11ed-a29|
+000002b0 39 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 |9-a8a15983d5bd:1|
+000002c0 2d 38 2c 0a 39 37 64 66 36 30 63 39 2d 32 64 65 |-8,.97df60c9-2de|
+000002d0 34 2d 31 31 65 64 2d 62 39 30 65 2d 61 38 61 31 |4-11ed-b90e-a8a1|
+000002e0 35 39 38 33 64 35 62 64 3a 31 2d 35 2c 0a 39 37 |5983d5bd:1-5,.97|
+000002f0 65 39 30 63 30 38 2d 32 64 65 35 2d 31 31 65 64 |e90c08-2de5-11ed|
+00000300 2d 39 37 30 39 2d 61 38 61 31 35 39 38 33 64 35 |-9709-a8a15983d5|
+00000310 62 64 3a 31 2d 33 38 2c 0a 39 39 64 66 61 32 62 |bd:1-38,.99dfa2b|
+00000320 64 2d 32 64 65 33 2d 31 31 65 64 2d 62 37 39 65 |d-2de3-11ed-b79e|
+00000330 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 2c |-a8a15983d5bd:1,|
+00000340 0a 61 31 62 63 34 33 34 32 2d 32 64 65 34 2d 31 |.a1bc4342-2de4-1|
+00000350 31 65 64 2d 61 30 62 31 2d 61 38 61 31 35 39 38 |1ed-a0b1-a8a1598|
+00000360 33 64 35 62 64 3a 31 2d 31 36 2c 0a 61 62 65 35 |3d5bd:1-16,.abe5|
+00000370 65 32 61 34 2d 32 64 65 34 2d 31 31 65 64 2d 62 |e2a4-2de4-11ed-b|
+00000380 62 33 63 2d 61 38 61 31 35 39 38 33 64 35 62 64 |b3c-a8a15983d5bd|
+00000390 3a 31 2d 33 2c 0a 62 37 64 39 61 62 39 37 2d 32 |:1-3,.b7d9ab97-2|
+000003a0 64 65 34 2d 31 31 65 64 2d 39 33 39 64 2d 61 38 |de4-11ed-939d-a8|
+000003b0 61 31 35 39 38 33 64 35 62 64 3a 31 2c 0a 62 64 |a15983d5bd:1,.bd|
+000003c0 33 64 30 34 30 30 2d 32 64 65 34 2d 31 31 65 64 |3d0400-2de4-11ed|
+000003d0 2d 38 62 36 61 2d 61 38 61 31 35 39 38 33 64 35 |-8b6a-a8a15983d5|
+000003e0 62 64 3a 31 2d 36 2c 0a 63 36 61 38 37 33 61 63 |bd:1-6,.c6a873ac|
+000003f0 2d 32 64 65 35 2d 31 31 65 64 2d 38 35 30 33 2d |-2de5-11ed-8503-|
+00000400 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 2d 32 |a8a15983d5bd:1-2|
+00000410 31 2c 0a 64 34 37 65 30 36 32 65 2d 32 64 65 35 |1,.d47e062e-2de5|
+00000420 2d 31 31 65 64 2d 38 63 39 62 2d 61 38 61 31 35 |-11ed-8c9b-a8a15|
+00000430 39 38 33 64 35 62 64 3a 31 2d 39 2c 0a 64 65 30 |983d5bd:1-9,.de0|
+00000440 64 63 37 38 30 2d 32 64 65 35 2d 31 31 65 64 2d |dc780-2de5-11ed-|
+00000450 62 31 62 31 2d 61 38 61 31 35 39 38 33 64 35 62 |b1b1-a8a15983d5b|
+00000460 64 3a 31 2d 37 |d:1-7|
+`,
+ dataOut: `
+00000000 00 00 00 03 40 00 00 00 fc 4b 04 03 fc 47 04 00 |....@....K...G..|
+00000010 fc 43 04 30 63 36 63 36 62 34 61 2d 32 64 65 35 |.C.0c6c6b4a-2de5|
+00000020 2d 31 31 65 64 2d 62 63 37 61 2d 61 38 61 31 35 |-11ed-bc7a-a8a15|
+00000030 39 38 33 64 35 62 64 3a 31 2d 34 2c 0a 31 33 65 |983d5bd:1-4,.13e|
+00000040 62 66 38 32 38 2d 32 64 65 35 2d 31 31 65 64 2d |bf828-2de5-11ed-|
+00000050 62 34 65 35 2d 61 38 61 31 35 39 38 33 64 35 62 |b4e5-a8a15983d5b|
+00000060 64 3a 31 2d 39 2c 0a 31 38 61 30 66 30 34 38 2d |d:1-9,.18a0f048-|
+00000070 32 64 65 34 2d 31 31 65 64 2d 38 63 31 63 2d 61 |2de4-11ed-8c1c-a|
+00000080 38 61 31 35 39 38 33 64 35 62 64 3a 31 2d 33 2c |8a15983d5bd:1-3,|
+00000090 0a 31 66 36 34 31 62 36 33 2d 32 64 65 35 2d 31 |.1f641b63-2de5-1|
+000000a0 31 65 64 2d 61 35 31 62 2d 61 38 61 31 35 39 38 |1ed-a51b-a8a1598|
+000000b0 33 64 35 62 64 3a 31 2d 39 2c 0a 32 63 36 35 36 |3d5bd:1-9,.2c656|
+000000c0 35 37 31 2d 32 64 65 35 2d 31 31 65 64 2d 61 34 |571-2de5-11ed-a4|
+000000d0 37 34 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a |74-a8a15983d5bd:|
+000000e0 31 2d 35 2c 0a 33 32 32 61 34 32 35 34 2d 32 64 |1-5,.322a4254-2d|
+000000f0 65 35 2d 31 31 65 64 2d 61 65 64 31 2d 61 38 61 |e5-11ed-aed1-a8a|
+00000100 31 35 39 38 33 64 35 62 64 3a 31 2d 34 2c 0a 33 |15983d5bd:1-4,.3|
+00000110 37 63 35 64 30 34 31 2d 32 64 65 35 2d 31 31 65 |7c5d041-2de5-11e|
+00000120 64 2d 38 64 33 66 2d 61 38 61 31 35 39 38 33 64 |d-8d3f-a8a15983d|
+00000130 35 62 64 3a 31 2d 31 32 2c 0a 34 31 34 33 32 37 |5bd:1-12,.414327|
+00000140 32 33 2d 32 64 65 35 2d 31 31 65 64 2d 61 61 36 |23-2de5-11ed-aa6|
+00000150 66 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 |f-a8a15983d5bd:1|
+00000160 2d 37 2c 0a 34 39 38 38 38 35 36 66 2d 32 64 65 |-7,.4988856f-2de|
+00000170 34 2d 31 31 65 64 2d 39 37 31 36 2d 61 38 61 31 |4-11ed-9716-a8a1|
+00000180 35 39 38 33 64 35 62 64 3a 31 2d 35 2c 0a 35 35 |5983d5bd:1-5,.55|
+00000190 38 36 61 64 34 65 2d 32 64 65 34 2d 31 31 65 64 |86ad4e-2de4-11ed|
+000001a0 2d 38 63 37 33 2d 61 38 61 31 35 39 38 33 64 35 |-8c73-a8a15983d5|
+000001b0 62 64 3a 31 2d 36 2c 0a 36 34 65 39 66 32 32 66 |bd:1-6,.64e9f22f|
+000001c0 2d 32 64 65 34 2d 31 31 65 64 2d 39 62 65 31 2d |-2de4-11ed-9be1-|
+000001d0 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 2d 33 |a8a15983d5bd:1-3|
+000001e0 2c 0a 36 62 31 36 34 37 30 65 2d 32 64 65 34 2d |,.6b16470e-2de4-|
+000001f0 31 31 65 64 2d 61 31 33 64 2d 61 38 61 31 35 39 |11ed-a13d-a8a159|
+00000200 38 33 64 35 62 64 3a 31 2d 34 2c 0a 37 35 65 37 |83d5bd:1-4,.75e7|
+00000210 65 32 38 65 2d 32 37 61 38 2d 31 31 65 64 2d 39 |e28e-27a8-11ed-9|
+00000220 61 30 36 2d 61 38 61 31 35 39 38 33 64 35 62 64 |a06-a8a15983d5bd|
+00000230 3a 31 2d 39 2c 0a 38 31 34 30 32 37 66 31 2d 32 |:1-9,.814027f1-2|
+00000240 64 65 34 2d 31 31 65 64 2d 39 65 33 63 2d 61 38 |de4-11ed-9e3c-a8|
+00000250 61 31 35 39 38 33 64 35 62 64 3a 31 2d 34 2c 0a |a15983d5bd:1-4,.|
+00000260 38 37 63 32 38 64 64 63 2d 32 64 65 34 2d 31 31 |87c28ddc-2de4-11|
+00000270 65 64 2d 38 32 37 32 2d 61 38 61 31 35 39 38 33 |ed-8272-a8a15983|
+00000280 64 35 62 64 3a 31 2d 31 39 2c 0a 39 30 35 38 33 |d5bd:1-19,.90583|
+00000290 35 62 37 2d 32 64 65 35 2d 31 31 65 64 2d 61 32 |5b7-2de5-11ed-a2|
+000002a0 39 39 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a |99-a8a15983d5bd:|
+000002b0 31 2d 38 2c 0a 39 37 64 66 36 30 63 39 2d 32 64 |1-8,.97df60c9-2d|
+000002c0 65 34 2d 31 31 65 64 2d 62 39 30 65 2d 61 38 61 |e4-11ed-b90e-a8a|
+000002d0 31 35 39 38 33 64 35 62 64 3a 31 2d 35 2c 0a 39 |15983d5bd:1-5,.9|
+000002e0 37 65 39 30 63 30 38 2d 32 64 65 35 2d 31 31 65 |7e90c08-2de5-11e|
+000002f0 64 2d 39 37 30 39 2d 61 38 61 31 35 39 38 33 64 |d-9709-a8a15983d|
+00000300 35 62 64 3a 31 2d 33 38 2c 0a 39 39 64 66 61 32 |5bd:1-38,.99dfa2|
+00000310 62 64 2d 32 64 65 33 2d 31 31 65 64 2d 62 37 39 |bd-2de3-11ed-b79|
+00000320 65 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 |e-a8a15983d5bd:1|
+00000330 2c 0a 61 31 62 63 34 33 34 32 2d 32 64 65 34 2d |,.a1bc4342-2de4-|
+00000340 31 31 65 64 2d 61 30 62 31 2d 61 38 61 31 35 39 |11ed-a0b1-a8a159|
+00000350 38 33 64 35 62 64 3a 31 2d 31 36 2c 0a 61 62 65 |83d5bd:1-16,.abe|
+00000360 35 65 32 61 34 2d 32 64 65 34 2d 31 31 65 64 2d |5e2a4-2de4-11ed-|
+00000370 62 62 33 63 2d 61 38 61 31 35 39 38 33 64 35 62 |bb3c-a8a15983d5b|
+00000380 64 3a 31 2d 33 2c 0a 62 37 64 39 61 62 39 37 2d |d:1-3,.b7d9ab97-|
+00000390 32 64 65 34 2d 31 31 65 64 2d 39 33 39 64 2d 61 |2de4-11ed-939d-a|
+000003a0 38 61 31 35 39 38 33 64 35 62 64 3a 31 2c 0a 62 |8a15983d5bd:1,.b|
+000003b0 64 33 64 30 34 30 30 2d 32 64 65 34 2d 31 31 65 |d3d0400-2de4-11e|
+000003c0 64 2d 38 62 36 61 2d 61 38 61 31 35 39 38 33 64 |d-8b6a-a8a15983d|
+000003d0 35 62 64 3a 31 2d 36 2c 0a 63 36 61 38 37 33 61 |5bd:1-6,.c6a873a|
+000003e0 63 2d 32 64 65 35 2d 31 31 65 64 2d 38 35 30 33 |c-2de5-11ed-8503|
+000003f0 2d 61 38 61 31 35 39 38 33 64 35 62 64 3a 31 2d |-a8a15983d5bd:1-|
+00000400 32 31 2c 0a 64 34 37 65 30 36 32 65 2d 32 64 65 |21,.d47e062e-2de|
+00000410 35 2d 31 31 65 64 2d 38 63 39 62 2d 61 38 61 31 |5-11ed-8c9b-a8a1|
+00000420 35 39 38 33 64 35 62 64 3a 31 2d 39 2c 0a 64 65 |5983d5bd:1-9,.de|
+00000430 30 64 63 37 38 30 2d 32 64 65 35 2d 31 31 65 64 |0dc780-2de5-11ed|
+00000440 2d 62 31 62 31 2d 61 38 61 31 35 39 38 33 64 35 |-b1b1-a8a15983d5|
00000450 62 64 3a 31 2d 37 |bd:1-7|`,
cc: CapabilityClientProtocol41 | CapabilityClientTransactions | CapabilityClientSessionTrack,
}}
@@ -601,9 +745,8 @@ func TestEOFOrLengthEncodedIntFuzz(t *testing.T) {
for i := 0; i < 100; i++ {
bytes := make([]byte, rand.Intn(16)+1)
_, err := crypto_rand.Read(bytes)
- if err != nil {
- t.Fatalf("error doing rand.Read")
- }
+ require.NoError(t, err, "error doing rand.Read")
+
bytes[0] = 0xfe
_, _, isInt := readLenEncInt(bytes, 0)
@@ -1018,7 +1161,15 @@ func (t testRun) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sq
panic("implement me")
}
-func (t testRun) ComBinlogDumpGTID(c *Conn, gtidSet GTIDSet) error {
+func (t testRun) ComRegisterReplica(c *Conn, replicaHost string, replicaPort uint16, replicaUser string, replicaPassword string) error {
+ panic("implement me")
+}
+
+func (t testRun) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error {
+ panic("implement me")
+}
+
+func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error {
panic("implement me")
}
@@ -1029,6 +1180,15 @@ func (t testRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Result)
if strings.Contains(query, "panic") {
panic("test panic attack!")
}
+ if strings.Contains(query, "close before rows read") {
+ c.writeFields(selectRowsResult)
+ // We want to close the connection after the fields are written
+ // and read on the client. So we sleep for 100 milliseconds
+ time.Sleep(100 * time.Millisecond)
+ c.Close()
+ return nil
+ }
+
if strings.Contains(query, "twice") {
callback(selectRowsResult)
}
diff --git a/go/mysql/conn_params.go b/go/mysql/conn_params.go
index e8846d96ead..061aa23f220 100644
--- a/go/mysql/conn_params.go
+++ b/go/mysql/conn_params.go
@@ -47,10 +47,6 @@ type ConnParams struct {
ServerName string `json:"server_name"`
ConnectTimeoutMs uint64 `json:"connect_timeout_ms"`
- // The following is only set when the deprecated "dbname" flags are
- // supplied and will be removed.
- DeprecatedDBName string
-
// The following is only set to force the client to connect without
// using CapabilityClientDeprecateEOF
DisableClientDeprecateEOF bool
diff --git a/go/mysql/constants.go b/go/mysql/constants.go
index adc6aeacbd3..ad5f4c0221e 100644
--- a/go/mysql/constants.go
+++ b/go/mysql/constants.go
@@ -243,6 +243,10 @@ const (
// ComBinlogDumpGTID is COM_BINLOG_DUMP_GTID.
ComBinlogDumpGTID = 0x1e
+ // ComRegisterReplica is COM_REGISTER_SLAVE
+ // https://dev.mysql.com/doc/internals/en/com-register-slave.html
+ ComRegisterReplica = 0x15
+
// OKPacket is the header of the OK packet.
OKPacket = 0x00
@@ -273,7 +277,7 @@ const (
// Error codes for client-side errors.
// Originally found in include/mysql/errmsg.h and
-// https://dev.mysql.com/doc/refman/5.7/en/error-messages-client.html
+// https://dev.mysql.com/doc/mysql-errors/en/client-error-reference.html
const (
// CRUnknownError is CR_UNKNOWN_ERROR
CRUnknownError = 2000
@@ -286,6 +290,10 @@ const (
// This is returned if a connection via a TCP socket fails.
CRConnHostError = 2003
+ // CRUnknownHost is CR_UNKNOWN_HOST
+ // This is returned if the host name cannot be resolved.
+ CRUnknownHost = 2005
+
// CRServerGone is CR_SERVER_GONE_ERROR.
// This is returned if the client tries to send a command but it fails.
CRServerGone = 2006
@@ -325,7 +333,7 @@ const (
// Error codes for server-side errors.
// Originally found in include/mysql/mysqld_error.h and
-// https://dev.mysql.com/doc/refman/5.7/en/error-messages-server.html
+// https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html
// The below are in sorted order by value, grouped by vterror code they should be bucketed into.
// See above reference for more information on each code.
const (
@@ -364,6 +372,7 @@ const (
ERServerShutdown = 1053
// not found
+ ERDbDropExists = 1008
ERCantFindFile = 1017
ERFormNotFound = 1029
ERKeyNotFound = 1032
@@ -375,7 +384,6 @@ const (
ERNoSuchTable = 1146
ERNonExistingTableGrant = 1147
ERKeyDoesNotExist = 1176
- ERDbDropExists = 1008
// permissions
ERDBAccessDenied = 1044
@@ -403,6 +411,7 @@ const (
ERRowIsReferenced = 1217
ERCantUpdateWithReadLock = 1223
ERNoDefault = 1230
+ ERMasterFatalReadingBinlog = 1236
EROperandColumns = 1241
ERSubqueryNo1Row = 1242
ERWarnDataOutOfRange = 1264
@@ -411,19 +420,19 @@ const (
EROptionPreventsStatement = 1290
ERDuplicatedValueInType = 1291
ERSPDoesNotExist = 1305
+ ERNoDefaultForField = 1364
+ ErSPNotVarArg = 1414
ERRowIsReferenced2 = 1451
ErNoReferencedRow2 = 1452
- ErSPNotVarArg = 1414
+ ERDupIndex = 1831
ERInnodbReadOnly = 1874
- ERMasterFatalReadingBinlog = 1236
- ERNoDefaultForField = 1364
// already exists
+ ERDbCreateExists = 1007
ERTableExists = 1050
ERDupEntry = 1062
ERFileExists = 1086
ERUDFExists = 1125
- ERDbCreateExists = 1007
// aborted
ERGotSignal = 1078
@@ -509,7 +518,11 @@ const (
ERWrongFKDef = 1239
ERKeyRefDoNotMatchTableRef = 1240
ERCyclicReference = 1245
+ ERIllegalReference = 1247
+ ERDerivedMustHaveAlias = 1248
+ ERTableNameNotAllowedHere = 1250
ERCollationCharsetMismatch = 1253
+ ERWarnDataTruncated = 1265
ERCantAggregate2Collations = 1267
ERCantAggregate3Collations = 1270
ERCantAggregateNCollations = 1271
@@ -523,16 +536,14 @@ const (
ERInvalidOnUpdate = 1294
ERUnknownTimeZone = 1298
ERInvalidCharacterString = 1300
- ERIllegalReference = 1247
- ERDerivedMustHaveAlias = 1248
- ERTableNameNotAllowedHere = 1250
ERQueryInterrupted = 1317
ERTruncatedWrongValueForField = 1366
ERIllegalValueForType = 1367
ERDataTooLong = 1406
ErrWrongValueForType = 1411
- ERWarnDataTruncated = 1265
+ ERNoSuchUser = 1449
ERForbidSchemaChange = 1450
+ ERWrongValue = 1525
ERDataOutOfRange = 1690
ERInvalidJSONText = 3140
ERInvalidJSONTextInParams = 3141
@@ -541,7 +552,9 @@ const (
ERInvalidCastToJSON = 3147
ERJSONValueTooBig = 3150
ERJSONDocumentTooDeep = 3157
- ERWrongValue = 1525
+
+ // max execution time exceeded
+ ERQueryTimeout = 3024
ErrCantCreateGeometryObject = 1416
ErrGISDataWrongEndianess = 3055
@@ -677,8 +690,12 @@ func IsEphemeralError(err error) bool {
CRConnHostError,
CRMalformedPacket,
CRNamedPipeStateError,
+ CRServerHandshakeErr,
+ CRServerGone,
CRServerLost,
CRSSLConnectionError,
+ CRUnknownError,
+ CRUnknownHost,
ERCantCreateThread,
ERDiskFull,
ERForcingClose,
@@ -689,6 +706,7 @@ func IsEphemeralError(err error) bool {
ERInternalError,
ERLockDeadlock,
ERLockWaitTimeout,
+ ERQueryTimeout,
EROutOfMemory,
EROutOfResources,
EROutOfSortMemory,
diff --git a/go/mysql/constants_test.go b/go/mysql/constants_test.go
index a3da0bf2fc8..34d8c09ca54 100644
--- a/go/mysql/constants_test.go
+++ b/go/mysql/constants_test.go
@@ -19,6 +19,8 @@ package mysql
import (
"errors"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
func TestIsConnErr(t *testing.T) {
@@ -46,9 +48,8 @@ func TestIsConnErr(t *testing.T) {
}}
for _, tcase := range testcases {
got := IsConnErr(tcase.in)
- if got != tcase.want {
- t.Errorf("IsConnErr(%#v): %v, want %v", tcase.in, got, tcase.want)
- }
+ assert.Equal(t, tcase.want, got, "IsConnErr(%#v): %v, want %v", tcase.in, got, tcase.want)
+
}
}
@@ -77,8 +78,7 @@ func TestIsConnLostDuringQuery(t *testing.T) {
}}
for _, tcase := range testcases {
got := IsConnLostDuringQuery(tcase.in)
- if got != tcase.want {
- t.Errorf("IsConnLostDuringQuery(%#v): %v, want %v", tcase.in, got, tcase.want)
- }
+ assert.Equal(t, tcase.want, got, "IsConnLostDuringQuery(%#v): %v, want %v", tcase.in, got, tcase.want)
+
}
}
diff --git a/go/mysql/encoding.go b/go/mysql/encoding.go
index 38cf932634b..9ebf301d95b 100644
--- a/go/mysql/encoding.go
+++ b/go/mysql/encoding.go
@@ -188,6 +188,11 @@ func readEOFString(data []byte, pos int) (string, int, bool) {
return string(data[pos:]), len(data) - pos, true
}
+func readUint8(data []byte, pos int) (uint8, int, bool) {
+ b, pos, ok := readByte(data, pos)
+ return uint8(b), pos, ok
+}
+
func readUint16(data []byte, pos int) (uint16, int, bool) {
if pos+1 >= len(data) {
return 0, 0, false
diff --git a/go/mysql/encoding_test.go b/go/mysql/encoding_test.go
index ac38b5c7db0..c0081a6455b 100644
--- a/go/mysql/encoding_test.go
+++ b/go/mysql/encoding_test.go
@@ -19,6 +19,8 @@ package mysql
import (
"bytes"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
func TestEncLenInt(t *testing.T) {
@@ -52,22 +54,14 @@ func TestEncLenInt(t *testing.T) {
// Check successful encoding.
data := make([]byte, len(test.encoded))
pos := writeLenEncInt(data, 0, test.value)
- if pos != len(test.encoded) {
- t.Errorf("unexpected pos %v after writeLenEncInt(%x), expected %v", pos, test.value, len(test.encoded))
- }
- if !bytes.Equal(data, test.encoded) {
- t.Errorf("unexpected encoded value for %x, got %v expected %v", test.value, data, test.encoded)
- }
+ assert.Equal(t, len(test.encoded), pos, "unexpected pos %v after writeLenEncInt(%x), expected %v", pos, test.value, len(test.encoded))
+ assert.True(t, bytes.Equal(data, test.encoded), "unexpected encoded value for %x, got %v expected %v", test.value, data, test.encoded)
// Check successful encoding with offset.
data = make([]byte, len(test.encoded)+1)
pos = writeLenEncInt(data, 1, test.value)
- if pos != len(test.encoded)+1 {
- t.Errorf("unexpected pos %v after writeLenEncInt(%x, 1), expected %v", pos, test.value, len(test.encoded)+1)
- }
- if !bytes.Equal(data[1:], test.encoded) {
- t.Errorf("unexpected encoded value for %x, got %v expected %v", test.value, data, test.encoded)
- }
+ assert.Equal(t, len(test.encoded)+1, pos, "unexpected pos %v after writeLenEncInt(%x, 1), expected %v", pos, test.value, len(test.encoded)+1)
+ assert.True(t, bytes.Equal(data[1:], test.encoded), "unexpected encoded value for %x, got %v expected %v", test.value, data, test.encoded)
// Check successful decoding.
got, pos, ok := readLenEncInt(test.encoded, 0)
@@ -77,9 +71,8 @@ func TestEncLenInt(t *testing.T) {
// Check failed decoding.
_, _, ok = readLenEncInt(test.encoded[:len(test.encoded)-1], 0)
- if ok {
- t.Errorf("readLenEncInt returned ok=true for shorter value %x", test.value)
- }
+ assert.False(t, ok, "readLenEncInt returned ok=true for shorter value %x", test.value)
+
}
}
@@ -102,9 +95,8 @@ func TestEncUint16(t *testing.T) {
}
_, _, ok = readUint16(data, 9)
- if ok {
- t.Errorf("readUint16 returned ok=true for shorter value")
- }
+ assert.False(t, ok, "readUint16 returned ok=true for shorter value")
+
}
func TestEncBytes(t *testing.T) {
@@ -120,9 +112,7 @@ func TestEncBytes(t *testing.T) {
}
_, _, ok = readByte(data, 10)
- if ok {
- t.Errorf("readByte returned ok=true for shorter value")
- }
+ assert.False(t, ok, "readByte returned ok=true for shorter value")
b, pos, ok := readBytes(data, 5, 2)
expected := []byte{0xab, 0x00}
@@ -131,9 +121,8 @@ func TestEncBytes(t *testing.T) {
}
_, _, ok = readBytes(data, 9, 2)
- if ok {
- t.Errorf("readBytes returned ok=true for shorter value")
- }
+ assert.False(t, ok, "readBytes returned ok=true for shorter value")
+
}
func TestEncUint32(t *testing.T) {
@@ -155,9 +144,8 @@ func TestEncUint32(t *testing.T) {
}
_, _, ok = readUint32(data, 7)
- if ok {
- t.Errorf("readUint32 returned ok=true for shorter value")
- }
+ assert.False(t, ok, "readUint32 returned ok=true for shorter value")
+
}
func TestEncUint64(t *testing.T) {
@@ -180,9 +168,8 @@ func TestEncUint64(t *testing.T) {
}
_, _, ok = readUint64(data, 7)
- if ok {
- t.Errorf("readUint64 returned ok=true for shorter value")
- }
+ assert.False(t, ok, "readUint64 returned ok=true for shorter value")
+
}
func TestEncString(t *testing.T) {
@@ -232,22 +219,14 @@ func TestEncString(t *testing.T) {
// Check successful encoding.
data := make([]byte, len(test.lenEncoded))
pos := writeLenEncString(data, 0, test.value)
- if pos != len(test.lenEncoded) {
- t.Errorf("unexpected pos %v after writeLenEncString(%v), expected %v", pos, test.value, len(test.lenEncoded))
- }
- if !bytes.Equal(data, test.lenEncoded) {
- t.Errorf("unexpected lenEncoded value for %v, got %v expected %v", test.value, data, test.lenEncoded)
- }
+ assert.Equal(t, len(test.lenEncoded), pos, "unexpected pos %v after writeLenEncString(%v), expected %v", pos, test.value, len(test.lenEncoded))
+ assert.True(t, bytes.Equal(data, test.lenEncoded), "unexpected lenEncoded value for %v, got %v expected %v", test.value, data, test.lenEncoded)
// Check successful encoding with offset.
data = make([]byte, len(test.lenEncoded)+1)
pos = writeLenEncString(data, 1, test.value)
- if pos != len(test.lenEncoded)+1 {
- t.Errorf("unexpected pos %v after writeLenEncString(%v, 1), expected %v", pos, test.value, len(test.lenEncoded)+1)
- }
- if !bytes.Equal(data[1:], test.lenEncoded) {
- t.Errorf("unexpected lenEncoded value for %v, got %v expected %v", test.value, data[1:], test.lenEncoded)
- }
+ assert.Equal(t, len(test.lenEncoded)+1, pos, "unexpected pos %v after writeLenEncString(%v, 1), expected %v", pos, test.value, len(test.lenEncoded)+1)
+ assert.True(t, bytes.Equal(data[1:], test.lenEncoded), "unexpected lenEncoded value for %v, got %v expected %v", test.value, data[1:], test.lenEncoded)
// Check successful decoding as string.
got, pos, ok := readLenEncString(test.lenEncoded, 0)
@@ -257,15 +236,11 @@ func TestEncString(t *testing.T) {
// Check failed decoding with shorter data.
_, _, ok = readLenEncString(test.lenEncoded[:len(test.lenEncoded)-1], 0)
- if ok {
- t.Errorf("readLenEncString returned ok=true for shorter value %v", test.value)
- }
+ assert.False(t, ok, "readLenEncString returned ok=true for shorter value %v", test.value)
// Check failed decoding with no data.
_, _, ok = readLenEncString([]byte{}, 0)
- if ok {
- t.Errorf("readLenEncString returned ok=true for empty value %v", test.value)
- }
+ assert.False(t, ok, "readLenEncString returned ok=true for empty value %v", test.value)
// Check successful skipping as string.
pos, ok = skipLenEncString(test.lenEncoded, 0)
@@ -275,15 +250,11 @@ func TestEncString(t *testing.T) {
// Check failed skipping with shorter data.
_, ok = skipLenEncString(test.lenEncoded[:len(test.lenEncoded)-1], 0)
- if ok {
- t.Errorf("skipLenEncString returned ok=true for shorter value %v", test.value)
- }
+ assert.False(t, ok, "skipLenEncString returned ok=true for shorter value %v", test.value)
// Check failed skipping with no data.
_, ok = skipLenEncString([]byte{}, 0)
- if ok {
- t.Errorf("skipLenEncString returned ok=true for empty value %v", test.value)
- }
+ assert.False(t, ok, "skipLenEncString returned ok=true for empty value %v", test.value)
// Check successful decoding as bytes.
gotb, pos, ok := readLenEncStringAsBytes(test.lenEncoded, 0)
@@ -293,15 +264,11 @@ func TestEncString(t *testing.T) {
// Check failed decoding as bytes with shorter data.
_, _, ok = readLenEncStringAsBytes(test.lenEncoded[:len(test.lenEncoded)-1], 0)
- if ok {
- t.Errorf("readLenEncStringAsBytes returned ok=true for shorter value %v", test.value)
- }
+ assert.False(t, ok, "readLenEncStringAsBytes returned ok=true for shorter value %v", test.value)
// Check failed decoding as bytes with no data.
_, _, ok = readLenEncStringAsBytes([]byte{}, 0)
- if ok {
- t.Errorf("readLenEncStringAsBytes returned ok=true for empty value %v", test.value)
- }
+ assert.False(t, ok, "readLenEncStringAsBytes returned ok=true for empty value %v", test.value)
// Check successful decoding as bytes.
gotbcopy, posCopy, ok := readLenEncStringAsBytesCopy(test.lenEncoded, 0)
@@ -311,27 +278,19 @@ func TestEncString(t *testing.T) {
// Check failed decoding as bytes with shorter data.
_, _, ok = readLenEncStringAsBytesCopy(test.lenEncoded[:len(test.lenEncoded)-1], 0)
- if ok {
- t.Errorf("readLenEncStringAsBytes returned ok=true for shorter value %v", test.value)
- }
+ assert.False(t, ok, "readLenEncStringAsBytes returned ok=true for shorter value %v", test.value)
// Check failed decoding as bytes with no data.
_, _, ok = readLenEncStringAsBytesCopy([]byte{}, 0)
- if ok {
- t.Errorf("readLenEncStringAsBytes returned ok=true for empty value %v", test.value)
- }
+ assert.False(t, ok, "readLenEncStringAsBytes returned ok=true for empty value %v", test.value)
// null encoded tests.
// Check successful encoding.
data = make([]byte, len(test.nullEncoded))
pos = writeNullString(data, 0, test.value)
- if pos != len(test.nullEncoded) {
- t.Errorf("unexpected pos %v after writeNullString(%v), expected %v", pos, test.value, len(test.nullEncoded))
- }
- if !bytes.Equal(data, test.nullEncoded) {
- t.Errorf("unexpected nullEncoded value for %v, got %v expected %v", test.value, data, test.nullEncoded)
- }
+ assert.Equal(t, len(test.nullEncoded), pos, "unexpected pos %v after writeNullString(%v), expected %v", pos, test.value, len(test.nullEncoded))
+ assert.True(t, bytes.Equal(data, test.nullEncoded), "unexpected nullEncoded value for %v, got %v expected %v", test.value, data, test.nullEncoded)
// Check successful decoding.
got, pos, ok = readNullString(test.nullEncoded, 0)
@@ -341,21 +300,15 @@ func TestEncString(t *testing.T) {
// Check failed decoding with shorter data.
_, _, ok = readNullString(test.nullEncoded[:len(test.nullEncoded)-1], 0)
- if ok {
- t.Errorf("readNullString returned ok=true for shorter value %v", test.value)
- }
+ assert.False(t, ok, "readNullString returned ok=true for shorter value %v", test.value)
// EOF encoded tests.
// Check successful encoding.
data = make([]byte, len(test.eofEncoded))
pos = writeEOFString(data, 0, test.value)
- if pos != len(test.eofEncoded) {
- t.Errorf("unexpected pos %v after writeEOFString(%v), expected %v", pos, test.value, len(test.eofEncoded))
- }
- if !bytes.Equal(data, test.eofEncoded[:len(test.eofEncoded)]) {
- t.Errorf("unexpected eofEncoded value for %v, got %v expected %v", test.value, data, test.eofEncoded)
- }
+ assert.Equal(t, len(test.eofEncoded), pos, "unexpected pos %v after writeEOFString(%v), expected %v", pos, test.value, len(test.eofEncoded))
+ assert.True(t, bytes.Equal(data, test.eofEncoded[:len(test.eofEncoded)]), "unexpected eofEncoded value for %v, got %v expected %v", test.value, data, test.eofEncoded)
// Check successful decoding.
got, pos, ok = readEOFString(test.eofEncoded, 0)
diff --git a/go/mysql/endtoend/client_test.go b/go/mysql/endtoend/client_test.go
index 051fbc1e5c1..a48c9629d51 100644
--- a/go/mysql/endtoend/client_test.go
+++ b/go/mysql/endtoend/client_test.go
@@ -243,10 +243,8 @@ func expectNoError(t *testing.T, err error) {
func expectFlag(t *testing.T, msg string, flag, want bool) {
t.Helper()
- if flag != want {
- // We cannot continue the test if flag is incorrect.
- t.Fatalf("%s: %v, want: %v", msg, flag, want)
- }
+ require.Equal(t, want, flag, "%s: %v, want: %v", msg, flag, want)
+
}
// TestTLS tests our client can connect via SSL.
@@ -256,12 +254,8 @@ func TestTLS(t *testing.T) {
// First make sure the official 'mysql' client can connect.
output, ok := runMysql(t, ¶ms, "status")
- if !ok {
- t.Fatalf("'mysql -e status' failed: %v", output)
- }
- if !strings.Contains(output, "Cipher in use is") {
- t.Fatalf("cannot connect via SSL: %v", output)
- }
+ require.True(t, ok, "'mysql -e status' failed: %v", output)
+ require.True(t, strings.Contains(output, "Cipher in use is"), "cannot connect via SSL: %v", output)
// Now connect with our client.
ctx := context.Background()
@@ -272,9 +266,8 @@ func TestTLS(t *testing.T) {
defer conn.Close()
result, err := conn.ExecuteFetch("SHOW STATUS LIKE 'Ssl_cipher'", 10, true)
- if err != nil {
- t.Fatalf("SHOW STATUS LIKE 'Ssl_cipher' failed: %v", err)
- }
+ require.NoError(t, err, "SHOW STATUS LIKE 'Ssl_cipher' failed: %v", err)
+
if len(result.Rows) != 1 || result.Rows[0][0].ToString() != "Ssl_cipher" ||
result.Rows[0][1].ToString() == "" {
t.Fatalf("SHOW STATUS LIKE 'Ssl_cipher' returned unexpected result: %v", result)
@@ -291,9 +284,8 @@ func TestReplicationStatus(t *testing.T) {
defer conn.Close()
status, err := conn.ShowReplicationStatus()
- if err != mysql.ErrNotReplica {
- t.Errorf("Got unexpected result for ShowReplicationStatus: %v %v", status, err)
- }
+ assert.Equal(t, mysql.ErrNotReplica, err, "Got unexpected result for ShowReplicationStatus: %v %v", status, err)
+
}
func TestSessionTrackGTIDs(t *testing.T) {
@@ -326,9 +318,7 @@ func TestCachingSha2Password(t *testing.T) {
defer conn.Close()
qr, err := conn.ExecuteFetch(`select true from information_schema.PLUGINS where PLUGIN_NAME='caching_sha2_password' and PLUGIN_STATUS='ACTIVE'`, 1, false)
- if err != nil {
- t.Errorf("select true from information_schema.PLUGINS failed: %v", err)
- }
+ assert.NoError(t, err, "select true from information_schema.PLUGINS failed: %v", err)
if len(qr.Rows) != 1 {
t.Skip("Server does not support caching_sha2_password plugin")
@@ -370,10 +360,35 @@ func TestClientInfo(t *testing.T) {
// This is the simplest query that would return some textual data in the 'info' field
result, err := conn.ExecuteFetch(`PREPARE stmt1 FROM 'SELECT 1 = 1'`, -1, true)
- if err != nil {
- t.Fatalf("select failed: %v", err)
- }
- if result.Info != infoPrepared {
- t.Fatalf("expected result.Info=%q, got=%q", infoPrepared, result.Info)
- }
+ require.NoError(t, err, "select failed: %v", err)
+ require.Equal(t, infoPrepared, result.Info, "expected result.Info=%q, got=%q", infoPrepared, result.Info)
+}
+
+func TestBaseShowTables(t *testing.T) {
+ params := connParams
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, ¶ms)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ sql := conn.BaseShowTables()
+ // An improved test would make assertions about the results. This test just
+ // makes sure there aren't any errors.
+ _, err = conn.ExecuteFetch(sql, -1, true)
+ require.NoError(t, err)
+}
+
+func TestBaseShowTablesFilePos(t *testing.T) {
+ params := connParams
+ params.Flavor = "FilePos"
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, ¶ms)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ sql := conn.BaseShowTables()
+ // An improved test would make assertions about the results. This test just
+ // makes sure there aren't any errors.
+ _, err = conn.ExecuteFetch(sql, -1, true)
+ require.NoError(t, err)
}
diff --git a/go/mysql/endtoend/main_test.go b/go/mysql/endtoend/main_test.go
index a18e278a26f..b72b466e4a1 100644
--- a/go/mysql/endtoend/main_test.go
+++ b/go/mysql/endtoend/main_test.go
@@ -25,6 +25,8 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/mysql"
vtenv "vitess.io/vitess/go/vt/env"
"vitess.io/vitess/go/vt/mysqlctl"
@@ -41,38 +43,25 @@ var (
// assertSQLError makes sure we get the right error.
func assertSQLError(t *testing.T, err error, code int, sqlState string, subtext string, query string) {
t.Helper()
+ require.Error(t, err, "was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext)
- if err == nil {
- t.Fatalf("was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext)
- }
serr, ok := err.(*mysql.SQLError)
- if !ok {
- t.Fatalf("was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err)
- }
- if serr.Num != code {
- t.Fatalf("was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num)
- }
- if serr.State != sqlState {
- t.Fatalf("was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State)
- }
- if subtext != "" && !strings.Contains(serr.Message, subtext) {
- t.Fatalf("was expecting SQLError %v / %v / %v but got message %v", code, sqlState, subtext, serr.Message)
- }
- if serr.Query != query {
- t.Fatalf("was expecting SQLError %v / %v / %v with Query '%v' but got query '%v'", code, sqlState, subtext, query, serr.Query)
- }
+ require.True(t, ok, "was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err)
+ require.Equal(t, code, serr.Num, "was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num)
+ require.Equal(t, sqlState, serr.State, "was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State)
+ require.True(t, subtext == "" || strings.Contains(serr.Message, subtext), "was expecting SQLError %v / %v / %v but got message %v", code, sqlState, subtext, serr.Message)
+ require.Equal(t, query, serr.Query, "was expecting SQLError %v / %v / %v with Query '%v' but got query '%v'", code, sqlState, subtext, query, serr.Query)
+
}
// runMysql forks a mysql command line process connecting to the provided server.
func runMysql(t *testing.T, params *mysql.ConnParams, command string) (string, bool) {
dir, err := vtenv.VtMysqlRoot()
- if err != nil {
- t.Fatalf("vtenv.VtMysqlRoot failed: %v", err)
- }
+ require.NoError(t, err, "vtenv.VtMysqlRoot failed: %v", err)
+
name, err := binaryPath(dir, "mysql")
- if err != nil {
- t.Fatalf("binaryPath failed: %v", err)
- }
+ require.NoError(t, err, "binaryPath failed: %v", err)
+
// The args contain '-v' 3 times, to switch to very verbose output.
// In particular, it has the message:
// Query OK, 1 row affected (0.00 sec)
diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go
index 839c989c054..473a51f88f8 100644
--- a/go/mysql/endtoend/query_test.go
+++ b/go/mysql/endtoend/query_test.go
@@ -61,27 +61,21 @@ func TestQueries(t *testing.T) {
// Try a simple DDL.
result, err := conn.ExecuteFetch("create table a(id int, name varchar(128), primary key(id))", 0, false)
- if err != nil {
- t.Fatalf("create table failed: %v", err)
- }
- if result.RowsAffected != 0 {
- t.Errorf("create table returned RowsAffected %v, was expecting 0", result.RowsAffected)
- }
+ require.NoError(t, err, "create table failed: %v", err)
+ assert.Equal(t, uint64(0), result.RowsAffected, "create table returned RowsAffected %v, was expecting 0", result.RowsAffected)
// Try a simple insert.
result, err = conn.ExecuteFetch("insert into a(id, name) values(10, 'nice name')", 1000, true)
- if err != nil {
- t.Fatalf("insert failed: %v", err)
- }
+ require.NoError(t, err, "insert failed: %v", err)
+
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for insert: %v", result)
}
// And re-read what we inserted.
result, err = conn.ExecuteFetch("select * from a", 1000, true)
- if err != nil {
- t.Fatalf("insert failed: %v", err)
- }
+ require.NoError(t, err, "insert failed: %v", err)
+
collID := getDefaultCollationID()
expectedResult := &sqltypes.Result{
Fields: []*querypb.Field{
@@ -120,20 +114,16 @@ func TestQueries(t *testing.T) {
if !result.Equal(expectedResult) {
// MySQL 5.7 is adding the NO_DEFAULT_VALUE_FLAG to Flags.
expectedResult.Fields[0].Flags |= uint32(querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)
- if !result.Equal(expectedResult) {
- t.Errorf("unexpected result for select, got:\n%v\nexpected:\n%v\n", result, expectedResult)
- }
+ assert.True(t, result.Equal(expectedResult), "unexpected result for select, got:\n%v\nexpected:\n%v\n", result, expectedResult)
+
}
// Insert a few rows.
for i := 0; i < 100; i++ {
result, err := conn.ExecuteFetch(fmt.Sprintf("insert into a(id, name) values(%v, 'nice name %v')", 1000+i, i), 1000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch(%v) failed: %v", i, err)
- }
- if result.RowsAffected != 1 {
- t.Errorf("insert into returned RowsAffected %v, was expecting 1", result.RowsAffected)
- }
+ require.NoError(t, err, "ExecuteFetch(%v) failed: %v", i, err)
+ assert.Equal(t, uint64(1), result.RowsAffected, "insert into returned RowsAffected %v, was expecting 1", result.RowsAffected)
+
}
// And use a streaming query to read them back.
@@ -143,12 +133,9 @@ func TestQueries(t *testing.T) {
// And drop the table.
result, err = conn.ExecuteFetch("drop table a", 0, false)
- if err != nil {
- t.Fatalf("drop table failed: %v", err)
- }
- if result.RowsAffected != 0 {
- t.Errorf("insert into returned RowsAffected %v, was expecting 0", result.RowsAffected)
- }
+ require.NoError(t, err, "drop table failed: %v", err)
+ assert.Equal(t, uint64(0), result.RowsAffected, "insert into returned RowsAffected %v, was expecting 0", result.RowsAffected)
+
}
func TestLargeQueries(t *testing.T) {
@@ -172,16 +159,14 @@ func TestLargeQueries(t *testing.T) {
expectedString := randString((i+1)*mysql.MaxPacketSize + j)
result, err := conn.ExecuteFetch(fmt.Sprintf("select \"%s\"", expectedString), -1, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+
if len(result.Rows) != 1 || len(result.Rows[0]) != 1 || result.Rows[0][0].IsNull() {
t.Fatalf("ExecuteFetch on large query returned poorly-formed result. " +
"Expected single row single column string.")
}
- if result.Rows[0][0].ToString() != expectedString {
- t.Fatalf("Result row was incorrect. Suppressing large string")
- }
+ require.Equal(t, expectedString, result.Rows[0][0].ToString(), "Result row was incorrect. Suppressing large string")
+
}
}
}
@@ -221,36 +206,31 @@ func readRowsUsingStream(t *testing.T, conn *mysql.Conn, expectedCount int) {
},
}
fields, err := conn.Fields()
- if err != nil {
- t.Fatalf("Fields failed: %v", err)
- }
+ require.NoError(t, err, "Fields failed: %v", err)
+
if !sqltypes.FieldsEqual(fields, expectedFields) {
// MySQL 5.7 is adding the NO_DEFAULT_VALUE_FLAG to Flags.
expectedFields[0].Flags |= uint32(querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)
- if !sqltypes.FieldsEqual(fields, expectedFields) {
- t.Fatalf("fields are not right, got:\n%v\nexpected:\n%v", fields, expectedFields)
- }
+ require.True(t, sqltypes.FieldsEqual(fields, expectedFields), "fields are not right, got:\n%v\nexpected:\n%v", fields, expectedFields)
+
}
// Read the rows.
count := 0
for {
row, err := conn.FetchNext(nil)
- if err != nil {
- t.Fatalf("FetchNext failed: %v", err)
- }
+ require.NoError(t, err, "FetchNext failed: %v", err)
+
if row == nil {
// We're done.
break
}
- if len(row) != 2 {
- t.Fatalf("Unexpected row found: %v", row)
- }
+ require.Equal(t, 2, len(row), "Unexpected row found: %v", row)
+
count++
}
- if count != expectedCount {
- t.Errorf("Got unexpected count %v for query, was expecting %v", count, expectedCount)
- }
+ assert.Equal(t, expectedCount, count, "Got unexpected count %v for query, was expecting %v", count, expectedCount)
+
conn.CloseResult()
}
@@ -264,35 +244,24 @@ func doTestWarnings(t *testing.T, disableClientDeprecateEOF bool) {
defer conn.Close()
result, err := conn.ExecuteFetch("create table a(id int, val int not null, primary key(id))", 0, false)
- if err != nil {
- t.Fatalf("create table failed: %v", err)
- }
- if result.RowsAffected != 0 {
- t.Errorf("create table returned RowsAffected %v, was expecting 0", result.RowsAffected)
- }
+ require.NoError(t, err, "create table failed: %v", err)
+ assert.Equal(t, uint64(0), result.RowsAffected, "create table returned RowsAffected %v, was expecting 0", result.RowsAffected)
// Disable strict mode
_, err = conn.ExecuteFetch("set session sql_mode=''", 0, false)
- if err != nil {
- t.Fatalf("disable strict mode failed: %v", err)
- }
+ require.NoError(t, err, "disable strict mode failed: %v", err)
// Try a simple insert with a null value
result, warnings, err := conn.ExecuteFetchWithWarningCount("insert into a(id) values(10)", 1000, true)
- if err != nil {
- t.Fatalf("insert failed: %v", err)
- }
- if result.RowsAffected != 1 || len(result.Rows) != 0 {
- t.Errorf("unexpected result for insert: %v", result)
- }
- if warnings != 1 {
- t.Errorf("unexpected result for warnings: %v", warnings)
- }
+ require.NoError(t, err, "insert failed: %v", err)
+
+ assert.Equal(t, uint64(1), result.RowsAffected, "unexpected rows affected by insert; result: %v", result)
+ assert.Equal(t, 0, len(result.Rows), "unexpected row count in result for insert: %v", result)
+ assert.Equal(t, uint16(1), warnings, "unexpected result for warnings: %v", warnings)
_, err = conn.ExecuteFetch("drop table a", 0, false)
- if err != nil {
- t.Fatalf("create table failed: %v", err)
- }
+ require.NoError(t, err, "create table failed: %v", err)
+
}
func TestWarningsDeprecateEOF(t *testing.T) {
diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go
index 4376a4b9bd4..9664d7a31ec 100644
--- a/go/mysql/endtoend/replication_test.go
+++ b/go/mysql/endtoend/replication_test.go
@@ -25,6 +25,7 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/vt/vtgate/evalengine"
@@ -64,18 +65,15 @@ func connectForReplication(t *testing.T, rbr bool) (*mysql.Conn, mysql.BinlogFor
// First we get the current binlog position.
result, err := conn.ExecuteFetch("SHOW MASTER STATUS", 1, true)
- if err != nil {
- t.Fatalf("SHOW MASTER STATUS failed: %v", err)
- }
+ require.NoError(t, err, "SHOW MASTER STATUS failed: %v", err)
+
if len(result.Fields) < 2 || result.Fields[0].Name != "File" || result.Fields[1].Name != "Position" ||
len(result.Rows) != 1 {
t.Fatalf("SHOW MASTER STATUS returned unexpected result: %v", result)
}
file := result.Rows[0][0].ToString()
position, err := evalengine.ToUint64(result.Rows[0][1])
- if err != nil {
- t.Fatalf("SHOW MASTER STATUS returned invalid position: %v", result.Rows[0][1])
- }
+ require.NoError(t, err, "SHOW MASTER STATUS returned invalid position: %v", result.Rows[0][1])
// Tell the server that we understand the format of events
// that will be used if binlog_checksum is enabled on the server.
@@ -92,28 +90,21 @@ func connectForReplication(t *testing.T, rbr bool) (*mysql.Conn, mysql.BinlogFor
var f mysql.BinlogFormat
for {
be, err := conn.ReadBinlogEvent()
- if err != nil {
- t.Fatalf("ReadPacket failed: %v", err)
- }
- if !be.IsValid() {
- t.Fatalf("NewMysql56BinlogEvent has an invalid packet: %v", be)
- }
+ require.NoError(t, err, "ReadPacket failed: %v", err)
+ require.True(t, be.IsValid(), "NewMysql56BinlogEvent has an invalid packet: %v", be)
// Skip rotate packets. These are normal as first packets.
if be.IsRotate() {
t.Logf("Got a rotate packet: %v", be)
continue
}
-
// And we want a FORMAT_DESCRIPTION_EVENT.
// Print a few things about the event for sanity checks.
- if !be.IsFormatDescription() {
- t.Fatalf("Unexpected packet: %v", be)
- }
+ require.True(t, be.IsFormatDescription(), "Unexpected packet: %v", be)
+
f, err = be.Format()
- if err != nil {
- t.Fatalf("Format() returned error: %v", err)
- }
+ require.NoError(t, err, "Format() returned error: %v", err)
+
t.Logf("Got a FORMAT_DESCRIPTION_EVENT packet: %v\nWith format: %v", be, f)
break
}
@@ -138,12 +129,9 @@ func TestReplicationConnectionClosing(t *testing.T) {
data, err := conn.ReadPacket()
if err != nil {
serr, ok := err.(*mysql.SQLError)
- if !ok {
- t.Errorf("Got a non mysql.SQLError error: %v", err)
- }
- if serr.Num != mysql.CRServerLost {
- t.Errorf("Got an unexpected mysql.SQLError error: %v", serr)
- }
+ assert.True(t, ok, "Got a non mysql.SQLError error: %v", err)
+ assert.Equal(t, mysql.CRServerLost, serr.Num, "Got an unexpected mysql.SQLError error: %v", serr)
+
// we got the right error, all good.
return
}
@@ -174,9 +162,8 @@ func TestReplicationConnectionClosing(t *testing.T) {
t.Fatal(err)
}
result, err := dConn.ExecuteFetch("insert into replicationError(id, name) values(10, 'nice name')", 0, false)
- if err != nil {
- t.Fatalf("insert failed: %v", err)
- }
+ require.NoError(t, err, "insert failed: %v", err)
+
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for insert: %v", result)
}
@@ -211,23 +198,20 @@ func TestRowReplicationWithRealDatabase(t *testing.T) {
t.Fatal(err)
}
result, err := dConn.ExecuteFetch("insert into replication(id, name) values(10, 'nice name')", 0, false)
- if err != nil {
- t.Fatalf("insert failed: %v", err)
- }
+ require.NoError(t, err, "insert failed: %v", err)
+
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for insert: %v", result)
}
result, err = dConn.ExecuteFetch("update replication set name='nicer name' where id=10", 0, false)
- if err != nil {
- t.Fatalf("update failed: %v", err)
- }
+ require.NoError(t, err, "update failed: %v", err)
+
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for update: %v", result)
}
result, err = dConn.ExecuteFetch("delete from replication where id=10", 0, false)
- if err != nil {
- t.Fatalf("delete failed: %v", err)
- }
+ require.NoError(t, err, "delete failed: %v", err)
+
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for delete: %v", result)
}
@@ -250,16 +234,12 @@ func TestRowReplicationWithRealDatabase(t *testing.T) {
// for i := 0; i < 6 && (gtidCount < 2 || !gotCreateTable || !gotTableMapEvent || !gotBegin || !gotInsert || !gotCommit); i++ {
for gtidCount < 4 || !gotCreateTable || !gotTableMapEvent || !gotInsert || !gotUpdate || !gotDelete || beginCount != 3 || commitCount != 3 {
be, err := conn.ReadBinlogEvent()
- if err != nil {
- t.Fatalf("ReadPacket failed: %v", err)
- }
- if !be.IsValid() {
- t.Fatalf("read an invalid packet: %v", be)
- }
+ require.NoError(t, err, "ReadPacket failed: %v", err)
+ require.True(t, be.IsValid(), "read an invalid packet: %v", be)
+
be, _, err = be.StripChecksum(f)
- if err != nil {
- t.Fatalf("StripChecksum failed: %v", err)
- }
+ require.NoError(t, err, "StripChecksum failed: %v", err)
+
switch {
case be.IsGTID():
// We expect one of these at least.
@@ -956,9 +936,8 @@ func TestRowReplicationTypes(t *testing.T) {
}
result, err := dConn.ExecuteFetch(insert, 0, false)
- if err != nil {
- t.Fatalf("insert failed: %v", err)
- }
+ require.NoError(t, err, "insert failed: %v", err)
+
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for insert: %v", result)
}
@@ -971,16 +950,12 @@ func TestRowReplicationTypes(t *testing.T) {
for values == nil {
be, err := conn.ReadBinlogEvent()
- if err != nil {
- t.Fatalf("ReadPacket failed: %v", err)
- }
- if !be.IsValid() {
- t.Fatalf("read an invalid packet: %v", be)
- }
+ require.NoError(t, err, "ReadPacket failed: %v", err)
+ require.True(t, be.IsValid(), "read an invalid packet: %v", be)
+
be, _, err = be.StripChecksum(f)
- if err != nil {
- t.Fatalf("StripChecksum failed: %v", err)
- }
+ require.NoError(t, err, "StripChecksum failed: %v", err)
+
switch {
case be.IsTableMap():
tableID = be.TableID(f) // This would be 0x00ffffff for an event to clear all table map entries.
@@ -1048,9 +1023,8 @@ func TestRowReplicationTypes(t *testing.T) {
}
}
result, err = dConn.ExecuteFetch(sql.String(), 0, false)
- if err != nil {
- t.Fatalf("insert '%v' failed: %v", sql.String(), err)
- }
+ require.NoError(t, err, "insert '%v' failed: %v", sql.String(), err)
+
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for insert: %v", result)
}
@@ -1063,16 +1037,12 @@ func TestRowReplicationTypes(t *testing.T) {
}
stmt += " from replicationtypes"
result, err = dConn.ExecuteFetch(stmt, 2, false)
- if err != nil {
- t.Fatalf("select failed: %v", err)
- }
- if len(result.Rows) != 2 {
- t.Fatalf("unexpected result for select: %v", result)
- }
+ require.NoError(t, err, "select failed: %v", err)
+ require.Equal(t, 2, len(result.Rows), "unexpected result for select: %v", result)
+
for i, tcase := range testcases {
- if !reflect.DeepEqual(result.Rows[0][i+1], result.Rows[1][i+1]) {
- t.Errorf("Field %v is not the same, got %v and %v", tcase.name, result.Rows[0][i+1], result.Rows[1][i+1])
- }
+ assert.True(t, reflect.DeepEqual(result.Rows[0][i+1], result.Rows[1][i+1]), "Field %v is not the same, got %v and %v", tcase.name, result.Rows[0][i+1], result.Rows[1][i+1])
+
}
// Drop the table, we're done.
diff --git a/go/mysql/endtoend/schema_change_test.go b/go/mysql/endtoend/schema_change_test.go
index 39e1275dc08..7e58852d176 100644
--- a/go/mysql/endtoend/schema_change_test.go
+++ b/go/mysql/endtoend/schema_change_test.go
@@ -32,7 +32,6 @@ import (
var ctx = context.Background()
const (
- createDb = `create database if not exists _vt`
createUserTable = `create table vttest.product (id bigint(20) primary key, name char(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci, created bigint(20))`
dropTestTable = `drop table if exists product`
)
@@ -42,11 +41,6 @@ func TestChangeSchemaIsNoticed(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
- _, err = conn.ExecuteFetch(createDb, 1000, true)
- require.NoError(t, err)
- _, err = conn.ExecuteFetch(mysql.CreateSchemaCopyTable, 1000, true)
- require.NoError(t, err)
-
tests := []struct {
name string
changeQ string
diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go
index 7840c288dbf..7d45755bd11 100644
--- a/go/mysql/fakesqldb/server.go
+++ b/go/mysql/fakesqldb/server.go
@@ -23,12 +23,14 @@ import (
"os"
"path"
"regexp"
- "runtime/debug"
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
+ "vitess.io/vitess/go/vt/sqlparser"
+
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/mysql"
@@ -66,7 +68,7 @@ type DB struct {
acceptWG sync.WaitGroup
// orderMatters is set when the query order matters.
- orderMatters bool
+ orderMatters atomic.Bool
// Fields set at runtime.
@@ -76,16 +78,16 @@ type DB struct {
// Use SetName() to change.
name string
// isConnFail trigger a panic in the connection handler.
- isConnFail bool
+ isConnFail atomic.Bool
// connDelay causes a sleep in the connection handler
connDelay time.Duration
// shouldClose, if true, tells ComQuery() to close the connection when
// processing the next query. This will trigger a MySQL client error with
// errno 2013 ("server lost").
- shouldClose bool
- // AllowAll: if set to true, ComQuery returns an empty result
+ shouldClose atomic.Bool
+ // allowAll: if set to true, ComQuery returns an empty result
// for all queries. This flag is used for benchmarking.
- AllowAll bool
+ allowAll atomic.Bool
// Handler: interface that allows a caller to override the query handling
// implementation. By default it points to the DB itself
@@ -111,9 +113,6 @@ type DB struct {
expectedExecuteFetch []ExpectedExecuteFetch
// expectedExecuteFetchIndex is the current index of the query.
expectedExecuteFetchIndex int
- // Infinite is true when executed queries beyond our expectation list
- // should respond with the last entry from the list.
- infinite bool
// connections tracks all open connections.
// The key for the map is the value of mysql.Conn.ConnectionID.
@@ -124,7 +123,11 @@ type DB struct {
// if fakesqldb is asked to serve queries or query patterns that it has not been explicitly told about it will
// error out by default. However if you set this flag then any unmatched query results in an empty result
- neverFail bool
+ neverFail atomic.Bool
+
+ // lastError stores the last error in returning a query result.
+ lastErrorMu sync.Mutex
+ lastError error
}
// QueryHandler is the interface used by the DB to simulate executed queries
@@ -140,9 +143,10 @@ type ExpectedResult struct {
}
type exprResult struct {
- expr *regexp.Regexp
- result *sqltypes.Result
- err string
+ queryPattern string
+ expr *regexp.Regexp
+ result *sqltypes.Result
+ err string
}
// ExpectedExecuteFetch defines for an expected query the to be faked output.
@@ -176,6 +180,7 @@ func New(t testing.TB) *DB {
connections: make(map[uint32]*mysql.Conn),
queryPatternUserCallback: make(map[*regexp.Regexp]func(string)),
patternData: make(map[string]exprResult),
+ lastErrorMu: sync.Mutex{},
}
db.Handler = db
@@ -217,12 +222,8 @@ func (db *DB) SetName(name string) *DB {
}
// OrderMatters sets the orderMatters flag.
-func (db *DB) OrderMatters() *DB {
- db.mu.Lock()
- defer db.mu.Unlock()
-
- db.orderMatters = true
- return db
+func (db *DB) OrderMatters() {
+ db.orderMatters.Store(true)
}
// Close closes the Listener and waits for it to stop accepting.
@@ -249,6 +250,13 @@ func (db *DB) CloseAllConnections() {
}
}
+// LastError gives the last error the DB ran into
+func (db *DB) LastError() error {
+ db.lastErrorMu.Lock()
+ defer db.lastErrorMu.Unlock()
+ return db.lastError
+}
+
// WaitForClose should be used after CloseAllConnections() is closed and
// you want to provoke a MySQL client error with errno 2006.
//
@@ -310,7 +318,7 @@ func (db *DB) NewConnection(c *mysql.Conn) {
db.mu.Lock()
defer db.mu.Unlock()
- if db.isConnFail {
+ if db.isConnFail.Load() {
panic(fmt.Errorf("simulating a connection failure"))
}
@@ -346,12 +354,19 @@ func (db *DB) WarningCount(c *mysql.Conn) uint16 {
}
// HandleQuery is the default implementation of the QueryHandler interface
-func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) error {
- if db.AllowAll {
+func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) (err error) {
+ defer func() {
+ if err != nil {
+ db.lastErrorMu.Lock()
+ db.lastError = err
+ db.lastErrorMu.Unlock()
+ }
+ }()
+ if db.allowAll.Load() {
return callback(&sqltypes.Result{})
}
- if db.orderMatters {
+ if db.orderMatters.Load() {
result, err := db.comQueryOrdered(query)
if err != nil {
return err
@@ -364,7 +379,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R
db.queryCalled[key]++
db.querylog = append(db.querylog, key)
// Check if we should close the connection and provoke errno 2013.
- if db.shouldClose {
+ if db.shouldClose.Load() {
c.Close()
//log error
@@ -413,17 +428,31 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R
}
}
- if db.neverFail {
+ if db.neverFail.Load() {
return callback(&sqltypes.Result{})
}
// Nothing matched.
- err := fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", query, db.name)
- log.Errorf("Query not found: %s:%s", query, debug.Stack())
+ err = fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v",
+ sqlparser.TruncateForUI(query), db.name)
+ log.Errorf("Query not found: %s", sqlparser.TruncateForUI(query))
return err
}
func (db *DB) comQueryOrdered(query string) (*sqltypes.Result, error) {
+ var (
+ afterFn func()
+ entry ExpectedExecuteFetch
+ err error
+ expected string
+ result *sqltypes.Result
+ )
+
+ defer func() {
+ if afterFn != nil {
+ afterFn()
+ }
+ }()
db.mu.Lock()
defer db.mu.Unlock()
@@ -435,44 +464,46 @@ func (db *DB) comQueryOrdered(query string) (*sqltypes.Result, error) {
}
index := db.expectedExecuteFetchIndex
- if db.infinite && index == len(db.expectedExecuteFetch) {
- // Although we already executed all queries, we'll continue to answer the
- // last one in the infinite mode.
- index--
- }
+
if index >= len(db.expectedExecuteFetch) {
+ if db.neverFail.Load() {
+ return &sqltypes.Result{}, nil
+ }
db.t.Errorf("%v: got unexpected out of bound fetch: %v >= %v", db.name, index, len(db.expectedExecuteFetch))
return nil, errors.New("unexpected out of bound fetch")
}
- entry := db.expectedExecuteFetch[index]
- db.expectedExecuteFetchIndex++
- // If the infinite mode is on, reverse the increment and keep the index at
- // len(db.expectedExecuteFetch).
- if db.infinite && db.expectedExecuteFetchIndex > len(db.expectedExecuteFetch) {
- db.expectedExecuteFetchIndex--
- }
+ entry = db.expectedExecuteFetch[index]
+ afterFn = entry.AfterFunc
+ err = entry.Error
+ expected = entry.Query
+ result = entry.QueryResult
- if entry.AfterFunc != nil {
- defer entry.AfterFunc()
- }
-
- expected := entry.Query
if strings.HasSuffix(expected, "*") {
if !strings.HasPrefix(query, expected[0:len(expected)-1]) {
+ if db.neverFail.Load() {
+ return &sqltypes.Result{}, nil
+ }
db.t.Errorf("%v: got unexpected query start (index=%v): %v != %v", db.name, index, query, expected)
+ return nil, errors.New("unexpected query")
}
} else {
if query != expected {
+ if db.neverFail.Load() {
+ return &sqltypes.Result{}, nil
+ }
db.t.Errorf("%v: got unexpected query (index=%v): %v != %v", db.name, index, query, expected)
return nil, errors.New("unexpected query")
}
}
+
+ db.expectedExecuteFetchIndex++
db.t.Logf("ExecuteFetch: %v: %v", db.name, query)
- if entry.Error != nil {
- return nil, entry.Error
+
+ if err != nil {
+ return nil, err
}
- return entry.QueryResult, nil
+ return result, nil
}
// ComPrepare is part of the mysql.Handler interface.
@@ -485,8 +516,18 @@ func (db *DB) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData, callback
return nil
}
+// ComRegisterReplica is part of the mysql.Handler interface.
+func (db *DB) ComRegisterReplica(c *mysql.Conn, replicaHost string, replicaPort uint16, replicaUser string, replicaPassword string) error {
+ return nil
+}
+
+// ComBinlogDump is part of the mysql.Handler interface.
+func (db *DB) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos uint32) error {
+ return nil
+}
+
// ComBinlogDumpGTID is part of the mysql.Handler interface.
-func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, gtidSet mysql.GTIDSet) error {
+func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error {
return nil
}
@@ -535,7 +576,7 @@ func (db *DB) AddQueryPattern(queryPattern string, expectedResult *sqltypes.Resu
result := *expectedResult
db.mu.Lock()
defer db.mu.Unlock()
- db.patternData[queryPattern] = exprResult{expr: expr, result: &result}
+ db.patternData[queryPattern] = exprResult{queryPattern: queryPattern, expr: expr, result: &result}
}
// RejectQueryPattern allows a query pattern to be rejected with an error
@@ -543,7 +584,7 @@ func (db *DB) RejectQueryPattern(queryPattern, error string) {
expr := regexp.MustCompile("(?is)^" + queryPattern + "$")
db.mu.Lock()
defer db.mu.Unlock()
- db.patternData[queryPattern] = exprResult{expr: expr, err: error}
+ db.patternData[queryPattern] = exprResult{queryPattern: queryPattern, expr: expr, err: error}
}
// ClearQueryPattern removes all query patterns set up
@@ -604,16 +645,12 @@ func (db *DB) ResetQueryLog() {
// EnableConnFail makes connection to this fake DB fail.
func (db *DB) EnableConnFail() {
- db.mu.Lock()
- defer db.mu.Unlock()
- db.isConnFail = true
+ db.isConnFail.Store(true)
}
// DisableConnFail makes connection to this fake DB success.
func (db *DB) DisableConnFail() {
- db.mu.Lock()
- defer db.mu.Unlock()
- db.isConnFail = false
+ db.isConnFail.Store(false)
}
// SetConnDelay delays connections to this fake DB for the given duration
@@ -625,9 +662,7 @@ func (db *DB) SetConnDelay(d time.Duration) {
// EnableShouldClose closes the connection when processing the next query.
func (db *DB) EnableShouldClose() {
- db.mu.Lock()
- defer db.mu.Unlock()
- db.shouldClose = true
+ db.shouldClose.Store(true)
}
//
@@ -639,14 +674,6 @@ func (db *DB) AddExpectedExecuteFetch(entry ExpectedExecuteFetch) {
db.AddExpectedExecuteFetchAtIndex(appendEntry, entry)
}
-// EnableInfinite turns on the infinite flag (the last ordered query is used).
-func (db *DB) EnableInfinite() {
- db.mu.Lock()
- defer db.mu.Unlock()
-
- db.infinite = true
-}
-
// AddExpectedExecuteFetchAtIndex inserts a new entry at index.
// index values start at 0.
func (db *DB) AddExpectedExecuteFetchAtIndex(index int, entry ExpectedExecuteFetch) {
@@ -740,8 +767,12 @@ func (db *DB) VerifyAllExecutedOrFail() {
}
}
+func (db *DB) SetAllowAll(allowAll bool) {
+ db.allowAll.Store(allowAll)
+}
+
func (db *DB) SetNeverFail(neverFail bool) {
- db.neverFail = neverFail
+ db.neverFail.Store(neverFail)
}
func (db *DB) MockQueriesForTable(table string, result *sqltypes.Result) {
diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go
index 85c1247e678..ed6a3fe4b04 100644
--- a/go/mysql/flavor.go
+++ b/go/mysql/flavor.go
@@ -49,9 +49,11 @@ const (
InstantAddDropVirtualColumnFlavorCapability
InstantAddDropColumnFlavorCapability
InstantChangeColumnDefaultFlavorCapability
+ InstantExpandEnumCapability
MySQLJSONFlavorCapability
MySQLUpgradeInServerFlavorCapability
DynamicRedoLogCapacityFlavorCapability // supported in MySQL 8.0.30 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-30.html
+ DisableRedoLogFlavorCapability // supported in MySQL 8.0.21 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-21.html
)
const (
@@ -113,7 +115,7 @@ type flavor interface {
// sendBinlogDumpCommand sends the packet required to start
// dumping binlogs from the specified location.
- sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Position) error
+ sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error
// readBinlogEvent reads the next BinlogEvent from the connection.
readBinlogEvent(c *Conn) (BinlogEvent, error)
@@ -155,6 +157,7 @@ type flavor interface {
enableBinlogPlaybackCommand() string
disableBinlogPlaybackCommand() string
+ baseShowTables() string
baseShowTablesWithSizes() string
supportsCapability(serverVersion string, capability FlavorCapability) (bool, error)
@@ -358,8 +361,8 @@ func (c *Conn) StartSQLThreadCommand() string {
// SendBinlogDumpCommand sends the flavor-specific version of
// the COM_BINLOG_DUMP command to start dumping raw binlog
// events over a server connection, starting at a given GTID.
-func (c *Conn) SendBinlogDumpCommand(serverID uint32, startPos Position) error {
- return c.flavor.sendBinlogDumpCommand(c, serverID, startPos)
+func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos Position) error {
+ return c.flavor.sendBinlogDumpCommand(c, serverID, binlogFilename, startPos)
}
// ReadBinlogEvent reads the next BinlogEvent. This must be used
@@ -571,8 +574,13 @@ func (c *Conn) DisableBinlogPlaybackCommand() string {
return c.flavor.disableBinlogPlaybackCommand()
}
-// BaseShowTables returns a query that shows tables and their sizes
+// BaseShowTables returns a query that shows tables
func (c *Conn) BaseShowTables() string {
+ return c.flavor.baseShowTables()
+}
+
+// BaseShowTablesWithSizes returns a query that shows tables and their sizes
+func (c *Conn) BaseShowTablesWithSizes() string {
return c.flavor.baseShowTablesWithSizes()
}
diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go
index a66af7f9f3d..13a3a5b8ab7 100644
--- a/go/mysql/flavor_filepos.go
+++ b/go/mysql/flavor_filepos.go
@@ -119,7 +119,7 @@ func (flv *filePosFlavor) startSQLThreadCommand() string {
}
// sendBinlogDumpCommand is part of the Flavor interface.
-func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Position) error {
+func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error {
rpos, ok := startPos.GTIDSet.(filePosGTID)
if !ok {
return fmt.Errorf("startPos.GTIDSet is wrong type - expected filePosGTID, got: %#v", startPos.GTIDSet)
@@ -326,6 +326,11 @@ func (*filePosFlavor) disableBinlogPlaybackCommand() string {
return ""
}
+// baseShowTables is part of the Flavor interface.
+func (*filePosFlavor) baseShowTables() string {
+ return mysqlFlavor{}.baseShowTables()
+}
+
// baseShowTablesWithSizes is part of the Flavor interface.
func (*filePosFlavor) baseShowTablesWithSizes() string {
return TablesWithSize56
diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go
index 5235b4f4357..377ede1ecc8 100644
--- a/go/mysql/flavor_mariadb.go
+++ b/go/mysql/flavor_mariadb.go
@@ -105,7 +105,7 @@ func (mariadbFlavor) startSQLThreadCommand() string {
}
// sendBinlogDumpCommand is part of the Flavor interface.
-func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Position) error {
+func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error {
// Tell the server that we understand GTIDs by setting
// mariadb_slave_capability to MARIA_SLAVE_CAPABILITY_GTID = 4 (MariaDB >= 10.0.1).
if _, err := c.ExecuteFetch("SET @mariadb_slave_capability=4", 0, false); err != nil {
diff --git a/go/mysql/flavor_mariadb_binlog_playback.go b/go/mysql/flavor_mariadb_binlog_playback.go
index e862e744d04..f8ce0053b56 100644
--- a/go/mysql/flavor_mariadb_binlog_playback.go
+++ b/go/mysql/flavor_mariadb_binlog_playback.go
@@ -30,6 +30,11 @@ func (mariadbFlavor) disableBinlogPlaybackCommand() string {
return ""
}
+// baseShowTables is part of the Flavor interface.
+func (mariadbFlavor) baseShowTables() string {
+ return mysqlFlavor{}.baseShowTables()
+}
+
// baseShowTablesWithSizes is part of the Flavor interface.
func (mariadbFlavor101) baseShowTablesWithSizes() string {
return TablesWithSize56
diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go
index 0198b8095de..3739e9294ca 100644
--- a/go/mysql/flavor_mariadb_test.go
+++ b/go/mysql/flavor_mariadb_test.go
@@ -42,9 +42,8 @@ func TestMariadbSetReplicationSourceCommand(t *testing.T) {
conn := &Conn{flavor: mariadbFlavor101{}}
got := conn.SetReplicationSourceCommand(params, host, port, connectRetry)
- if got != want {
- t.Errorf("mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
- }
+ assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
+
}
func TestMariadbSetReplicationSourceCommandSSL(t *testing.T) {
@@ -75,9 +74,8 @@ func TestMariadbSetReplicationSourceCommandSSL(t *testing.T) {
conn := &Conn{flavor: mariadbFlavor101{}}
got := conn.SetReplicationSourceCommand(params, host, port, connectRetry)
- if got != want {
- t.Errorf("mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
- }
+ assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
+
}
func TestMariadbRetrieveSourceServerId(t *testing.T) {
diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go
index ba4982db4d3..3650d085960 100644
--- a/go/mysql/flavor_mysql.go
+++ b/go/mysql/flavor_mysql.go
@@ -53,7 +53,7 @@ func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) {
if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 {
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_executed: %#v", qr)
}
- return parseMysql56GTIDSet(qr.Rows[0][0].ToString())
+ return ParseMysql56GTIDSet(qr.Rows[0][0].ToString())
}
// purgedGTIDSet is part of the Flavor interface.
@@ -66,7 +66,7 @@ func (mysqlFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) {
if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 {
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_purged: %#v", qr)
}
- return parseMysql56GTIDSet(qr.Rows[0][0].ToString())
+ return ParseMysql56GTIDSet(qr.Rows[0][0].ToString())
}
// serverUUID is part of the Flavor interface.
@@ -131,7 +131,7 @@ func (mysqlFlavor) startSQLThreadCommand() string {
}
// sendBinlogDumpCommand is part of the Flavor interface.
-func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Position) error {
+func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error {
gtidSet, ok := startPos.GTIDSet.(Mysql56GTIDSet)
if !ok {
return vterrors.Errorf(vtrpc.Code_INTERNAL, "startPos.GTIDSet is wrong type - expected Mysql56GTIDSet, got: %#v", startPos.GTIDSet)
@@ -139,7 +139,7 @@ func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Posi
// Build the command.
sidBlock := gtidSet.SIDBlock()
- return c.WriteComBinlogDumpGTID(serverID, "", 4, 0, sidBlock)
+ return c.WriteComBinlogDumpGTID(serverID, binlogFilename, 4, 0, sidBlock)
}
// resetReplicationCommands is part of the Flavor interface.
@@ -208,11 +208,11 @@ func parseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus
}
var err error
- status.Position.GTIDSet, err = parseMysql56GTIDSet(resultMap["Executed_Gtid_Set"])
+ status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"])
if err != nil {
return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"])
}
- relayLogGTIDSet, err := parseMysql56GTIDSet(resultMap["Retrieved_Gtid_Set"])
+ relayLogGTIDSet, err := ParseMysql56GTIDSet(resultMap["Retrieved_Gtid_Set"])
if err != nil {
return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Retrieved_Gtid_Set: %#v)", resultMap["Retrieved_Gtid_Set"])
}
@@ -247,7 +247,7 @@ func parseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error)
status := parsePrimaryStatus(resultMap)
var err error
- status.Position.GTIDSet, err = parseMysql56GTIDSet(resultMap["Executed_Gtid_Set"])
+ status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"])
if err != nil {
return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"])
}
@@ -308,9 +308,24 @@ func (mysqlFlavor) disableBinlogPlaybackCommand() string {
return ""
}
+// baseShowTables is part of the Flavor interface.
+func (mysqlFlavor) baseShowTables() string {
+ return "SELECT table_name, table_type, unix_timestamp(create_time), table_comment FROM information_schema.tables WHERE table_schema = database()"
+}
+
// TablesWithSize56 is a query to select table along with size for mysql 5.6
-const TablesWithSize56 = `SELECT table_name, table_type, unix_timestamp(create_time), table_comment, SUM( data_length + index_length), SUM( data_length + index_length)
- FROM information_schema.tables WHERE table_schema = database() group by table_name`
+const TablesWithSize56 = `SELECT table_name,
+ table_type,
+ UNIX_TIMESTAMP(create_time) AS uts_create_time,
+ table_comment,
+ SUM(data_length + index_length),
+ SUM(data_length + index_length)
+FROM information_schema.tables
+WHERE table_schema = database()
+GROUP BY table_name,
+ table_type,
+ uts_create_time,
+ table_comment`
// TablesWithSize57 is a query to select table along with size for mysql 5.7.
//
@@ -341,17 +356,41 @@ GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment`
// We join with a subquery that materializes the data from `information_schema.innodb_sys_tablespaces`
// early for performance reasons. This effectively causes only a single read of `information_schema.innodb_tablespaces`
// per query.
+// Note the following:
+// - We use UNION ALL to deal differently with partitioned tables vs. non-partitioned tables.
+// Originally, the query handled both, but that introduced "WHERE ... OR" conditions that led to poor query
+// optimization. By separating to UNION ALL we remove all "OR" conditions.
+// - We utilize `INFORMATION_SCHEMA`.`TABLES`.`CREATE_OPTIONS` column to do early pruning before the JOIN.
+// - `TABLES`.`TABLE_NAME` has `utf8mb4_0900_ai_ci` collation. `INNODB_TABLESPACES`.`NAME` has `utf8mb3_general_ci`.
+// We normalize the collation to get better query performance (we force the casting at the time of our choosing)
+// - `create_options` is NULL for views, and therefore we need an additional UNION ALL to include views
const TablesWithSize80 = `SELECT t.table_name,
- t.table_type,
- UNIX_TIMESTAMP(t.create_time),
- t.table_comment,
- SUM(i.file_size),
- SUM(i.allocated_size)
-FROM information_schema.tables t
-INNER JOIN information_schema.innodb_tablespaces i
- ON i.name LIKE CONCAT(database(), '/%') AND (i.name = CONCAT(t.table_schema, '/', t.table_name) OR i.name LIKE CONCAT(t.table_schema, '/', t.table_name, '#p#%'))
-WHERE t.table_schema = database()
-GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment`
+ t.table_type,
+ UNIX_TIMESTAMP(t.create_time),
+ t.table_comment,
+ i.file_size,
+ i.allocated_size
+ FROM information_schema.tables t
+ LEFT JOIN information_schema.innodb_tablespaces i
+ ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8_general_ci
+ WHERE
+ t.table_schema = database() AND not t.create_options <=> 'partitioned'
+UNION ALL
+ SELECT
+ t.table_name,
+ t.table_type,
+ UNIX_TIMESTAMP(t.create_time),
+ t.table_comment,
+ SUM(i.file_size),
+ SUM(i.allocated_size)
+ FROM information_schema.tables t
+ LEFT JOIN information_schema.innodb_tablespaces i
+ ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8_general_ci )
+ WHERE
+ t.table_schema = database() AND t.create_options <=> 'partitioned'
+ GROUP BY
+ t.table_schema, t.table_name, t.table_type, t.create_time, t.table_comment
+`
// baseShowTablesWithSizes is part of the Flavor interface.
func (mysqlFlavor56) baseShowTablesWithSizes() string {
@@ -390,6 +429,7 @@ func (mysqlFlavor80) baseShowTablesWithSizes() string {
func (mysqlFlavor80) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) {
switch capability {
case InstantDDLFlavorCapability,
+ InstantExpandEnumCapability,
InstantAddLastColumnFlavorCapability,
InstantAddDropVirtualColumnFlavorCapability,
InstantChangeColumnDefaultFlavorCapability:
@@ -406,6 +446,8 @@ func (mysqlFlavor80) supportsCapability(serverVersion string, capability FlavorC
return ServerVersionAtLeast(serverVersion, 8, 0, 16)
case DynamicRedoLogCapacityFlavorCapability:
return ServerVersionAtLeast(serverVersion, 8, 0, 30)
+ case DisableRedoLogFlavorCapability:
+ return ServerVersionAtLeast(serverVersion, 8, 0, 21)
default:
return false, nil
}
diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go
index bf390427e61..33b98cb4941 100644
--- a/go/mysql/flavor_mysql_test.go
+++ b/go/mysql/flavor_mysql_test.go
@@ -41,9 +41,8 @@ func TestMysql56SetReplicationSourceCommand(t *testing.T) {
conn := &Conn{flavor: mysqlFlavor57{}}
got := conn.SetReplicationSourceCommand(params, host, port, connectRetry)
- if got != want {
- t.Errorf("mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
- }
+ assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
+
}
func TestMysql56SetReplicationSourceCommandSSL(t *testing.T) {
@@ -74,9 +73,8 @@ func TestMysql56SetReplicationSourceCommandSSL(t *testing.T) {
conn := &Conn{flavor: mysqlFlavor57{}}
got := conn.SetReplicationSourceCommand(params, host, port, connectRetry)
- if got != want {
- t.Errorf("mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
- }
+ assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
+
}
func TestMysqlRetrieveSourceServerId(t *testing.T) {
diff --git a/go/mysql/flavor_mysqlgr.go b/go/mysql/flavor_mysqlgr.go
index 0094c563b7b..0d05a085802 100644
--- a/go/mysql/flavor_mysqlgr.go
+++ b/go/mysql/flavor_mysqlgr.go
@@ -239,6 +239,10 @@ func (mysqlGRFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) {
return mysqlFlavor{}.primaryStatus(c)
}
+func (mysqlGRFlavor) baseShowTables() string {
+ return mysqlFlavor{}.baseShowTables()
+}
+
func (mysqlGRFlavor) baseShowTablesWithSizes() string {
return TablesWithSize80
}
@@ -247,6 +251,7 @@ func (mysqlGRFlavor) baseShowTablesWithSizes() string {
func (mysqlGRFlavor) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) {
switch capability {
case InstantDDLFlavorCapability,
+ InstantExpandEnumCapability,
InstantAddLastColumnFlavorCapability,
InstantAddDropVirtualColumnFlavorCapability,
InstantChangeColumnDefaultFlavorCapability:
diff --git a/go/mysql/flavor_test.go b/go/mysql/flavor_test.go
index ff3feeaab3a..891725b5afc 100644
--- a/go/mysql/flavor_test.go
+++ b/go/mysql/flavor_test.go
@@ -150,6 +150,16 @@ func TestGetFlavor(t *testing.T) {
capability: DynamicRedoLogCapacityFlavorCapability,
isCapable: false,
},
+ {
+ version: "8.0.21",
+ capability: DisableRedoLogFlavorCapability,
+ isCapable: true,
+ },
+ {
+ version: "8.0.20",
+ capability: DisableRedoLogFlavorCapability,
+ isCapable: false,
+ },
}
for _, tc := range testcases {
name := fmt.Sprintf("%s %v", tc.version, tc.capability)
diff --git a/go/mysql/gtid_test.go b/go/mysql/gtid_test.go
index ded8b727a72..8dfea641727 100644
--- a/go/mysql/gtid_test.go
+++ b/go/mysql/gtid_test.go
@@ -19,6 +19,8 @@ package mysql
import (
"strings"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
func TestParseGTID(t *testing.T) {
@@ -30,12 +32,9 @@ func TestParseGTID(t *testing.T) {
want := fakeGTID{value: "12345"}
got, err := ParseGTID(flavor, input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("ParseGTID(%#v, %#v) = %#v, want %#v", flavor, input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "ParseGTID(%#v, %#v) = %#v, want %#v", flavor, input, got, want)
+
}
func TestMustParseGTID(t *testing.T) {
@@ -47,18 +46,16 @@ func TestMustParseGTID(t *testing.T) {
want := fakeGTID{value: "12345"}
got := MustParseGTID(flavor, input)
- if got != want {
- t.Errorf("MustParseGTID(%#v, %#v) = %#v, want %#v", flavor, input, got, want)
- }
+ assert.Equal(t, want, got, "MustParseGTID(%#v, %#v) = %#v, want %#v", flavor, input, got, want)
+
}
func TestMustParseGTIDError(t *testing.T) {
defer func() {
want := `parse error: unknown GTID flavor "unknown flavor !@$!@"`
err := recover()
- if err == nil {
- t.Errorf("wrong error, got %#v, want %#v", err, want)
- }
+ assert.NotNil(t, err, "wrong error, got %#v, want %#v", err, want)
+
got, ok := err.(error)
if !ok || !strings.HasPrefix(got.Error(), want) {
t.Errorf("wrong error, got %#v, want %#v", got, want)
@@ -72,9 +69,8 @@ func TestParseUnknownFlavor(t *testing.T) {
want := `parse error: unknown GTID flavor "foobar8675309"`
_, err := ParseGTID("foobar8675309", "foo")
- if !strings.HasPrefix(err.Error(), want) {
- t.Errorf("wrong error, got '%v', want '%v'", err, want)
- }
+ assert.True(t, strings.HasPrefix(err.Error(), want), "wrong error, got '%v', want '%v'", err, want)
+
}
func TestEncodeGTID(t *testing.T) {
@@ -97,12 +93,9 @@ func TestDecodeGTID(t *testing.T) {
want := fakeGTID{value: "123-456:789"}
got, err := DecodeGTID(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("DecodeGTID(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "DecodeGTID(%#v) = %#v, want %#v", input, got, want)
+
}
func TestMustDecodeGTID(t *testing.T) {
@@ -113,18 +106,16 @@ func TestMustDecodeGTID(t *testing.T) {
want := fakeGTID{value: "123-456:789"}
got := MustDecodeGTID(input)
- if got != want {
- t.Errorf("DecodeGTID(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.Equal(t, want, got, "DecodeGTID(%#v) = %#v, want %#v", input, got, want)
+
}
func TestMustDecodeGTIDError(t *testing.T) {
defer func() {
want := `parse error: unknown GTID flavor "unknown flavor !@$!@"`
err := recover()
- if err == nil {
- t.Errorf("wrong error, got %#v, want %#v", err, want)
- }
+ assert.NotNil(t, err, "wrong error, got %#v, want %#v", err, want)
+
got, ok := err.(error)
if !ok || !strings.HasPrefix(got.Error(), want) {
t.Errorf("wrong error, got %#v, want %#v", got, want)
@@ -148,12 +139,9 @@ func TestDecodeNilGTID(t *testing.T) {
want := GTID(nil)
got, err := DecodeGTID(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("DecodeGTID(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "DecodeGTID(%#v) = %#v, want %#v", input, got, want)
+
}
func TestDecodeNoFlavor(t *testing.T) {
@@ -164,12 +152,9 @@ func TestDecodeNoFlavor(t *testing.T) {
want := fakeGTID{value: "12345"}
got, err := DecodeGTID(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("DecodeGTID(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "DecodeGTID(%#v) = %#v, want %#v", input, got, want)
+
}
func TestDecodeGTIDWithSeparator(t *testing.T) {
@@ -180,12 +165,9 @@ func TestDecodeGTIDWithSeparator(t *testing.T) {
want := fakeGTID{value: "GTID containing / a slash"}
got, err := DecodeGTID(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("DecodeGTID(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "DecodeGTID(%#v) = %#v, want %#v", input, got, want)
+
}
type fakeGTID struct {
diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go
index 3948fe851bf..57435284cba 100644
--- a/go/mysql/handshake_test.go
+++ b/go/mysql/handshake_test.go
@@ -24,6 +24,9 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/tlstest"
@@ -43,9 +46,7 @@ func TestClearTextClientAuth(t *testing.T) {
// Create the listener.
l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
- if err != nil {
- t.Fatalf("NewListener failed: %v", err)
- }
+ require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
port := l.Addr().(*net.TCPAddr).Port
@@ -72,16 +73,14 @@ func TestClearTextClientAuth(t *testing.T) {
// Change server side to allow clear text without auth.
l.AllowClearTextWithoutTLS.Set(true)
conn, err := Connect(ctx, params)
- if err != nil {
- t.Fatalf("unexpected connection error: %v", err)
- }
+ require.NoError(t, err, "unexpected connection error: %v", err)
+
defer conn.Close()
// Run a 'select rows' command with results.
result, err := conn.ExecuteFetch("select rows", 10000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+
utils.MustMatch(t, result, selectRowsResult)
// Send a ComQuit to avoid the error message on the server side.
@@ -101,9 +100,7 @@ func TestSSLConnection(t *testing.T) {
// Create the listener, so we can get its host.
l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
- if err != nil {
- t.Fatalf("NewListener failed: %v", err)
- }
+ require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
port := l.Addr().(*net.TCPAddr).Port
@@ -122,9 +119,8 @@ func TestSSLConnection(t *testing.T) {
"",
"",
tls.VersionTLS12)
- if err != nil {
- t.Fatalf("TLSServerConfig failed: %v", err)
- }
+ require.NoError(t, err, "TLSServerConfig failed: %v", err)
+
l.TLSConfig.Store(serverConfig)
go func() {
l.Accept()
@@ -158,22 +154,15 @@ func testSSLConnectionClearText(t *testing.T, params *ConnParams) {
// Create a client connection, connect.
ctx := context.Background()
conn, err := Connect(ctx, params)
- if err != nil {
- t.Fatalf("Connect failed: %v", err)
- }
+ require.NoError(t, err, "Connect failed: %v", err)
+
defer conn.Close()
- if conn.User != "user1" {
- t.Errorf("Invalid conn.User, got %v was expecting user1", conn.User)
- }
+ assert.Equal(t, "user1", conn.User, "Invalid conn.User, got %v was expecting user1", conn.User)
// Make sure this went through SSL.
result, err := conn.ExecuteFetch("ssl echo", 10000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
- if result.Rows[0][0].ToString() != "ON" {
- t.Errorf("Got wrong result from ExecuteFetch(ssl echo): %v", result)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+ assert.Equal(t, "ON", result.Rows[0][0].ToString(), "Got wrong result from ExecuteFetch(ssl echo): %v", result)
// Send a ComQuit to avoid the error message on the server side.
conn.writeComQuit()
@@ -183,29 +172,21 @@ func testSSLConnectionBasics(t *testing.T, params *ConnParams) {
// Create a client connection, connect.
ctx := context.Background()
conn, err := Connect(ctx, params)
- if err != nil {
- t.Fatalf("Connect failed: %v", err)
- }
+ require.NoError(t, err, "Connect failed: %v", err)
+
defer conn.Close()
- if conn.User != "user1" {
- t.Errorf("Invalid conn.User, got %v was expecting user1", conn.User)
- }
+ assert.Equal(t, "user1", conn.User, "Invalid conn.User, got %v was expecting user1", conn.User)
// Run a 'select rows' command with results.
result, err := conn.ExecuteFetch("select rows", 10000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+
utils.MustMatch(t, result, selectRowsResult)
// Make sure this went through SSL.
result, err = conn.ExecuteFetch("ssl echo", 10000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
- if result.Rows[0][0].ToString() != "ON" {
- t.Errorf("Got wrong result from ExecuteFetch(ssl echo): %v", result)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+ assert.Equal(t, "ON", result.Rows[0][0].ToString(), "Got wrong result from ExecuteFetch(ssl echo): %v", result)
// Send a ComQuit to avoid the error message on the server side.
conn.writeComQuit()
diff --git a/go/mysql/ldapauthserver/auth_server_ldap_test.go b/go/mysql/ldapauthserver/auth_server_ldap_test.go
index 8658536d5b2..245ad987277 100644
--- a/go/mysql/ldapauthserver/auth_server_ldap_test.go
+++ b/go/mysql/ldapauthserver/auth_server_ldap_test.go
@@ -20,6 +20,7 @@ import (
"fmt"
"testing"
+ "github.com/stretchr/testify/require"
ldap "gopkg.in/ldap.v2"
)
@@ -46,12 +47,9 @@ func TestValidateClearText(t *testing.T) {
RefreshSeconds: 1,
}
_, err := asl.validate("testuser", "testpass")
- if err != nil {
- t.Fatalf("AuthServerLdap failed to validate valid credentials. Got: %v", err)
- }
+ require.NoError(t, err, "AuthServerLdap failed to validate valid credentials. Got: %v", err)
_, err = asl.validate("invaliduser", "invalidpass")
- if err == nil {
- t.Fatalf("AuthServerLdap validated invalid credentials.")
- }
+ require.Error(t, err, "AuthServerLdap validated invalid credentials.")
+
}
diff --git a/go/mysql/mariadb_gtid_test.go b/go/mysql/mariadb_gtid_test.go
index efecfb8b44c..49472ab8d33 100644
--- a/go/mysql/mariadb_gtid_test.go
+++ b/go/mysql/mariadb_gtid_test.go
@@ -29,12 +29,9 @@ func TestParseMariaGTID(t *testing.T) {
want := MariadbGTID{Domain: 12, Server: 345, Sequence: 6789}
got, err := parseMariadbGTID(input)
- if err != nil {
- t.Errorf("%v", err)
- }
- if got.(MariadbGTID) != want {
- t.Errorf("parseMariadbGTID(%v) = %v, want %v", input, got, want)
- }
+ assert.NoError(t, err, "%v", err)
+ assert.Equal(t, want, got.(MariadbGTID), "parseMariadbGTID(%v) = %v, want %v", input, got, want)
+
}
func TestParseInvalidMariaGTID(t *testing.T) {
@@ -42,12 +39,9 @@ func TestParseInvalidMariaGTID(t *testing.T) {
want := "invalid MariaDB GTID"
_, err := parseMariadbGTID(input)
- if err == nil {
- t.Errorf("expected error for invalid input (%v)", input)
- }
- if !strings.HasPrefix(err.Error(), want) {
- t.Errorf("wrong error message, got '%v', want '%v'", err, want)
- }
+ assert.Error(t, err, "expected error for invalid input (%v)", input)
+ assert.True(t, strings.HasPrefix(err.Error(), want), "wrong error message, got '%v', want '%v'", err, want)
+
}
func TestParseMariaGTIDInvalidDomain(t *testing.T) {
@@ -55,12 +49,9 @@ func TestParseMariaGTIDInvalidDomain(t *testing.T) {
want := "invalid MariaDB GTID Domain ID"
_, err := parseMariadbGTID(input)
- if err == nil {
- t.Errorf("expected error for invalid input (%v)", input)
- }
- if !strings.HasPrefix(err.Error(), want) {
- t.Errorf("wrong error message, got '%v', want '%v'", err, want)
- }
+ assert.Error(t, err, "expected error for invalid input (%v)", input)
+ assert.True(t, strings.HasPrefix(err.Error(), want), "wrong error message, got '%v', want '%v'", err, want)
+
}
func TestParseMariaGTIDInvalidServer(t *testing.T) {
@@ -68,12 +59,9 @@ func TestParseMariaGTIDInvalidServer(t *testing.T) {
want := "invalid MariaDB GTID Server ID"
_, err := parseMariadbGTID(input)
- if err == nil {
- t.Errorf("expected error for invalid input (%v)", input)
- }
- if !strings.HasPrefix(err.Error(), want) {
- t.Errorf("wrong error message, got '%v', want '%v'", err, want)
- }
+ assert.Error(t, err, "expected error for invalid input (%v)", input)
+ assert.True(t, strings.HasPrefix(err.Error(), want), "wrong error message, got '%v', want '%v'", err, want)
+
}
func TestParseMariaGTIDInvalidSequence(t *testing.T) {
@@ -81,12 +69,9 @@ func TestParseMariaGTIDInvalidSequence(t *testing.T) {
want := "invalid MariaDB GTID Sequence number"
_, err := parseMariadbGTID(input)
- if err == nil {
- t.Errorf("expected error for invalid input (%v)", input)
- }
- if !strings.HasPrefix(err.Error(), want) {
- t.Errorf("wrong error message, got '%v', want '%v'", err, want)
- }
+ assert.Error(t, err, "expected error for invalid input (%v)", input)
+ assert.True(t, strings.HasPrefix(err.Error(), want), "wrong error message, got '%v', want '%v'", err, want)
+
}
func TestParseMariaGTIDSet(t *testing.T) {
@@ -97,12 +82,9 @@ func TestParseMariaGTIDSet(t *testing.T) {
}
got, err := parseMariadbGTIDSet(input)
- if err != nil {
- t.Errorf("%v", err)
- }
- if !got.Equal(want) {
- t.Errorf("parseMariadbGTIDSet(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "%v", err)
+ assert.True(t, got.Equal(want), "parseMariadbGTIDSet(%#v) = %#v, want %#v", input, got, want)
+
}
func TestParseInvalidMariaGTIDSet(t *testing.T) {
@@ -124,9 +106,8 @@ func TestMariaGTIDString(t *testing.T) {
want := "5-4727-1737373"
got := input.String()
- if got != want {
- t.Errorf("%#v.String() = '%v', want '%v'", input, got, want)
- }
+ assert.Equal(t, want, got, "%#v.String() = '%v', want '%v'", input, got, want)
+
}
func TestMariaGTIDFlavor(t *testing.T) {
@@ -134,9 +115,8 @@ func TestMariaGTIDFlavor(t *testing.T) {
want := "MariaDB"
got := input.Flavor()
- if got != want {
- t.Errorf("%#v.Flavor() = '%v', want '%v'", input, got, want)
- }
+ assert.Equal(t, want, got, "%#v.Flavor() = '%v', want '%v'", input, got, want)
+
}
func TestMariaGTIDSequenceDomain(t *testing.T) {
@@ -144,9 +124,8 @@ func TestMariaGTIDSequenceDomain(t *testing.T) {
want := any(uint32(12))
got := input.SequenceDomain()
- if got != want {
- t.Errorf("%#v.SequenceDomain() = %#v, want %#v", input, got, want)
- }
+ assert.Equal(t, want, got, "%#v.SequenceDomain() = %#v, want %#v", input, got, want)
+
}
func TestMariaGTIDSourceServer(t *testing.T) {
@@ -154,9 +133,8 @@ func TestMariaGTIDSourceServer(t *testing.T) {
want := any(uint32(345))
got := input.SourceServer()
- if got != want {
- t.Errorf("%#v.SourceServer() = %#v, want %#v", input, got, want)
- }
+ assert.Equal(t, want, got, "%#v.SourceServer() = %#v, want %#v", input, got, want)
+
}
func TestMariaGTIDSequenceNumber(t *testing.T) {
@@ -164,9 +142,8 @@ func TestMariaGTIDSequenceNumber(t *testing.T) {
want := any(uint64(6789))
got := input.SequenceNumber()
- if got != want {
- t.Errorf("%#v.SequenceNumber() = %#v, want %#v", input, got, want)
- }
+ assert.Equal(t, want, got, "%#v.SequenceNumber() = %#v, want %#v", input, got, want)
+
}
func TestMariaGTIDGTIDSet(t *testing.T) {
@@ -174,9 +151,8 @@ func TestMariaGTIDGTIDSet(t *testing.T) {
want := MariadbGTIDSet{12: input}
got := input.GTIDSet()
- if !got.Equal(want) {
- t.Errorf("%#v.GTIDSet() = %#v, want %#v", input, got, want)
- }
+ assert.True(t, got.Equal(want), "%#v.GTIDSet() = %#v, want %#v", input, got, want)
+
}
func TestMariaGTIDSetString(t *testing.T) {
@@ -188,9 +164,8 @@ func TestMariaGTIDSetString(t *testing.T) {
want := "1-1234-5678,3-4321-9876,5-4727-1737373"
got := input.String()
- if got != want {
- t.Errorf("%#v.String() = '%v', want '%v'", input, got, want)
- }
+ assert.Equal(t, want, got, "%#v.String() = '%v', want '%v'", input, got, want)
+
}
func TestMariaGTIDSetContainsLess(t *testing.T) {
@@ -601,10 +576,8 @@ func TestMariaGTIDSetUnion(t *testing.T) {
4: MariadbGTID{Domain: 4, Server: 1, Sequence: 2},
5: MariadbGTID{Domain: 5, Server: 1, Sequence: 4},
}
+ assert.True(t, got.Equal(want), "set1: %#v, set1.Union(%#v) = %#v, want %#v", set1, set2, got, want)
- if !got.Equal(want) {
- t.Errorf("set1: %#v, set1.Union(%#v) = %#v, want %#v", set1, set2, got, want)
- }
}
func TestMariaGTIDSetUnionNewDomain(t *testing.T) {
@@ -628,10 +601,7 @@ func TestMariaGTIDSetUnionNewDomain(t *testing.T) {
5: MariadbGTID{Domain: 5, Server: 1, Sequence: 4},
6: MariadbGTID{Domain: 6, Server: 1, Sequence: 7},
}
-
- if !got.Equal(want) {
- t.Errorf("set1: %#v, set1.Union(%#v) = %#v, want %#v", set1, set2, got, want)
- }
+ assert.True(t, got.Equal(want), "set1: %#v, set1.Union(%#v) = %#v, want %#v", set1, set2, got, want)
switch g := got.(type) {
case MariadbGTIDSet:
diff --git a/go/mysql/mysql56_gtid.go b/go/mysql/mysql56_gtid.go
index f57323056e6..0aae3d54336 100644
--- a/go/mysql/mysql56_gtid.go
+++ b/go/mysql/mysql56_gtid.go
@@ -73,14 +73,14 @@ func ParseSID(s string) (sid SID, err error) {
}
// Drop the dashes so we can just check the error of Decode once.
- b := make([]byte, 0, 32)
- b = append(b, s[:8]...)
- b = append(b, s[9:13]...)
- b = append(b, s[14:18]...)
- b = append(b, s[19:23]...)
- b = append(b, s[24:]...)
-
- if _, err := hex.Decode(sid[:], b); err != nil {
+ var b [32]byte
+ copy(b[0:], s[:8])
+ copy(b[8:], s[9:13])
+ copy(b[12:], s[14:18])
+ copy(b[16:], s[19:23])
+ copy(b[20:], s[24:])
+
+ if _, err := hex.Decode(sid[:], b[:]); err != nil {
return sid, vterrors.Wrapf(err, "invalid MySQL 5.6 SID %q", s)
}
return sid, nil
diff --git a/go/mysql/mysql56_gtid_set.go b/go/mysql/mysql56_gtid_set.go
index 1a0e065bf99..63e778f3527 100644
--- a/go/mysql/mysql56_gtid_set.go
+++ b/go/mysql/mysql56_gtid_set.go
@@ -19,10 +19,11 @@ package mysql
import (
"bytes"
"encoding/binary"
- "sort"
"strconv"
"strings"
+ "golang.org/x/exp/slices"
+
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -35,20 +36,10 @@ func (iv interval) contains(other interval) bool {
return iv.start <= other.start && other.end <= iv.end
}
-type intervalList []interval
-
-// Len implements sort.Interface.
-func (s intervalList) Len() int { return len(s) }
-
-// Less implements sort.Interface.
-func (s intervalList) Less(i, j int) bool { return s[i].start < s[j].start }
-
-// Swap implements sort.Interface.
-func (s intervalList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
func parseInterval(s string) (interval, error) {
- parts := strings.Split(s, "-")
- start, err := strconv.ParseInt(parts[0], 10, 64)
+ part0, part1, twoParts := strings.Cut(s, "-")
+
+ start, err := strconv.ParseUint(part0, 10, 63)
if err != nil {
return interval{}, vterrors.Wrapf(err, "invalid interval (%q)", s)
}
@@ -56,49 +47,62 @@ func parseInterval(s string) (interval, error) {
return interval{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "invalid interval (%q): start must be > 0", s)
}
- switch len(parts) {
- case 1:
- return interval{start: start, end: start}, nil
- case 2:
- end, err := strconv.ParseInt(parts[1], 10, 64)
+ if twoParts {
+ end, err := strconv.ParseUint(part1, 10, 63)
if err != nil {
return interval{}, vterrors.Wrapf(err, "invalid interval (%q)", s)
}
- return interval{start: start, end: end}, nil
- default:
- return interval{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "invalid interval (%q): expected start-end or single number", s)
+ return interval{start: int64(start), end: int64(end)}, nil
}
+ return interval{start: int64(start), end: int64(start)}, nil
}
-// parseMysql56GTIDSet is registered as a GTIDSet parser.
+// ParseMysql56GTIDSet is registered as a GTIDSet parser.
//
// https://dev.mysql.com/doc/refman/5.6/en/replication-gtids-concepts.html
-func parseMysql56GTIDSet(s string) (Mysql56GTIDSet, error) {
- set := Mysql56GTIDSet{}
+func ParseMysql56GTIDSet(s string) (Mysql56GTIDSet, error) {
+ set := make(Mysql56GTIDSet)
+ input := s
// gtid_set: uuid_set [, uuid_set] ...
- for _, uuidSet := range strings.Split(s, ",") {
+ for len(input) > 0 {
+ var uuidSet string
+ if idx := strings.IndexByte(input, ','); idx >= 0 {
+ uuidSet = input[:idx]
+ input = input[idx+1:]
+ } else {
+ uuidSet = input
+ input = ""
+ }
+
uuidSet = strings.TrimSpace(uuidSet)
if uuidSet == "" {
continue
}
// uuid_set: uuid:interval[:interval]...
- parts := strings.Split(uuidSet, ":")
- if len(parts) < 2 {
+ head, tail, ok := strings.Cut(uuidSet, ":")
+ if !ok {
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "invalid MySQL 5.6 GTID set (%q): expected uuid:interval", s)
}
// Parse Server ID.
- sid, err := ParseSID(parts[0])
+ sid, err := ParseSID(head)
if err != nil {
return nil, vterrors.Wrapf(err, "invalid MySQL 5.6 GTID set (%q)", s)
}
- // Parse Intervals.
- intervals := make([]interval, 0, len(parts)-1)
- for _, part := range parts[1:] {
- iv, err := parseInterval(part)
+ intervals := make([]interval, 0, strings.Count(tail, ":")+1)
+ for len(tail) > 0 {
+ if idx := strings.IndexByte(tail, ':'); idx >= 0 {
+ head = tail[:idx]
+ tail = tail[idx+1:]
+ } else {
+ head = tail
+ tail = ""
+ }
+
+ iv, err := parseInterval(head)
if err != nil {
return nil, vterrors.Wrapf(err, "invalid MySQL 5.6 GTID set (%q)", s)
}
@@ -115,8 +119,16 @@ func parseMysql56GTIDSet(s string) (Mysql56GTIDSet, error) {
continue
}
+ if sidIntervals, ok := set[sid]; ok {
+ // SID already exists, we append
+ // Example: "00010203-0405-0607-0809-0a0b0c0d0e0f:1-5,00010203-0405-0607-0809-0a0b0c0d0e0f:10-20"
+ // turns to: "00010203-0405-0607-0809-0a0b0c0d0e0f:1-5:10-20"
+ intervals = append(sidIntervals, intervals...)
+ }
// Internally we expect intervals to be stored in order.
- sort.Sort(intervalList(intervals))
+ slices.SortFunc(intervals, func(a, b interval) bool {
+ return a.start < b.start
+ })
set[sid] = intervals
}
@@ -132,25 +144,19 @@ func (set Mysql56GTIDSet) SIDs() []SID {
for sid := range set {
sids = append(sids, sid)
}
- sort.Sort(sidList(sids))
+ sortSIDs(sids)
return sids
}
-type sidList []SID
-
-// Len implements sort.Interface.
-func (s sidList) Len() int { return len(s) }
-
-// Less implements sort.Interface.
-func (s sidList) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 }
-
-// Swap implements sort.Interface.
-func (s sidList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func sortSIDs(sids []SID) {
+ slices.SortFunc(sids, func(a, b SID) bool {
+ return bytes.Compare(a[:], b[:]) < 0
+ })
+}
// String implements GTIDSet.
func (set Mysql56GTIDSet) String() string {
- buf := &bytes.Buffer{}
-
+ var buf strings.Builder
for i, sid := range set.SIDs() {
if i != 0 {
buf.WriteByte(',')
@@ -167,7 +173,6 @@ func (set Mysql56GTIDSet) String() string {
}
}
}
-
return buf.String()
}
@@ -175,8 +180,7 @@ func (set Mysql56GTIDSet) String() string {
// For gtidset having multiple SIDs or multiple intervals
// it just returns the last SID with last interval
func (set Mysql56GTIDSet) Last() string {
- buf := &bytes.Buffer{}
-
+ var buf strings.Builder
if len(set.SIDs()) > 0 {
sid := set.SIDs()[len(set.SIDs())-1]
buf.WriteString(sid.String())
@@ -187,7 +191,6 @@ func (set Mysql56GTIDSet) Last() string {
buf.WriteString(strconv.FormatInt(lastInterval.end, 10))
}
}
-
return buf.String()
}
@@ -657,7 +660,7 @@ func popInterval(dst *interval, s1, s2 *[]interval) bool {
func init() {
gtidSetParsers[Mysql56FlavorID] = func(s string) (GTIDSet, error) {
- return parseMysql56GTIDSet(s)
+ return ParseMysql56GTIDSet(s)
}
}
@@ -665,11 +668,11 @@ func init() {
// The result is also a string.
// An error is thrown if parsing is not possible for either GTIDSets
func Subtract(lhs, rhs string) (string, error) {
- lhsSet, err := parseMysql56GTIDSet(lhs)
+ lhsSet, err := ParseMysql56GTIDSet(lhs)
if err != nil {
return "", err
}
- rhsSet, err := parseMysql56GTIDSet(rhs)
+ rhsSet, err := ParseMysql56GTIDSet(rhs)
if err != nil {
return "", err
}
diff --git a/go/mysql/mysql56_gtid_set_test.go b/go/mysql/mysql56_gtid_set_test.go
index ba55f642315..98162513fd7 100644
--- a/go/mysql/mysql56_gtid_set_test.go
+++ b/go/mysql/mysql56_gtid_set_test.go
@@ -19,11 +19,11 @@ package mysql
import (
"fmt"
"reflect"
- "sort"
"strings"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestSortSIDList(t *testing.T) {
@@ -43,10 +43,8 @@ func TestSortSIDList(t *testing.T) {
{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
{1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
}
- sort.Sort(sidList(input))
- if !reflect.DeepEqual(input, want) {
- t.Errorf("got %#v, want %#v", input, want)
- }
+ sortSIDs(input)
+ assert.True(t, reflect.DeepEqual(input, want), "got %#v, want %#v", input, want)
}
func TestParseMysql56GTIDSet(t *testing.T) {
@@ -81,6 +79,14 @@ func TestParseMysql56GTIDSet(t *testing.T) {
"00010203-0405-0607-0809-0a0b0c0d0e0f:1-5:8-7:10-20": {
sid1: []interval{{1, 5}, {10, 20}},
},
+ // Same repeating SIDs
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:1-5,00010203-0405-0607-0809-0a0b0c0d0e0f:10-20": {
+ sid1: []interval{{1, 5}, {10, 20}},
+ },
+ // Same repeating SIDs, backwards order
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:10-20,00010203-0405-0607-0809-0a0b0c0d0e0f:1-5": {
+ sid1: []interval{{1, 5}, {10, 20}},
+ },
// Multiple SIDs
"00010203-0405-0607-0809-0a0b0c0d0e0f:1-5:10-20,00010203-0405-0607-0809-0a0b0c0d0eff:1-5:50": {
sid1: []interval{{1, 5}, {10, 20}},
@@ -94,14 +100,11 @@ func TestParseMysql56GTIDSet(t *testing.T) {
}
for input, want := range table {
- got, err := parseMysql56GTIDSet(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- continue
- }
- if !got.Equal(want) {
- t.Errorf("parseMysql56GTIDSet(%#v) = %#v, want %#v", input, got, want)
- }
+ t.Run(input, func(t *testing.T) {
+ got, err := ParseMysql56GTIDSet(input)
+ require.NoError(t, err)
+ assert.Equal(t, want, got)
+ })
}
}
@@ -119,10 +122,8 @@ func TestParseMysql56GTIDSetInvalid(t *testing.T) {
}
for _, input := range table {
- _, err := parseMysql56GTIDSet(input)
- if err == nil {
- t.Errorf("parseMysql56GTIDSet(%#v) expected error, got none", err)
- }
+ _, err := ParseMysql56GTIDSet(input)
+ assert.Error(t, err, "parseMysql56GTIDSet(%#v) expected error, got none", err)
}
}
@@ -152,9 +153,8 @@ func TestMysql56GTIDSetString(t *testing.T) {
for want, input := range table {
got := strings.ToLower(input.String())
- if got != want {
- t.Errorf("%#v.String() = %#v, want %#v", input, got, want)
- }
+ assert.Equal(t, want, got, "%#v.String() = %#v, want %#v", input, got, want)
+
}
}
@@ -230,9 +230,8 @@ func TestMysql56GTIDSetContains(t *testing.T) {
}
for _, other := range contained {
- if !set.Contains(other) {
- t.Errorf("Contains(%#v) = false, want true", other)
- }
+ assert.True(t, set.Contains(other), "Contains(%#v) = false, want true", other)
+
}
// Test cases that should return Contains() = false.
@@ -266,6 +265,28 @@ func TestMysql56GTIDSetContains(t *testing.T) {
}
}
+func TestMysql56GTIDSetContains2(t *testing.T) {
+ set1, err := ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243")
+ require.NoError(t, err)
+ set2, err := ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615")
+ require.NoError(t, err)
+ set3, err := ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-632")
+ require.NoError(t, err)
+ set4, err := ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:20-664")
+ require.NoError(t, err)
+ set5, err := ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:20-243")
+ require.NoError(t, err)
+
+ compareSet, err := ParseMysql56GTIDSet("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615")
+ require.NoError(t, err)
+
+ assert.True(t, compareSet.Contains(set1))
+ assert.True(t, compareSet.Contains(set2))
+ assert.False(t, compareSet.Contains(set3))
+ assert.False(t, compareSet.Contains(set4))
+ assert.True(t, compareSet.Contains(set5))
+}
+
func TestMysql56GTIDSetEqual(t *testing.T) {
sid1 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
sid2 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16}
@@ -289,13 +310,10 @@ func TestMysql56GTIDSetEqual(t *testing.T) {
}
for _, other := range equal {
- if !set.Equal(other) {
- t.Errorf("%#v.Equal(%#v) = false, want true", set, other)
- }
+ assert.True(t, set.Equal(other), "%#v.Equal(%#v) = false, want true", set, other)
// Equality should be transitive.
- if !other.Equal(set) {
- t.Errorf("%#v.Equal(%#v) = false, want true", other, set)
- }
+ assert.True(t, other.Equal(set), "%#v.Equal(%#v) = false, want true", other, set)
+
}
// Test cases that should return Equal() = false.
@@ -443,10 +461,8 @@ func TestMysql56GTIDSetUnion(t *testing.T) {
sid2: []interval{{1, 6}, {20, 50}, {60, 72}},
sid3: []interval{{1, 45}},
}
+ assert.True(t, got.Equal(want), "set1: %#v, set1.Union(%#v) = %#v, want %#v", set1, set2, got, want)
- if !got.Equal(want) {
- t.Errorf("set1: %#v, set1.Union(%#v) = %#v, want %#v", set1, set2, got, want)
- }
}
func TestMysql56GTIDSetDifference(t *testing.T) {
@@ -478,10 +494,7 @@ func TestMysql56GTIDSetDifference(t *testing.T) {
sid4: []interval{{1, 30}},
sid5: []interval{{1, 1}, {7, 7}},
}
-
- if !got.Equal(want) {
- t.Errorf("got %#v; want %#v", got, want)
- }
+ assert.True(t, got.Equal(want), "got %#v; want %#v", got, want)
sid10 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
sid11 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
@@ -493,9 +506,8 @@ func TestMysql56GTIDSetDifference(t *testing.T) {
}
got = set10.Difference(set11)
want = Mysql56GTIDSet{}
- if !got.Equal(want) {
- t.Errorf("got %#v; want %#v", got, want)
- }
+ assert.True(t, got.Equal(want), "got %#v; want %#v", got, want)
+
}
func TestMysql56GTIDSetSIDBlock(t *testing.T) {
@@ -531,18 +543,13 @@ func TestMysql56GTIDSetSIDBlock(t *testing.T) {
6, 0, 0, 0, 0, 0, 0, 0,
}
got := input.SIDBlock()
- if !reflect.DeepEqual(got, want) {
- t.Errorf("%#v.SIDBlock() = %#v, want %#v", input, got, want)
- }
+ assert.True(t, reflect.DeepEqual(got, want), "%#v.SIDBlock() = %#v, want %#v", input, got, want)
// Testing the conversion back.
set, err := NewMysql56GTIDSetFromSIDBlock(want)
- if err != nil {
- t.Fatalf("Reconstructing Mysql56GTIDSet from SID block failed: %v", err)
- }
- if !reflect.DeepEqual(set, input) {
- t.Errorf("NewMysql56GTIDSetFromSIDBlock(%#v) = %#v, want %#v", want, set, input)
- }
+ require.NoError(t, err, "Reconstructing Mysql56GTIDSet from SID block failed: %v", err)
+ assert.True(t, reflect.DeepEqual(set, input), "NewMysql56GTIDSetFromSIDBlock(%#v) = %#v, want %#v", want, set, input)
+
}
func TestMySQL56GTIDSetLast(t *testing.T) {
@@ -611,6 +618,46 @@ func TestSubtract(t *testing.T) {
lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65cca-3fe4-11ed-bbfb-091034d48b3e:1",
rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65cca-3fe4-11ed-bbfb-091034d48b3e:1",
difference: "",
+ }, {
+ name: "subtract prefix",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-3",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:4-8",
+ }, {
+ name: "subtract mid",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:2-3",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1:4-8",
+ }, {
+ name: "subtract suffix",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:7-8",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-6",
+ }, {
+ name: "subtract complex range 1",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8:12-17",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:7-8",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-6:12-17",
+ }, {
+ name: "subtract complex range 2",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8:12-17",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-13",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8:14-17",
+ }, {
+ name: "subtract complex range 3",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8:12-17",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:7-13",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-6:14-17",
+ }, {
+ name: "subtract repeating uuid",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-17",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:7-13",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-6:14-17",
+ }, {
+ name: "subtract repeating uuid in descending order",
+ lhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-17,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8",
+ rhs: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:7-13",
+ difference: "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-6:14-17",
}, {
name: "parsing error in left set",
lhs: "incorrect set",
@@ -635,3 +682,25 @@ func TestSubtract(t *testing.T) {
})
}
}
+
+func BenchmarkMySQL56GTIDParsing(b *testing.B) {
+ var Inputs = []string{
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:1-5",
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:12",
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:1-5:10-20",
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:10-20:1-5",
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:8-7",
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:1-5:8-7:10-20",
+ "00010203-0405-0607-0809-0a0b0c0d0e0f:1-5:10-20,00010203-0405-0607-0809-0a0b0c0d0eff:1-5:50",
+ "8aabbf4f-5074-11ed-b225-aa23ce7e3ba2:1-20443,a6f1bf40-5073-11ed-9c0f-12a3889dc912:1-343402",
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for n := 0; n < b.N; n++ {
+ for _, input := range Inputs {
+ _, _ = ParseMysql56GTIDSet(input)
+ }
+ }
+}
diff --git a/go/mysql/mysql56_gtid_test.go b/go/mysql/mysql56_gtid_test.go
index 9ea8363645d..335835d8199 100644
--- a/go/mysql/mysql56_gtid_test.go
+++ b/go/mysql/mysql56_gtid_test.go
@@ -19,6 +19,9 @@ package mysql
import (
"strings"
"testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestParseMysql56GTID(t *testing.T) {
@@ -29,12 +32,9 @@ func TestParseMysql56GTID(t *testing.T) {
}
got, err := parseMysql56GTID(input)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("parseMysql56GTID(%#v) = %#v, want %#v", input, got, want)
- }
+ require.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "parseMysql56GTID(%#v) = %#v, want %#v", input, got, want)
+
}
func TestParseMysql56GTIDInvalid(t *testing.T) {
@@ -48,9 +48,8 @@ func TestParseMysql56GTIDInvalid(t *testing.T) {
for _, input := range table {
_, err := parseMysql56GTID(input)
- if err == nil {
- t.Errorf("parseMysql56GTID(%#v): expected error, got none", input)
- }
+ assert.Error(t, err, "parseMysql56GTID(%#v): expected error, got none", input)
+
}
}
@@ -68,12 +67,9 @@ func TestParseSID(t *testing.T) {
want := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
got, err := ParseSID(input)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if got != want {
- t.Errorf("ParseSID(%#v) = %#v, want %#v", input, got, want)
- }
+ require.NoError(t, err, "unexpected error: %v", err)
+ assert.Equal(t, want, got, "ParseSID(%#v) = %#v, want %#v", input, got, want)
+
}
func TestParseSIDInvalid(t *testing.T) {
@@ -86,9 +82,8 @@ func TestParseSIDInvalid(t *testing.T) {
for _, input := range table {
_, err := ParseSID(input)
- if err == nil {
- t.Errorf("ParseSID(%#v): expected error, got none", input)
- }
+ assert.Error(t, err, "ParseSID(%#v): expected error, got none", input)
+
}
}
diff --git a/go/mysql/query.go b/go/mysql/query.go
index 6818d646c57..f8adb91f60f 100644
--- a/go/mysql/query.go
+++ b/go/mysql/query.go
@@ -416,7 +416,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result,
for {
data, err := c.readEphemeralPacket()
if err != nil {
- return nil, false, 0, err
+ return nil, false, 0, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
}
if c.isEOFPacket(data) {
@@ -573,7 +573,7 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b
}
if prepare.ParamsCount > 0 {
- bitMap, pos, ok = readBytes(payload, pos, int((prepare.ParamsCount+7)/8))
+ bitMap, pos, ok = readBytes(payload, pos, (int(prepare.ParamsCount)+7)/8)
if !ok {
return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading NULL-bitmap failed")
}
diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go
index 5fdcbc8c830..8305103c891 100644
--- a/go/mysql/query_test.go
+++ b/go/mysql/query_test.go
@@ -91,9 +91,8 @@ func TestComInitDB(t *testing.T) {
t.Fatalf("sConn.ReadPacket - ComInitDB failed: %v %v", data, err)
}
db := sConn.parseComInitDB(data)
- if db != "my_db" {
- t.Errorf("parseComInitDB returned unexpected data: %v", db)
- }
+ assert.Equal(t, "my_db", db, "parseComInitDB returned unexpected data: %v", db)
+
}
func TestComSetOption(t *testing.T) {
@@ -113,12 +112,9 @@ func TestComSetOption(t *testing.T) {
t.Fatalf("sConn.ReadPacket - ComSetOption failed: %v %v", data, err)
}
operation, ok := sConn.parseComSetOption(data)
- if !ok {
- t.Fatalf("parseComSetOption failed unexpectedly")
- }
- if operation != 1 {
- t.Errorf("parseComSetOption returned unexpected data: %v", operation)
- }
+ require.True(t, ok, "parseComSetOption failed unexpectedly")
+ assert.Equal(t, uint16(1), operation, "parseComSetOption returned unexpected data: %v", operation)
+
}
func TestComStmtPrepare(t *testing.T) {
@@ -137,14 +133,10 @@ func TestComStmtPrepare(t *testing.T) {
}
data, err := sConn.ReadPacket()
- if err != nil {
- t.Fatalf("sConn.ReadPacket - ComPrepare failed: %v", err)
- }
+ require.NoError(t, err, "sConn.ReadPacket - ComPrepare failed: %v", err)
parsedQuery := sConn.parseComPrepare(data)
- if parsedQuery != sql {
- t.Fatalf("Received incorrect query, want: %v, got: %v", sql, parsedQuery)
- }
+ require.Equal(t, sql, parsedQuery, "Received incorrect query, want: %v, got: %v", sql, parsedQuery)
prepare, result := MockPrepareData(t)
sConn.PrepareData = make(map[uint32]*PrepareData)
@@ -156,12 +148,9 @@ func TestComStmtPrepare(t *testing.T) {
}
resp, err := cConn.ReadPacket()
- if err != nil {
- t.Fatalf("cConn.ReadPacket failed: %v", err)
- }
- if uint32(resp[1]) != prepare.StatementID {
- t.Fatalf("Received incorrect Statement ID, want: %v, got: %v", prepare.StatementID, resp[1])
- }
+ require.NoError(t, err, "cConn.ReadPacket failed: %v", err)
+ require.Equal(t, prepare.StatementID, uint32(resp[1]), "Received incorrect Statement ID, want: %v, got: %v", prepare.StatementID, resp[1])
+
}
func TestComStmtPrepareUpdStmt(t *testing.T) {
@@ -229,20 +218,13 @@ func TestComStmtSendLongData(t *testing.T) {
t.Fatalf("sConn.ReadPacket - ComStmtClose failed: %v %v", data, err)
}
stmtID, paramID, chunkData, ok := sConn.parseComStmtSendLongData(data)
- if !ok {
- t.Fatalf("parseComStmtSendLongData failed")
- }
- if paramID != 1 {
- t.Fatalf("Received incorrect ParamID, want %v, got %v:", paramID, 1)
- }
- if stmtID != prepare.StatementID {
- t.Fatalf("Received incorrect value, want: %v, got: %v", uint32(data[1]), prepare.StatementID)
- }
+ require.True(t, ok, "parseComStmtSendLongData failed")
+ require.Equal(t, uint16(1), paramID, "Received incorrect ParamID, want %v, got %v:", paramID, 1)
+ require.Equal(t, prepare.StatementID, stmtID, "Received incorrect value, want: %v, got: %v", uint32(data[1]), prepare.StatementID)
// Check length of chunkData, Since its a subset of `data` and compare with it after we subtract the number of bytes that was read from it.
// sizeof(uint32) + sizeof(uint16) + 1 = 7
- if len(chunkData) != len(data)-7 {
- t.Fatalf("Received bad chunkData")
- }
+ require.Equal(t, len(data)-7, len(chunkData), "Received bad chunkData")
+
}
func TestComStmtExecute(t *testing.T) {
@@ -261,12 +243,9 @@ func TestComStmtExecute(t *testing.T) {
data := []byte{23, 18, 0, 0, 0, 128, 1, 0, 0, 0, 0, 1, 1, 128, 1}
stmtID, _, err := sConn.parseComStmtExecute(cConn.PrepareData, data)
- if err != nil {
- t.Fatalf("parseComStmtExeute failed: %v", err)
- }
- if stmtID != 18 {
- t.Fatalf("Parsed incorrect values")
- }
+ require.NoError(t, err, "parseComStmtExeute failed: %v", err)
+ require.Equal(t, uint32(18), stmtID, "Parsed incorrect values")
+
}
func TestComStmtExecuteUpdStmt(t *testing.T) {
@@ -367,12 +346,35 @@ func TestComStmtClose(t *testing.T) {
t.Fatalf("sConn.ReadPacket - ComStmtClose failed: %v %v", data, err)
}
stmtID, ok := sConn.parseComStmtClose(data)
- if !ok {
- t.Fatalf("parseComStmtClose failed")
- }
- if stmtID != prepare.StatementID {
- t.Fatalf("Received incorrect value, want: %v, got: %v", uint32(data[1]), prepare.StatementID)
- }
+ require.True(t, ok, "parseComStmtClose failed")
+ require.Equal(t, prepare.StatementID, stmtID, "Received incorrect value, want: %v, got: %v", uint32(data[1]), prepare.StatementID)
+
+}
+
+// This test has been added to verify that IO errors in a connection lead to SQL Server lost errors
+// So that we end up closing the connection higher up the stack and not reusing it.
+// This test was added in response to a panic that was run into.
+func TestSQLErrorOnServerClose(t *testing.T) {
+ // Create socket pair for the server and client
+ listener, sConn, cConn := createSocketPair(t)
+ defer func() {
+ listener.Close()
+ sConn.Close()
+ cConn.Close()
+ }()
+
+ err := cConn.WriteComQuery("close before rows read")
+ require.NoError(t, err)
+
+ handler := &testRun{t: t}
+ _ = sConn.handleNextCommand(handler)
+
+ // From the server we will receive a field packet which the client will read
+ // At that point, if the server crashes and closes the connection.
+ // We should be getting a Connection lost error.
+ _, _, _, err = cConn.ReadQueryResult(100, true)
+ require.Error(t, err)
+ require.True(t, IsConnLostDuringQuery(err), err.Error())
}
func TestQueries(t *testing.T) {
@@ -746,16 +748,13 @@ func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result *
for i := 0; i < count; i++ {
kontinue := sConn.handleNextCommand(&handler)
- if !kontinue {
- t.Fatalf("error handling command: %d", i)
- }
+ require.True(t, kontinue, "error handling command: %d", i)
+
}
wg.Wait()
+ require.Equal(t, "", fatalError, fatalError)
- if fatalError != "" {
- t.Fatalf(fatalError)
- }
}
// nolint
diff --git a/go/mysql/register_replica.go b/go/mysql/register_replica.go
new file mode 100644
index 00000000000..d1095742ef4
--- /dev/null
+++ b/go/mysql/register_replica.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysql
+
+import (
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/vterrors"
+)
+
+var (
+ comRegisterReplicaPacketErr = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error reading BinlogDumpGTID packet")
+)
+
+func (c *Conn) parseComRegisterReplica(data []byte) (
+ replicaHost string,
+ replicaPort uint16,
+ replicaUser string,
+ replicaPassword string,
+ err error,
+) {
+ pos := 1
+ pos += 4 // server-id
+
+ // hostname
+ hostnameLen, pos, ok := readUint8(data, pos)
+ if !ok {
+ return replicaHost, replicaPort, replicaUser, replicaPassword, comRegisterReplicaPacketErr
+ }
+ replicaHost = string(data[pos : pos+int(hostnameLen)])
+ pos += int(hostnameLen)
+
+ // username
+ usernameLen, pos, ok := readUint8(data, pos)
+ if !ok {
+ return replicaHost, replicaPort, replicaUser, replicaPassword, comRegisterReplicaPacketErr
+ }
+ replicaUser = string(data[pos : pos+int(usernameLen)])
+ pos += int(usernameLen)
+
+ // password
+ passwordLen, pos, ok := readUint8(data, pos)
+ if !ok {
+ return replicaHost, replicaPort, replicaUser, replicaPassword, comRegisterReplicaPacketErr
+ }
+ replicaPassword = string(data[pos : pos+int(passwordLen)])
+ pos += int(passwordLen)
+
+ // port
+ replicaPort, _, ok = readUint16(data, pos)
+ if !ok {
+ return replicaHost, replicaPort, replicaUser, replicaPassword, comRegisterReplicaPacketErr
+ }
+ // remaining: (commented because of ineffectual assignment)
+ // pos += 4 // replication rank
+ // pos += 4 // master-id
+
+ return replicaHost, replicaPort, replicaUser, replicaPassword, nil
+}
diff --git a/go/mysql/replication.go b/go/mysql/replication.go
index d4bf5346cf6..33f24860266 100644
--- a/go/mysql/replication.go
+++ b/go/mysql/replication.go
@@ -116,6 +116,27 @@ func (c *Conn) SendSemiSyncAck(binlogFilename string, binlogPos uint64) error {
}
+// WriteBinlogEvent writes a binlog event as part of a replication stream
+// https://dev.mysql.com/doc/internals/en/binlog-network-stream.html
+// https://dev.mysql.com/doc/internals/en/binlog-event.html
+func (c *Conn) WriteBinlogEvent(ev BinlogEvent, semiSyncEnabled bool) error {
+ extraBytes := 1 // OK packet
+ if semiSyncEnabled {
+ extraBytes += 2
+ }
+ data, pos := c.startEphemeralPacketWithHeader(len(ev.Bytes()) + extraBytes)
+ pos = writeByte(data, pos, 0) // "OK" prefix
+ if semiSyncEnabled {
+ pos = writeByte(data, pos, 0xef) // semi sync indicator
+ pos = writeByte(data, pos, 0) // no ack expected
+ }
+ _ = writeEOFString(data, pos, string(ev.Bytes()))
+ if err := c.writeEphemeralPacket(); err != nil {
+ return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err)
+ }
+ return nil
+}
+
// SemiSyncExtensionLoaded checks if the semisync extension has been loaded.
// It should work for both MariaDB and MySQL.
func (c *Conn) SemiSyncExtensionLoaded() bool {
diff --git a/go/mysql/replication_constants.go b/go/mysql/replication_constants.go
index f29a551134b..5fcb17271b5 100644
--- a/go/mysql/replication_constants.go
+++ b/go/mysql/replication_constants.go
@@ -190,7 +190,7 @@ const (
eDeleteRowsEventV1 = 25
// Unused
//eIncidentEvent = 26
- //eHeartbeatEvent = 27
+ eHeartbeatEvent = 27
// Unused
//eIgnorableEvent = 28
// Unused
diff --git a/go/mysql/replication_position.go b/go/mysql/replication_position.go
index ca7775666d3..7d242a9b248 100644
--- a/go/mysql/replication_position.go
+++ b/go/mysql/replication_position.go
@@ -138,12 +138,12 @@ func DecodePosition(s string) (rp Position, err error) {
return rp, nil
}
- parts := strings.SplitN(s, "/", 2)
- if len(parts) != 2 {
+ flav, gtid, ok := strings.Cut(s, "/")
+ if !ok {
// There is no flavor. Try looking for a default parser.
return ParsePosition("", s)
}
- return ParsePosition(parts[0], parts[1])
+ return ParsePosition(flav, gtid)
}
// ParsePosition calls the parser for the specified flavor.
diff --git a/go/mysql/replication_position_test.go b/go/mysql/replication_position_test.go
index b8b6379392a..5bb2e5385d0 100644
--- a/go/mysql/replication_position_test.go
+++ b/go/mysql/replication_position_test.go
@@ -20,6 +20,8 @@ import (
"encoding/json"
"strings"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
func TestPositionEqual(t *testing.T) {
@@ -225,9 +227,8 @@ func TestMustParsePositionError(t *testing.T) {
defer func() {
want := `parse error: unknown GTIDSet flavor "unknown flavor !@$!@"`
err := recover()
- if err == nil {
- t.Errorf("wrong error, got %#v, want %#v", err, want)
- }
+ assert.NotNil(t, err, "wrong error, got %#v, want %#v", err, want)
+
got, ok := err.(error)
if !ok || !strings.HasPrefix(got.Error(), want) {
t.Errorf("wrong error, got %#v, want %#v", got, want)
@@ -266,12 +267,9 @@ func TestDecodePosition(t *testing.T) {
want := Position{GTIDSet: fakeGTID{value: "123-456:789"}}
got, err := DecodePosition(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !got.Equal(want) {
- t.Errorf("DecodePosition(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, got.Equal(want), "DecodePosition(%#v) = %#v, want %#v", input, got, want)
+
}
func TestDecodePositionZero(t *testing.T) {
@@ -279,12 +277,9 @@ func TestDecodePositionZero(t *testing.T) {
want := Position{}
got, err := DecodePosition(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !got.Equal(want) {
- t.Errorf("DecodePosition(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, got.Equal(want), "DecodePosition(%#v) = %#v, want %#v", input, got, want)
+
}
func TestDecodePositionNoFlavor(t *testing.T) {
@@ -295,12 +290,9 @@ func TestDecodePositionNoFlavor(t *testing.T) {
want := Position{GTIDSet: fakeGTID{value: "12345"}}
got, err := DecodePosition(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !got.Equal(want) {
- t.Errorf("DecodePosition(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, got.Equal(want), "DecodePosition(%#v) = %#v, want %#v", input, got, want)
+
}
func TestJsonMarshalPosition(t *testing.T) {
@@ -308,9 +300,7 @@ func TestJsonMarshalPosition(t *testing.T) {
want := `"golf/par"`
buf, err := json.Marshal(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
if got := string(buf); got != want {
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
@@ -322,9 +312,7 @@ func TestJsonMarshalPositionPointer(t *testing.T) {
want := `"golf/par"`
buf, err := json.Marshal(&input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
if got := string(buf); got != want {
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
@@ -340,12 +328,9 @@ func TestJsonUnmarshalPosition(t *testing.T) {
var got Position
err := json.Unmarshal([]byte(input), &got)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !got.Equal(want) {
- t.Errorf("json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, got.Equal(want), "json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
+
}
func TestJsonMarshalPositionInStruct(t *testing.T) {
@@ -357,9 +342,7 @@ func TestJsonMarshalPositionInStruct(t *testing.T) {
}
buf, err := json.Marshal(&mystruct{input})
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
if got := string(buf); got != want {
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
@@ -377,9 +360,8 @@ func TestJsonUnmarshalPositionInStruct(t *testing.T) {
Position Position
}
err := json.Unmarshal([]byte(input), &gotStruct)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+
if got := gotStruct.Position; !got.Equal(want) {
t.Errorf("json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
}
@@ -390,9 +372,7 @@ func TestJsonMarshalPositionZero(t *testing.T) {
want := `""`
buf, err := json.Marshal(input)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
if got := string(buf); got != want {
t.Errorf("json.Marshal(%#v) = %#v, want %#v", input, got, want)
@@ -405,10 +385,7 @@ func TestJsonUnmarshalPositionZero(t *testing.T) {
var got Position
err := json.Unmarshal([]byte(input), &got)
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !got.Equal(want) {
- t.Errorf("json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
- }
+ assert.NoError(t, err, "unexpected error: %v", err)
+ assert.True(t, got.Equal(want), "json.Unmarshal(%#v) = %#v, want %#v", input, got, want)
+
}
diff --git a/go/mysql/replication_status.go b/go/mysql/replication_status.go
index 411245764eb..8b27342f2bc 100644
--- a/go/mysql/replication_status.go
+++ b/go/mysql/replication_status.go
@@ -178,7 +178,13 @@ func ProtoToReplicationStatus(s *replicationdatapb.Status) ReplicationStatus {
// FindErrantGTIDs can be used to find errant GTIDs in the receiver's relay log, by comparing it against all known replicas,
// provided as a list of ReplicationStatus's. This method only works if the flavor for all retrieved ReplicationStatus's is MySQL.
// The result is returned as a Mysql56GTIDSet, each of whose elements is a found errant GTID.
+// This function is best effort in nature. If it marks something as errant, then it is for sure errant. But there may be cases of errant GTIDs, which aren't caught by this function.
func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationStatus) (Mysql56GTIDSet, error) {
+ if len(otherReplicaStatuses) == 0 {
+ // If there is nothing to compare this replica against, then we must assume that its GTID set is the correct one.
+ return nil, nil
+ }
+
relayLogSet, ok := s.RelayLogPosition.GTIDSet.(Mysql56GTIDSet)
if !ok {
return nil, fmt.Errorf("errant GTIDs can only be computed on the MySQL flavor")
diff --git a/go/mysql/replication_test.go b/go/mysql/replication_test.go
index c801e898307..c397bc71b45 100644
--- a/go/mysql/replication_test.go
+++ b/go/mysql/replication_test.go
@@ -19,6 +19,11 @@ package mysql
import (
"reflect"
"testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
func TestComBinlogDump(t *testing.T) {
@@ -35,9 +40,7 @@ func TestComBinlogDump(t *testing.T) {
}
data, err := sConn.ReadPacket()
- if err != nil {
- t.Fatalf("sConn.ReadPacket - ComBinlogDump failed: %v", err)
- }
+ require.NoError(t, err, "sConn.ReadPacket - ComBinlogDump failed: %v", err)
expectedData := []byte{
ComBinlogDump,
@@ -46,9 +49,8 @@ func TestComBinlogDump(t *testing.T) {
0x04, 0x03, 0x02, 0x01, // server-id
'm', 'o', 'o', 'f', 'a', 'r', 'm', // binlog-filename
}
- if !reflect.DeepEqual(data, expectedData) {
- t.Errorf("ComBinlogDump returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
- }
+ assert.True(t, reflect.DeepEqual(data, expectedData), "ComBinlogDump returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
+
sConn.sequence = 0
// Write ComBinlogDump packet with no filename, read it, compare.
@@ -57,9 +59,7 @@ func TestComBinlogDump(t *testing.T) {
}
data, err = sConn.ReadPacket()
- if err != nil {
- t.Fatalf("sConn.ReadPacket - ComBinlogDump failed: %v", err)
- }
+ require.NoError(t, err, "sConn.ReadPacket - ComBinlogDump failed: %v", err)
expectedData = []byte{
ComBinlogDump,
@@ -67,9 +67,8 @@ func TestComBinlogDump(t *testing.T) {
0x0a, 0x09, // flags
0x04, 0x03, 0x02, 0x01, // server-id
}
- if !reflect.DeepEqual(data, expectedData) {
- t.Errorf("ComBinlogDump returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
- }
+ assert.True(t, reflect.DeepEqual(data, expectedData), "ComBinlogDump returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
+
}
func TestComBinlogDumpGTID(t *testing.T) {
@@ -80,53 +79,85 @@ func TestComBinlogDumpGTID(t *testing.T) {
cConn.Close()
}()
- // Write ComBinlogDumpGTID packet, read it, compare.
- if err := cConn.WriteComBinlogDumpGTID(0x01020304, "moofarm", 0x05060708090a0b0c, 0x0d0e, []byte{0xfa, 0xfb}); err != nil {
- t.Fatalf("WriteComBinlogDumpGTID failed: %v", err)
- }
+ t.Run("WriteComBinlogDumpGTID", func(t *testing.T) {
+ // Write ComBinlogDumpGTID packet, read it, compare.
+ err := cConn.WriteComBinlogDumpGTID(0x01020304, "moofarm", 0x05060708090a0b0c, 0x0d0e, []byte{0xfa, 0xfb})
+ assert.NoError(t, err)
+ data, err := sConn.ReadPacket()
+ require.NoError(t, err, "sConn.ReadPacket - ComBinlogDumpGTID failed: %v", err)
+
+ expectedData := []byte{
+ ComBinlogDumpGTID,
+ 0x0e, 0x0d, // flags
+ 0x04, 0x03, 0x02, 0x01, // server-id
+ 0x07, 0x00, 0x00, 0x00, // binlog-filename-len
+ 'm', 'o', 'o', 'f', 'a', 'r', 'm', // bilog-filename
+ 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, // binlog-pos
+ 0x02, 0x00, 0x00, 0x00, // data-size
+ 0xfa, 0xfb, // data
+ }
+ assert.Equal(t, expectedData, data)
+ })
- data, err := sConn.ReadPacket()
- if err != nil {
- t.Fatalf("sConn.ReadPacket - ComBinlogDumpGTID failed: %v", err)
- }
-
- expectedData := []byte{
- ComBinlogDumpGTID,
- 0x0e, 0x0d, // flags
- 0x04, 0x03, 0x02, 0x01, // server-id
- 0x07, 0x00, 0x00, 0x00, // binlog-filename-len
- 'm', 'o', 'o', 'f', 'a', 'r', 'm', // bilog-filename
- 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, // binlog-pos
- 0x02, 0x00, 0x00, 0x00, // data-size
- 0xfa, 0xfb, // data
- }
- if !reflect.DeepEqual(data, expectedData) {
- t.Errorf("ComBinlogDumpGTID returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
- }
sConn.sequence = 0
- // Write ComBinlogDumpGTID packet with no filename, read it, compare.
- if err := cConn.WriteComBinlogDumpGTID(0x01020304, "", 0x05060708090a0b0c, 0x0d0e, []byte{0xfa, 0xfb}); err != nil {
- t.Fatalf("WriteComBinlogDumpGTID failed: %v", err)
- }
-
- data, err = sConn.ReadPacket()
- if err != nil {
- t.Fatalf("sConn.ReadPacket - ComBinlogDumpGTID failed: %v", err)
- }
-
- expectedData = []byte{
- ComBinlogDumpGTID,
- 0x0e, 0x0d, // flags
- 0x04, 0x03, 0x02, 0x01, // server-id
- 0x00, 0x00, 0x00, 0x00, // binlog-filename-len
- 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, // binlog-pos
- 0x02, 0x00, 0x00, 0x00, // data-size
- 0xfa, 0xfb, // data
- }
- if !reflect.DeepEqual(data, expectedData) {
- t.Errorf("ComBinlogDumpGTID returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
- }
+ t.Run("WriteComBinlogDumpGTID no filename", func(t *testing.T) {
+ // Write ComBinlogDumpGTID packet with no filename, read it, compare.
+ err := cConn.WriteComBinlogDumpGTID(0x01020304, "", 0x05060708090a0b0c, 0x0d0e, []byte{0xfa, 0xfb})
+ assert.NoError(t, err)
+ data, err := sConn.ReadPacket()
+ require.NoError(t, err, "sConn.ReadPacket - ComBinlogDumpGTID failed: %v", err)
+
+ expectedData := []byte{
+ ComBinlogDumpGTID,
+ 0x0e, 0x0d, // flags
+ 0x04, 0x03, 0x02, 0x01, // server-id
+ 0x00, 0x00, 0x00, 0x00, // binlog-filename-len
+ 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, // binlog-pos
+ 0x02, 0x00, 0x00, 0x00, // data-size
+ 0xfa, 0xfb, // data
+ }
+ assert.Equal(t, expectedData, data)
+ })
+ f := NewMySQL56BinlogFormat()
+ s := NewFakeBinlogStream()
+
+ t.Run("Write rotate event", func(t *testing.T) {
+ event := NewRotateEvent(f, s, 456, "mysql-bin.000123")
+ err := cConn.WriteBinlogEvent(event, false)
+ assert.NoError(t, err)
+ data, err := sConn.ReadPacket()
+ require.NoError(t, err)
+
+ expectedData := []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x1, 0x0, 0x0, 0x0, 0x2f,
+ 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc8, 0x1, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x2d, 0x62, 0x69, 0x6e, 0x2e, 0x30,
+ 0x30, 0x30, 0x31, 0x32, 0x33, 0xfd, 0x1c, 0x1d, 0x80}
+ assert.Equal(t, expectedData, data)
+ })
+ t.Run("Write query event", func(t *testing.T) {
+ q := Query{
+ Database: "my database",
+ SQL: "my query",
+ Charset: &binlogdatapb.Charset{
+ Client: 0x1234,
+ Conn: 0x5678,
+ Server: 0x9abc,
+ },
+ }
+ event := NewQueryEvent(f, s, q)
+ err := cConn.WriteBinlogEvent(event, false)
+ assert.NoError(t, err)
+ data, err := sConn.ReadPacket()
+ require.NoError(t, err)
+
+ expectedData := []byte{0x0, 0x98, 0x68, 0xe9, 0x53, 0x2, 0x1, 0x0, 0x0, 0x0,
+ 0x3f, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0xb, 0x0, 0x0, 0x7, 0x0, 0x4, 0x34, 0x12, 0x78, 0x56, 0xbc,
+ 0x9a, 0x6d, 0x79, 0x20, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x0, 0x6d, 0x79, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x65, 0xaa, 0x33, 0xe}
+ assert.Equal(t, expectedData, data)
+ })
}
func TestSendSemiSyncAck(t *testing.T) {
@@ -145,16 +176,13 @@ func TestSendSemiSyncAck(t *testing.T) {
}
data, err := sConn.ReadPacket()
- if err != nil {
- t.Fatalf("sConn.ReadPacket - SendSemiSyncAck failed: %v", err)
- }
+ require.NoError(t, err, "sConn.ReadPacket - SendSemiSyncAck failed: %v", err)
expectedData := []byte{
ComSemiSyncAck,
0x3c, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // log pos
'm', 'o', 'o', 'f', 'a', 'r', 'm', // binlog-filename
}
- if !reflect.DeepEqual(data, expectedData) {
- t.Errorf("SendSemiSyncAck returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
- }
+ assert.True(t, reflect.DeepEqual(data, expectedData), "SendSemiSyncAck returned unexpected data:\n%v\nwas expecting:\n%v", data, expectedData)
+
}
diff --git a/go/mysql/schema.go b/go/mysql/schema.go
index 860d6b8c06f..1b3f50b31cd 100644
--- a/go/mysql/schema.go
+++ b/go/mysql/schema.go
@@ -23,12 +23,7 @@ import (
querypb "vitess.io/vitess/go/vt/proto/query"
)
-// This file provides a few utility variables and methods, mostly for tests.
-// The assumptions made about the types of fields and data returned
-// by MySQl are validated in schema_test.go. This way all tests
-// can use these variables and methods to simulate a MySQL server
-// (using fakesqldb/ package for instance) and still be guaranteed correct
-// data.
+// This file contains the mysql queries used by different parts of the code.
const (
// BaseShowPrimary is the base query for fetching primary key info.
@@ -40,22 +35,6 @@ const (
// ShowRowsRead is the query used to find the number of rows read.
ShowRowsRead = "show status like 'Innodb_rows_read'"
- // CreateVTDatabase creates the _vt database
- CreateVTDatabase = `CREATE DATABASE IF NOT EXISTS _vt`
-
- // CreateSchemaCopyTable query creates schemacopy table in _vt schema.
- CreateSchemaCopyTable = `
-CREATE TABLE if not exists _vt.schemacopy (
- table_schema varchar(64) NOT NULL,
- table_name varchar(64) NOT NULL,
- column_name varchar(64) NOT NULL,
- ordinal_position bigint(21) unsigned NOT NULL,
- character_set_name varchar(32) DEFAULT NULL,
- collation_name varchar(32) DEFAULT NULL,
- data_type varchar(64) NOT NULL,
- column_key varchar(3) NOT NULL,
- PRIMARY KEY (table_schema, table_name, ordinal_position))`
-
// DetectSchemaChange query detects if there is any schema change from previous copy.
DetectSchemaChange = `
SELECT DISTINCT table_name
@@ -67,7 +46,25 @@ FROM (
UNION ALL
SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key
- FROM _vt.schemacopy c
+ FROM _vt.schemacopy
+ WHERE table_schema = database()
+) _inner
+GROUP BY table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key
+HAVING COUNT(*) = 1
+`
+
+ // DetectSchemaChangeOnlyBaseTable query detects if there is any schema change from previous copy excluding view tables.
+ DetectSchemaChangeOnlyBaseTable = `
+SELECT DISTINCT table_name
+FROM (
+ SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key
+ FROM information_schema.columns
+ WHERE table_schema = database() and table_name in (select table_name from information_schema.tables where table_schema = database() and table_type = 'BASE TABLE')
+
+ UNION ALL
+
+ SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key
+ FROM _vt.schemacopy
WHERE table_schema = database()
) _inner
GROUP BY table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key
@@ -101,13 +98,34 @@ order by table_name, ordinal_position`
// GetColumnNamesQueryPatternForTable is used for mocking queries in unit tests
GetColumnNamesQueryPatternForTable = `SELECT COLUMN_NAME.*TABLE_NAME.*%s.*`
-)
-// VTDatabaseInit contains all the schema creation queries needed to
-var VTDatabaseInit = []string{
- CreateVTDatabase,
- CreateSchemaCopyTable,
-}
+ // Views
+ InsertIntoViewsTable = `insert into _vt.views (
+ table_schema,
+ table_name,
+ create_statement) values (database(), :table_name, :create_statement)`
+
+ ReplaceIntoViewsTable = `replace into _vt.views (
+ table_schema,
+ table_name,
+ create_statement) values (database(), :table_name, :create_statement)`
+
+ UpdateViewsTable = `update _vt.views
+ set create_statement = :create_statement
+ where table_schema = database() and table_name = :table_name`
+
+ DeleteFromViewsTable = `delete from _vt.views where table_schema = database() and table_name in ::table_name`
+
+ SelectFromViewsTable = `select table_name from _vt.views where table_schema = database() and table_name in ::table_name`
+
+ SelectAllViews = `select table_name, updated_at from _vt.views where table_schema = database()`
+
+ // FetchUpdatedViews queries fetches information about updated views
+ FetchUpdatedViews = `select table_name, create_statement from _vt.views where table_schema = database() and table_name in ::viewnames`
+
+ // FetchViews queries fetches all views
+ FetchViews = `select table_name, create_statement from _vt.views where table_schema = database()`
+)
// BaseShowTablesFields contains the fields returned by a BaseShowTables or a BaseShowTablesForTable command.
// They are validated by the
diff --git a/go/mysql/server.go b/go/mysql/server.go
index c32f0d6d85f..4d65ce93a81 100644
--- a/go/mysql/server.go
+++ b/go/mysql/server.go
@@ -116,8 +116,14 @@ type Handler interface {
// execute query.
ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error
+ // ComRegisterReplica is called when a connection receives a ComRegisterReplica request
+ ComRegisterReplica(c *Conn, replicaHost string, replicaPort uint16, replicaUser string, replicaPassword string) error
+
+ // ComBinlogDump is called when a connection receives a ComBinlogDump request
+ ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error
+
// ComBinlogDumpGTID is called when a connection receives a ComBinlogDumpGTID request
- ComBinlogDumpGTID(c *Conn, gtidSet GTIDSet) error
+ ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error
// WarningCount is called at the end of each query to obtain
// the value to be returned to the client in the EOF packet.
diff --git a/go/mysql/server_flaky_test.go b/go/mysql/server_flaky_test.go
index ed30f6a9e7d..325cd1fc111 100644
--- a/go/mysql/server_flaky_test.go
+++ b/go/mysql/server_flaky_test.go
@@ -225,7 +225,14 @@ func (th *testHandler) ComPrepare(c *Conn, query string, bindVars map[string]*qu
func (th *testHandler) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error {
return nil
}
-func (th *testHandler) ComBinlogDumpGTID(c *Conn, gtidSet GTIDSet) error {
+
+func (th *testHandler) ComRegisterReplica(c *Conn, replicaHost string, replicaPort uint16, replicaUser string, replicaPassword string) error {
+ return nil
+}
+func (th *testHandler) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error {
+ return nil
+}
+func (th *testHandler) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error {
return nil
}
@@ -562,7 +569,8 @@ func TestServer(t *testing.T) {
output, err = runMysqlWithErr(t, params, "error after send")
require.Error(t, err)
assert.Contains(t, output, "ERROR 2013 (HY000)", "Unexpected output for 'panic'")
- assert.Contains(t, output, "Lost connection to MySQL server during query", "Unexpected output for 'panic'")
+ // MariaDB might not print the MySQL bit here
+ assert.Regexp(t, `Lost connection to( MySQL)? server during query`, output, "Unexpected output for 'panic': %v", output)
// Run an 'insert' command, no rows, but rows affected.
output, err = runMysqlWithErr(t, params, "insert")
@@ -674,7 +682,8 @@ func TestServerStats(t *testing.T) {
output, err = runMysqlWithErr(t, params, "panic")
require.Error(t, err)
assert.Contains(t, output, "ERROR 2013 (HY000)")
- assert.Contains(t, output, "Lost connection to MySQL server during query", "Unexpected output for 'panic': %v", output)
+ // MariaDB might not print the MySQL bit here
+ assert.Regexp(t, `Lost connection to( MySQL)? server during query`, output, "Unexpected output for 'panic': %v", output)
assert.EqualValues(t, 0, connCount.Get(), "connCount")
assert.EqualValues(t, 2, connAccept.Get(), "connAccept")
@@ -997,9 +1006,7 @@ func TestCachingSha2PasswordAuthWithTLS(t *testing.T) {
// Create the listener, so we can get its host.
l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
- if err != nil {
- t.Fatalf("NewListener failed: %v", err)
- }
+ require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
port := l.Addr().(*net.TCPAddr).Port
@@ -1018,9 +1025,8 @@ func TestCachingSha2PasswordAuthWithTLS(t *testing.T) {
"",
"",
tls.VersionTLS12)
- if err != nil {
- t.Fatalf("TLSServerConfig failed: %v", err)
- }
+ require.NoError(t, err, "TLSServerConfig failed: %v", err)
+
l.TLSConfig.Store(serverConfig)
go func() {
l.Accept()
@@ -1044,16 +1050,14 @@ func TestCachingSha2PasswordAuthWithTLS(t *testing.T) {
ctx := context.Background()
conn, err := Connect(ctx, params)
- if err != nil {
- t.Fatalf("unexpected connection error: %v", err)
- }
+ require.NoError(t, err, "unexpected connection error: %v", err)
+
defer conn.Close()
// Run a 'select rows' command with results.
result, err := conn.ExecuteFetch("select rows", 10000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+
utils.MustMatch(t, result, selectRowsResult)
// Send a ComQuit to avoid the error message on the server side.
@@ -1096,9 +1100,7 @@ func TestCachingSha2PasswordAuthWithMoreData(t *testing.T) {
// Create the listener, so we can get its host.
l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
- if err != nil {
- t.Fatalf("NewListener failed: %v", err)
- }
+ require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
port := l.Addr().(*net.TCPAddr).Port
@@ -1117,9 +1119,8 @@ func TestCachingSha2PasswordAuthWithMoreData(t *testing.T) {
"",
"",
tls.VersionTLS12)
- if err != nil {
- t.Fatalf("TLSServerConfig failed: %v", err)
- }
+ require.NoError(t, err, "TLSServerConfig failed: %v", err)
+
l.TLSConfig.Store(serverConfig)
go func() {
l.Accept()
@@ -1143,16 +1144,14 @@ func TestCachingSha2PasswordAuthWithMoreData(t *testing.T) {
ctx := context.Background()
conn, err := Connect(ctx, params)
- if err != nil {
- t.Fatalf("unexpected connection error: %v", err)
- }
+ require.NoError(t, err, "unexpected connection error: %v", err)
+
defer conn.Close()
// Run a 'select rows' command with results.
result, err := conn.ExecuteFetch("select rows", 10000, true)
- if err != nil {
- t.Fatalf("ExecuteFetch failed: %v", err)
- }
+ require.NoError(t, err, "ExecuteFetch failed: %v", err)
+
utils.MustMatch(t, result, selectRowsResult)
// Send a ComQuit to avoid the error message on the server side.
@@ -1170,9 +1169,7 @@ func TestCachingSha2PasswordAuthWithoutTLS(t *testing.T) {
// Create the listener.
l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
- if err != nil {
- t.Fatalf("NewListener failed: %v", err)
- }
+ require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
port := l.Addr().(*net.TCPAddr).Port
diff --git a/go/mysql/sql_error.go b/go/mysql/sql_error.go
index 22cd2c2ae9e..19a8ef70ee3 100644
--- a/go/mysql/sql_error.go
+++ b/go/mysql/sql_error.go
@@ -152,10 +152,12 @@ func mapToSQLErrorFromErrorCode(err error, msg string) *SQLError {
}
}
-var stateToMysqlCode = map[vterrors.State]struct {
+type mysqlCode struct {
num int
state string
-}{
+}
+
+var stateToMysqlCode = map[vterrors.State]mysqlCode{
vterrors.Undefined: {num: ERUnknownError, state: SSUnknownSQLState},
vterrors.AccessDeniedError: {num: ERAccessDeniedError, state: SSAccessDeniedError},
vterrors.BadDb: {num: ERBadDb, state: SSClientError},
@@ -199,6 +201,28 @@ var stateToMysqlCode = map[vterrors.State]struct {
vterrors.WrongValueCountOnRow: {num: ERWrongValueCountOnRow, state: SSWrongValueCountOnRow},
}
+func getStateToMySQLState(state vterrors.State) mysqlCode {
+ if state == 0 {
+ return mysqlCode{}
+ }
+ s := stateToMysqlCode[state]
+ return s
+}
+
+// ConvertStateToMySQLErrorCode returns MySQL error code for the given vterrors.State
+// If the state is == 0, an empty string is returned
+func ConvertStateToMySQLErrorCode(state vterrors.State) string {
+ s := getStateToMySQLState(state)
+ return strconv.Itoa(s.num)
+}
+
+// ConvertStateToMySQLState returns MySQL state for the given vterrors.State
+// If the state is == 0, an empty string is returned
+func ConvertStateToMySQLState(state vterrors.State) string {
+ s := getStateToMySQLState(state)
+ return s.state
+}
+
func init() {
if len(stateToMysqlCode) != int(vterrors.NumOfStates) {
panic("all vterrors states are not mapped to mysql errors")
diff --git a/go/mysql/sql_error_test.go b/go/mysql/sql_error_test.go
index c6fe2f65251..0b29735e758 100644
--- a/go/mysql/sql_error_test.go
+++ b/go/mysql/sql_error_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package mysql
import (
+ "fmt"
"testing"
"vitess.io/vitess/go/vt/proto/vtrpc"
@@ -151,6 +152,21 @@ func TestNewSQLErrorFromError(t *testing.T) {
num: ERNoDb,
ss: SSNoDB,
},
+ {
+ err: fmt.Errorf("just some random text here"),
+ num: ERUnknownError,
+ ss: SSUnknownSQLState,
+ },
+ {
+ err: fmt.Errorf("task error: Column 'val' cannot be null (errno 1048) (sqlstate 23000) during query: insert into _edf4846d_ab65_11ed_abb1_0a43f95f28a3_20230213061619_vrepl(id,val,ts) values (1,2,'2023-02-13 04:46:16'), (2,3,'2023-02-13 04:46:16'), (3,null,'2023-02-13 04:46:16')"),
+ num: ERBadNullError,
+ ss: SSConstraintViolation,
+ },
+ {
+ err: vterrors.Wrapf(fmt.Errorf("Column 'val' cannot be null (errno 1048) (sqlstate 23000) during query: insert into _edf4846d_ab65_11ed_abb1_0a43f95f28a3_20230213061619_vrepl(id,val,ts) values (1,2,'2023-02-13 04:46:16'), (2,3,'2023-02-13 04:46:16'), (3,null,'2023-02-13 04:46:16')"), "task error: %d", 17),
+ num: ERBadNullError,
+ ss: SSConstraintViolation,
+ },
}
for _, tc := range tCases {
diff --git a/go/pools/resource_pool.go b/go/pools/resource_pool.go
index 63eba86d9a1..37ffc189e09 100644
--- a/go/pools/resource_pool.go
+++ b/go/pools/resource_pool.go
@@ -22,6 +22,7 @@ import (
"context"
"errors"
"fmt"
+ "math/rand"
"sync"
"time"
@@ -51,6 +52,7 @@ type (
WaitTime() time.Duration
IdleTimeout() time.Duration
IdleClosed() int64
+ MaxLifetimeClosed() int64
Exhausted() int64
GetCount() int64
GetSettingCount() int64
@@ -63,6 +65,7 @@ type (
// is the responsibility of the caller.
Resource interface {
Close()
+ Expired(time.Duration) bool
ApplySetting(ctx context.Context, setting *Setting) error
IsSettingApplied() bool
IsSameSetting(setting string) bool
@@ -86,16 +89,18 @@ type (
// ResourcePool allows you to use a pool of resources.
ResourcePool struct {
// stats. Atomic fields must remain at the top in order to prevent panics on certain architectures.
- available sync2.AtomicInt64
- active sync2.AtomicInt64
- inUse sync2.AtomicInt64
- waitCount sync2.AtomicInt64
- waitTime sync2.AtomicDuration
- idleClosed sync2.AtomicInt64
- exhausted sync2.AtomicInt64
+ available sync2.AtomicInt64
+ active sync2.AtomicInt64
+ inUse sync2.AtomicInt64
+ waitCount sync2.AtomicInt64
+ waitTime sync2.AtomicDuration
+ idleClosed sync2.AtomicInt64
+ maxLifetimeClosed sync2.AtomicInt64
+ exhausted sync2.AtomicInt64
capacity sync2.AtomicInt64
idleTimeout sync2.AtomicDuration
+ maxLifetime sync2.AtomicDuration
resources chan resourceWrapper
factory Factory
@@ -148,11 +153,12 @@ func (s *Setting) GetResetQuery() string {
// If a resource is unused beyond idleTimeout, it's replaced
// with a new one.
// An idleTimeout of 0 means that there is no timeout.
+// An maxLifetime of 0 means that there is no timeout.
// A non-zero value of prefillParallelism causes the pool to be pre-filled.
// The value specifies how many resources can be opened in parallel.
// refreshCheck is a function we consult at refreshInterval
// intervals to determine if the pool should be drained and reopened
-func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration, logWait func(time.Time), refreshCheck RefreshCheck, refreshInterval time.Duration) *ResourcePool {
+func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration, maxLifetime time.Duration, logWait func(time.Time), refreshCheck RefreshCheck, refreshInterval time.Duration) *ResourcePool {
if capacity <= 0 || maxCap <= 0 || capacity > maxCap {
panic(errors.New("invalid/out of range capacity"))
}
@@ -163,6 +169,7 @@ func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Dur
available: sync2.NewAtomicInt64(int64(capacity)),
capacity: sync2.NewAtomicInt64(int64(capacity)),
idleTimeout: sync2.NewAtomicDuration(idleTimeout),
+ maxLifetime: sync2.NewAtomicDuration(maxLifetime),
logWait: logWait,
}
for i := 0; i < capacity; i++ {
@@ -403,7 +410,14 @@ func (rp *ResourcePool) Put(resource Resource) {
timeUsed: time.Now(),
}
hasSettings = resource.IsSettingApplied()
- } else {
+ if resource.Expired(rp.extendedMaxLifetime()) {
+ rp.maxLifetimeClosed.Add(1)
+ resource.Close()
+ resource = nil
+ }
+ }
+ if resource == nil {
+ // Create new resource
rp.reopenResource(&wrapper)
recreated = true
}
@@ -514,7 +528,7 @@ func (rp *ResourcePool) SetIdleTimeout(idleTimeout time.Duration) {
// StatsJSON returns the stats in JSON format.
func (rp *ResourcePool) StatsJSON() string {
- return fmt.Sprintf(`{"Capacity": %v, "Available": %v, "Active": %v, "InUse": %v, "MaxCapacity": %v, "WaitCount": %v, "WaitTime": %v, "IdleTimeout": %v, "IdleClosed": %v, "Exhausted": %v}`,
+ return fmt.Sprintf(`{"Capacity": %v, "Available": %v, "Active": %v, "InUse": %v, "MaxCapacity": %v, "WaitCount": %v, "WaitTime": %v, "IdleTimeout": %v, "IdleClosed": %v, "MaxLifetimeClosed": %v, "Exhausted": %v}`,
rp.Capacity(),
rp.Available(),
rp.Active(),
@@ -524,6 +538,7 @@ func (rp *ResourcePool) StatsJSON() string {
rp.WaitTime().Nanoseconds(),
rp.IdleTimeout().Nanoseconds(),
rp.IdleClosed(),
+ rp.MaxLifetimeClosed(),
rp.Exhausted(),
)
}
@@ -564,7 +579,7 @@ func (rp *ResourcePool) WaitTime() time.Duration {
return rp.waitTime.Get()
}
-// IdleTimeout returns the idle timeout.
+// IdleTimeout returns the resource idle timeout.
func (rp *ResourcePool) IdleTimeout() time.Duration {
return rp.idleTimeout.Get()
}
@@ -574,6 +589,20 @@ func (rp *ResourcePool) IdleClosed() int64 {
return rp.idleClosed.Get()
}
+// extendedLifetimeTimeout returns random duration within range [maxLifetime, 2*maxLifetime)
+func (rp *ResourcePool) extendedMaxLifetime() time.Duration {
+ maxLifetime := rp.maxLifetime.Get()
+ if maxLifetime == 0 {
+ return 0
+ }
+ return maxLifetime + time.Duration(rand.Int63n(maxLifetime.Nanoseconds()))
+}
+
+// MaxLifetimeClosed returns the count of resources closed due to refresh timeout.
+func (rp *ResourcePool) MaxLifetimeClosed() int64 {
+ return rp.maxLifetimeClosed.Get()
+}
+
// Exhausted returns the number of times Available dropped below 1
func (rp *ResourcePool) Exhausted() int64 {
return rp.exhausted.Get()
diff --git a/go/pools/resource_pool_test.go b/go/pools/resource_pool_test.go
index 4798e150718..8ec812b4603 100644
--- a/go/pools/resource_pool_test.go
+++ b/go/pools/resource_pool_test.go
@@ -40,10 +40,11 @@ var (
)
type TestResource struct {
- num int64
- closed bool
- setting string
- failApply bool
+ num int64
+ timeCreated time.Time
+ closed bool
+ setting string
+ failApply bool
}
func (tr *TestResource) ResetSetting(ctx context.Context) error {
@@ -78,13 +79,17 @@ func (tr *TestResource) Close() {
var _ Resource = (*TestResource)(nil)
+func (tr *TestResource) Expired(lifetimeTimeout time.Duration) bool {
+ return lifetimeTimeout > 0 && time.Until(tr.timeCreated.Add(lifetimeTimeout)) < 0
+}
+
func logWait(start time.Time) {
waitStarts = append(waitStarts, start)
}
func PoolFactory(context.Context) (Resource, error) {
count.Add(1)
- return &TestResource{num: lastID.Add(1)}, nil
+ return &TestResource{num: lastID.Add(1), timeCreated: time.Now()}, nil
}
func FailFactory(context.Context) (Resource, error) {
@@ -107,7 +112,7 @@ func TestOpen(t *testing.T) {
count.Set(0)
waitStarts = waitStarts[:0]
- p := NewResourcePool(PoolFactory, 6, 6, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 6, 6, time.Second, 0, logWait, nil, 0)
p.SetCapacity(5)
var resources [10]Resource
var r Resource
@@ -226,7 +231,7 @@ func TestShrinking(t *testing.T) {
count.Set(0)
waitStarts = waitStarts[:0]
- p := NewResourcePool(PoolFactory, 5, 5, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, nil, 0)
var resources [10]Resource
// Leave one empty slot in the pool
for i := 0; i < 4; i++ {
@@ -245,7 +250,7 @@ func TestShrinking(t *testing.T) {
p.SetCapacity(3)
done <- true
}()
- expected := `{"Capacity": 3, "Available": 0, "Active": 4, "InUse": 4, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "Exhausted": 0}`
+ expected := `{"Capacity": 3, "Available": 0, "Active": 4, "InUse": 4, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "MaxLifetimeClosed": 0, "Exhausted": 0}`
for i := 0; i < 10; i++ {
time.Sleep(10 * time.Millisecond)
stats := p.StatsJSON()
@@ -264,7 +269,7 @@ func TestShrinking(t *testing.T) {
p.Put(resources[i])
}
stats := p.StatsJSON()
- expected = `{"Capacity": 3, "Available": 3, "Active": 3, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "Exhausted": 0}`
+ expected = `{"Capacity": 3, "Available": 3, "Active": 3, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "MaxLifetimeClosed": 0, "Exhausted": 0}`
assert.Equal(t, expected, stats)
assert.EqualValues(t, 3, count.Get())
@@ -359,7 +364,7 @@ func TestClosing(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(PoolFactory, 5, 5, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, nil, 0)
var resources [10]Resource
for i := 0; i < 5; i++ {
var r Resource
@@ -381,7 +386,7 @@ func TestClosing(t *testing.T) {
// Wait for goroutine to call Close
time.Sleep(10 * time.Millisecond)
stats := p.StatsJSON()
- expected := `{"Capacity": 0, "Available": 0, "Active": 5, "InUse": 5, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "Exhausted": 1}`
+ expected := `{"Capacity": 0, "Available": 0, "Active": 5, "InUse": 5, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "MaxLifetimeClosed": 0, "Exhausted": 1}`
assert.Equal(t, expected, stats)
// Put is allowed when closing
@@ -393,7 +398,7 @@ func TestClosing(t *testing.T) {
<-ch
stats = p.StatsJSON()
- expected = `{"Capacity": 0, "Available": 0, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "Exhausted": 1}`
+ expected = `{"Capacity": 0, "Available": 0, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "MaxLifetimeClosed": 0, "Exhausted": 1}`
assert.Equal(t, expected, stats)
assert.EqualValues(t, 5, lastID.Get())
assert.EqualValues(t, 0, count.Get())
@@ -406,7 +411,7 @@ func TestReopen(t *testing.T) {
refreshCheck := func() (bool, error) {
return true, nil
}
- p := NewResourcePool(PoolFactory, 5, 5, time.Second, logWait, refreshCheck, 500*time.Millisecond)
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, refreshCheck, 500*time.Millisecond)
var resources [10]Resource
for i := 0; i < 5; i++ {
var r Resource
@@ -422,7 +427,7 @@ func TestReopen(t *testing.T) {
time.Sleep(10 * time.Millisecond)
stats := p.StatsJSON()
- expected := `{"Capacity": 5, "Available": 0, "Active": 5, "InUse": 5, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "Exhausted": 1}`
+ expected := `{"Capacity": 5, "Available": 0, "Active": 5, "InUse": 5, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "MaxLifetimeClosed": 0, "Exhausted": 1}`
assert.Equal(t, expected, stats)
time.Sleep(650 * time.Millisecond)
@@ -431,7 +436,7 @@ func TestReopen(t *testing.T) {
}
time.Sleep(50 * time.Millisecond)
stats = p.StatsJSON()
- expected = `{"Capacity": 5, "Available": 5, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "Exhausted": 1}`
+ expected = `{"Capacity": 5, "Available": 5, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "MaxLifetimeClosed": 0, "Exhausted": 1}`
assert.Equal(t, expected, stats)
assert.EqualValues(t, 5, lastID.Get())
assert.EqualValues(t, 0, count.Get())
@@ -441,7 +446,7 @@ func TestIdleTimeout(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(PoolFactory, 1, 1, 10*time.Millisecond, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 1, 1, 10*time.Millisecond, 0, logWait, nil, 0)
defer p.Close()
r, err := p.Get(ctx, nil)
@@ -503,7 +508,7 @@ func TestIdleTimeoutWithSettings(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(PoolFactory, 1, 1, 10*time.Millisecond, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 1, 1, 10*time.Millisecond, 0, logWait, nil, 0)
defer p.Close()
r, err := p.Get(ctx, sFooBar)
@@ -565,7 +570,7 @@ func TestIdleTimeoutCreateFail(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(PoolFactory, 1, 1, 10*time.Millisecond, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 1, 1, 10*time.Millisecond, 0, logWait, nil, 0)
defer p.Close()
for _, setting := range []*Setting{nil, sFoo} {
r, err := p.Get(ctx, setting)
@@ -588,11 +593,81 @@ func TestIdleTimeoutCreateFail(t *testing.T) {
}
}
+func TestMaxLifetime(t *testing.T) {
+ // maxLifetime 0
+ ctx := context.Background()
+ lastID.Set(0)
+ count.Set(0)
+
+ p := NewResourcePool(PoolFactory, 1, 1, 10*time.Second, 0, logWait, nil, 0)
+ defer p.Close()
+
+ r, err := p.Get(ctx, nil)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, count.Get())
+ assert.EqualValues(t, 0, p.MaxLifetimeClosed())
+
+ time.Sleep(10 * time.Millisecond)
+
+ p.Put(r)
+ assert.EqualValues(t, 1, lastID.Get())
+ assert.EqualValues(t, 1, count.Get())
+ assert.EqualValues(t, 0, p.MaxLifetimeClosed())
+
+ // maxLifetime > 0
+ ctx = context.Background()
+ lastID.Set(0)
+ count.Set(0)
+
+ p = NewResourcePool(PoolFactory, 1, 1, 10*time.Second, 10*time.Millisecond, logWait, nil, 0)
+ defer p.Close()
+
+ r, err = p.Get(ctx, nil)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, count.Get())
+ assert.EqualValues(t, 0, p.MaxLifetimeClosed())
+
+ time.Sleep(5 * time.Millisecond)
+
+ p.Put(r)
+ assert.EqualValues(t, 1, lastID.Get())
+ assert.EqualValues(t, 1, count.Get())
+ assert.EqualValues(t, 0, p.MaxLifetimeClosed())
+
+ r, err = p.Get(ctx, nil)
+ require.NoError(t, err)
+ assert.EqualValues(t, 1, count.Get())
+ assert.EqualValues(t, 0, p.MaxLifetimeClosed())
+
+ time.Sleep(10 * time.Millisecond * 2)
+
+ p.Put(r)
+ assert.EqualValues(t, 2, lastID.Get())
+ assert.EqualValues(t, 1, count.Get())
+ assert.EqualValues(t, 1, p.MaxLifetimeClosed())
+}
+
+func TestExtendedLifetimeTimeout(t *testing.T) {
+ // maxLifetime 0
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, nil, 0)
+ defer p.Close()
+ assert.Zero(t, p.extendedMaxLifetime())
+
+ // maxLifetime > 0
+ maxLifetime := 10 * time.Millisecond
+ for i := 0; i < 10; i++ {
+ p = NewResourcePool(PoolFactory, 5, 5, time.Second, maxLifetime, logWait, nil, 0)
+ defer p.Close()
+ assert.LessOrEqual(t, maxLifetime, p.extendedMaxLifetime())
+ assert.Greater(t, 2*maxLifetime, p.extendedMaxLifetime())
+ }
+}
+
func TestCreateFail(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(FailFactory, 5, 5, time.Second, logWait, nil, 0)
+ p := NewResourcePool(FailFactory, 5, 5, time.Second, 0, logWait, nil, 0)
defer p.Close()
for _, setting := range []*Setting{nil, sFoo} {
@@ -600,7 +675,7 @@ func TestCreateFail(t *testing.T) {
t.Errorf("Expecting Failed, received %v", err)
}
stats := p.StatsJSON()
- expected := `{"Capacity": 5, "Available": 5, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "Exhausted": 0}`
+ expected := `{"Capacity": 5, "Available": 5, "Active": 0, "InUse": 0, "MaxCapacity": 5, "WaitCount": 0, "WaitTime": 0, "IdleTimeout": 1000000000, "IdleClosed": 0, "MaxLifetimeClosed": 0, "Exhausted": 0}`
assert.Equal(t, expected, stats)
}
}
@@ -609,7 +684,7 @@ func TestCreateFailOnPut(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(PoolFactory, 5, 5, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, nil, 0)
defer p.Close()
for _, setting := range []*Setting{nil, sFoo} {
@@ -630,7 +705,7 @@ func TestSlowCreateFail(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(SlowFailFactory, 2, 2, time.Second, logWait, nil, 0)
+ p := NewResourcePool(SlowFailFactory, 2, 2, time.Second, 0, logWait, nil, 0)
defer p.Close()
ch := make(chan bool)
for _, setting := range []*Setting{nil, sFoo} {
@@ -652,7 +727,7 @@ func TestTimeout(t *testing.T) {
ctx := context.Background()
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(PoolFactory, 1, 1, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 1, 1, time.Second, 0, logWait, nil, 0)
defer p.Close()
// take the only connection available
@@ -675,7 +750,7 @@ func TestTimeout(t *testing.T) {
func TestExpired(t *testing.T) {
lastID.Set(0)
count.Set(0)
- p := NewResourcePool(PoolFactory, 1, 1, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 1, 1, time.Second, 0, logWait, nil, 0)
defer p.Close()
for _, setting := range []*Setting{nil, sFoo} {
@@ -693,7 +768,7 @@ func TestMultiSettings(t *testing.T) {
count.Set(0)
waitStarts = waitStarts[:0]
- p := NewResourcePool(PoolFactory, 5, 5, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, nil, 0)
var resources [10]Resource
var r Resource
var err error
@@ -756,7 +831,7 @@ func TestMultiSettingsWithReset(t *testing.T) {
count.Set(0)
resetCount.Set(0)
- p := NewResourcePool(PoolFactory, 5, 5, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, nil, 0)
var resources [10]Resource
var r Resource
var err error
@@ -802,7 +877,7 @@ func TestApplySettingsFailure(t *testing.T) {
var r Resource
var err error
- p := NewResourcePool(PoolFactory, 5, 5, time.Second, logWait, nil, 0)
+ p := NewResourcePool(PoolFactory, 5, 5, time.Second, 0, logWait, nil, 0)
defer p.Close()
settings := []*Setting{nil, sFoo, sBar, sBar, sFoo}
diff --git a/go/pools/rp_bench_test.go b/go/pools/rp_bench_test.go
index 037450462b4..a045c31d52f 100644
--- a/go/pools/rp_bench_test.go
+++ b/go/pools/rp_bench_test.go
@@ -27,7 +27,7 @@ func BenchmarkGetPut(b *testing.B) {
for _, parallelism := range []int{1, 8, 32, 128} {
rName := fmt.Sprintf("x%d-cap%d", parallelism, size)
b.Run(rName, func(b *testing.B) {
- pool := NewResourcePool(testResourceFactory, size, size, 0, nil, nil, 0)
+ pool := NewResourcePool(testResourceFactory, size, size, 0, 0, nil, nil, 0)
defer pool.Close()
b.ReportAllocs()
@@ -56,7 +56,7 @@ func BenchmarkGetPutWithSettings(b *testing.B) {
for _, parallelism := range []int{1, 8, 32, 128} {
rName := fmt.Sprintf("x%d-cap%d", parallelism, size)
b.Run(rName, func(b *testing.B) {
- pool := NewResourcePool(testResourceFactory, size, size, 0, nil, nil, 0)
+ pool := NewResourcePool(testResourceFactory, size, size, 0, 0, nil, nil, 0)
defer pool.Close()
b.ReportAllocs()
@@ -85,7 +85,7 @@ func BenchmarkGetPutMixed(b *testing.B) {
for _, parallelism := range []int{1, 8, 32, 128} {
rName := fmt.Sprintf("x%d-cap%d", parallelism, size)
b.Run(rName, func(b *testing.B) {
- pool := NewResourcePool(testResourceFactory, size, size, 0, nil, nil, 0)
+ pool := NewResourcePool(testResourceFactory, size, size, 0, 0, nil, nil, 0)
defer pool.Close()
b.ReportAllocs()
@@ -116,7 +116,7 @@ func BenchmarkGetPutMixedMulti(b *testing.B) {
for _, parallelism := range []int{1, 8, 32, 128} {
rName := fmt.Sprintf("x%d-cap%d", parallelism, size)
b.Run(rName, func(b *testing.B) {
- pool := NewResourcePool(testResourceFactory, size, size, 0, nil, nil, 0)
+ pool := NewResourcePool(testResourceFactory, size, size, 0, 0, nil, nil, 0)
defer pool.Close()
b.ReportAllocs()
diff --git a/go/pools/rpc_pool.go b/go/pools/rpc_pool.go
index 174ad0364f6..7ed1349e89e 100644
--- a/go/pools/rpc_pool.go
+++ b/go/pools/rpc_pool.go
@@ -48,7 +48,7 @@ type RPCPool struct {
// will not be called).
func NewRPCPool(size int, waitTimeout time.Duration, logWait func(time.Time)) *RPCPool {
return &RPCPool{
- rp: NewResourcePool(rpcResourceFactory, size, size, 0, logWait, nil, 0),
+ rp: NewResourcePool(rpcResourceFactory, size, size, 0, 0, logWait, nil, 0),
waitTimeout: waitTimeout,
}
}
@@ -111,6 +111,10 @@ func (r *_rpc) ResetSetting(context.Context) error {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: _rpc does not support ResetSetting")
}
+func (r *_rpc) Expired(time.Duration) bool {
+ return false
+}
+
// we only ever return the same rpc pointer. it's used as a sentinel and is
// only used internally so using the same one over and over doesn't matter.
func rpcResourceFactory(ctx context.Context) (Resource, error) { return rpc, nil }
diff --git a/go/sqltypes/proto3.go b/go/sqltypes/proto3.go
index b6a6c9b5707..6eaefcf4b0c 100644
--- a/go/sqltypes/proto3.go
+++ b/go/sqltypes/proto3.go
@@ -100,11 +100,12 @@ func ResultToProto3(qr *Result) *querypb.QueryResult {
return nil
}
return &querypb.QueryResult{
- Fields: qr.Fields,
- RowsAffected: qr.RowsAffected,
- InsertId: qr.InsertID,
- Rows: RowsToProto3(qr.Rows),
- Info: qr.Info,
+ Fields: qr.Fields,
+ RowsAffected: qr.RowsAffected,
+ InsertId: qr.InsertID,
+ Rows: RowsToProto3(qr.Rows),
+ Info: qr.Info,
+ SessionStateChanges: qr.SessionStateChanges,
}
}
@@ -115,11 +116,12 @@ func Proto3ToResult(qr *querypb.QueryResult) *Result {
return nil
}
return &Result{
- Fields: qr.Fields,
- RowsAffected: qr.RowsAffected,
- InsertID: qr.InsertId,
- Rows: proto3ToRows(qr.Fields, qr.Rows),
- Info: qr.Info,
+ Fields: qr.Fields,
+ RowsAffected: qr.RowsAffected,
+ InsertID: qr.InsertId,
+ Rows: proto3ToRows(qr.Fields, qr.Rows),
+ Info: qr.Info,
+ SessionStateChanges: qr.SessionStateChanges,
}
}
@@ -131,10 +133,12 @@ func CustomProto3ToResult(fields []*querypb.Field, qr *querypb.QueryResult) *Res
return nil
}
return &Result{
- Fields: qr.Fields,
- RowsAffected: qr.RowsAffected,
- InsertID: qr.InsertId,
- Rows: proto3ToRows(fields, qr.Rows),
+ Fields: qr.Fields,
+ RowsAffected: qr.RowsAffected,
+ InsertID: qr.InsertId,
+ Rows: proto3ToRows(fields, qr.Rows),
+ Info: qr.Info,
+ SessionStateChanges: qr.SessionStateChanges,
}
}
diff --git a/go/sqltypes/result.go b/go/sqltypes/result.go
index bf00cd801b7..80952598ec9 100644
--- a/go/sqltypes/result.go
+++ b/go/sqltypes/result.go
@@ -90,8 +90,11 @@ func (result *Result) ReplaceKeyspace(keyspace string) {
// Copy creates a deep copy of Result.
func (result *Result) Copy() *Result {
out := &Result{
- InsertID: result.InsertID,
- RowsAffected: result.RowsAffected,
+ RowsAffected: result.RowsAffected,
+ InsertID: result.InsertID,
+ SessionStateChanges: result.SessionStateChanges,
+ StatusFlags: result.StatusFlags,
+ Info: result.Info,
}
if result.Fields != nil {
out.Fields = make([]*querypb.Field, len(result.Fields))
@@ -108,6 +111,30 @@ func (result *Result) Copy() *Result {
return out
}
+// ShallowCopy creates a shallow copy of Result.
+func (result *Result) ShallowCopy() *Result {
+ return &Result{
+ Fields: result.Fields,
+ InsertID: result.InsertID,
+ RowsAffected: result.RowsAffected,
+ Info: result.Info,
+ SessionStateChanges: result.SessionStateChanges,
+ Rows: result.Rows,
+ }
+}
+
+// Metadata creates a shallow copy of Result without the rows useful
+// for sending as a first packet in streaming results.
+func (result *Result) Metadata() *Result {
+ return &Result{
+ Fields: result.Fields,
+ InsertID: result.InsertID,
+ RowsAffected: result.RowsAffected,
+ Info: result.Info,
+ SessionStateChanges: result.SessionStateChanges,
+ }
+}
+
// CopyRow makes a copy of the row.
func CopyRow(r []Value) []Value {
// The raw bytes of the values are supposed to be treated as read-only.
@@ -125,8 +152,10 @@ func (result *Result) Truncate(l int) *Result {
}
out := &Result{
- InsertID: result.InsertID,
- RowsAffected: result.RowsAffected,
+ InsertID: result.InsertID,
+ RowsAffected: result.RowsAffected,
+ Info: result.Info,
+ SessionStateChanges: result.SessionStateChanges,
}
if result.Fields != nil {
out.Fields = result.Fields[:l]
diff --git a/go/sqltypes/result_test.go b/go/sqltypes/result_test.go
index c0525f8dc03..90d2eb9af65 100644
--- a/go/sqltypes/result_test.go
+++ b/go/sqltypes/result_test.go
@@ -17,7 +17,6 @@ limitations under the License.
package sqltypes
import (
- "reflect"
"testing"
"vitess.io/vitess/go/test/utils"
@@ -31,20 +30,20 @@ func TestRepair(t *testing.T) {
}, {
Type: VarChar,
}}
- in := Result{
+ in := &Result{
Rows: [][]Value{
{TestValue(VarBinary, "1"), TestValue(VarBinary, "aa")},
{TestValue(VarBinary, "2"), TestValue(VarBinary, "bb")},
},
}
- want := Result{
+ want := &Result{
Rows: [][]Value{
{TestValue(Int64, "1"), TestValue(VarChar, "aa")},
{TestValue(Int64, "2"), TestValue(VarChar, "bb")},
},
}
in.Repair(fields)
- if !reflect.DeepEqual(in, want) {
+ if !in.Equal(want) {
t.Errorf("Repair:\n%#v, want\n%#v", in, want)
}
}
@@ -85,7 +84,7 @@ func TestTruncate(t *testing.T) {
}
out := in.Truncate(0)
- if !reflect.DeepEqual(out, in) {
+ if !out.Equal(in) {
t.Errorf("Truncate(0):\n%v, want\n%v", out, in)
}
@@ -102,7 +101,7 @@ func TestTruncate(t *testing.T) {
{TestValue(Int64, "3")},
},
}
- if !reflect.DeepEqual(out, want) {
+ if !out.Equal(want) {
t.Errorf("Truncate(1):\n%v, want\n%v", out, want)
}
}
@@ -279,19 +278,21 @@ func TestStripMetaData(t *testing.T) {
},
}}
for _, tcase := range testcases {
- inCopy := tcase.in.Copy()
- out := inCopy.StripMetadata(tcase.includedFields)
- if !reflect.DeepEqual(out, tcase.expected) {
- t.Errorf("StripMetaData unexpected result for %v: %v", tcase.name, out)
- }
- if len(tcase.in.Fields) > 0 {
- // check the out array is different than the in array.
- if out.Fields[0] == inCopy.Fields[0] && tcase.includedFields != querypb.ExecuteOptions_ALL {
- t.Errorf("StripMetaData modified original Field for %v", tcase.name)
+ t.Run(tcase.name, func(t *testing.T) {
+ inCopy := tcase.in.Copy()
+ out := inCopy.StripMetadata(tcase.includedFields)
+ if !out.Equal(tcase.expected) {
+ t.Errorf("StripMetaData unexpected result for %v: %v", tcase.name, out)
+ }
+ if len(tcase.in.Fields) > 0 {
+ // check the out array is different than the in array.
+ if out.Fields[0] == inCopy.Fields[0] && tcase.includedFields != querypb.ExecuteOptions_ALL {
+ t.Errorf("StripMetaData modified original Field for %v", tcase.name)
+ }
}
- }
- // check we didn't change the original result.
- utils.MustMatch(t, tcase.in, inCopy)
+ // check we didn't change the original result.
+ utils.MustMatch(t, tcase.in, inCopy)
+ })
}
}
@@ -340,7 +341,7 @@ func TestAppendResult(t *testing.T) {
result.AppendResult(src)
- if !reflect.DeepEqual(result, want) {
+ if !result.Equal(want) {
t.Errorf("Got:\n%#v, want:\n%#v", result, want)
}
}
diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go
index 20b8843b4bc..d38c7a1306f 100644
--- a/go/sqltypes/value.go
+++ b/go/sqltypes/value.go
@@ -23,6 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math/big"
"strconv"
"strings"
@@ -73,12 +74,12 @@ type (
func NewValue(typ querypb.Type, val []byte) (v Value, err error) {
switch {
case IsSigned(typ):
- if _, err := strconv.ParseInt(string(val), 0, 64); err != nil {
+ if _, err := strconv.ParseInt(string(val), 10, 64); err != nil {
return NULL, err
}
return MakeTrusted(typ, val), nil
case IsUnsigned(typ):
- if _, err := strconv.ParseUint(string(val), 0, 64); err != nil {
+ if _, err := strconv.ParseUint(string(val), 10, 64); err != nil {
return NULL, err
}
return MakeTrusted(typ, val), nil
@@ -527,17 +528,12 @@ func (v *Value) decodeBitNum() ([]byte, error) {
if len(v.val) < 3 || v.val[0] != '0' || v.val[1] != 'b' {
return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val)
}
- bitBytes := v.val[2:]
- ui, err := strconv.ParseUint(string(bitBytes), 2, 64)
- if err != nil {
- return nil, err
- }
- hexVal := fmt.Sprintf("%x", ui)
- decodedHexBytes, err := hex.DecodeString(hexVal)
- if err != nil {
- return nil, err
+ var i big.Int
+ _, ok := i.SetString(string(v.val), 0)
+ if !ok {
+ return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val)
}
- return decodedHexBytes, nil
+ return i.Bytes(), nil
}
func encodeBytesSQL(val []byte, b BinWriter) {
@@ -548,7 +544,12 @@ func encodeBytesSQL(val []byte, b BinWriter) {
func encodeBytesSQLBytes2(val []byte, buf *bytes2.Buffer) {
buf.WriteByte('\'')
- for _, ch := range val {
+ for idx, ch := range val {
+ // If \% or \_ is present, we want to keep them as is, and don't want to escape \ again
+ if ch == '\\' && idx+1 < len(val) && (val[idx+1] == '%' || val[idx+1] == '_') {
+ buf.WriteByte(ch)
+ continue
+ }
if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape {
buf.WriteByte(ch)
} else {
@@ -561,7 +562,12 @@ func encodeBytesSQLBytes2(val []byte, buf *bytes2.Buffer) {
func encodeBytesSQLStringBuilder(val []byte, buf *strings.Builder) {
buf.WriteByte('\'')
- for _, ch := range val {
+ for idx, ch := range val {
+ // If \% or \_ is present, we want to keep them as is, and don't want to escape \ again
+ if ch == '\\' && idx+1 < len(val) && (val[idx+1] == '%' || val[idx+1] == '_') {
+ buf.WriteByte(ch)
+ continue
+ }
if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape {
buf.WriteByte(ch)
} else {
@@ -575,11 +581,16 @@ func encodeBytesSQLStringBuilder(val []byte, buf *strings.Builder) {
// BufEncodeStringSQL encodes the string into a strings.Builder
func BufEncodeStringSQL(buf *strings.Builder, val string) {
buf.WriteByte('\'')
- for _, ch := range val {
+ for idx, ch := range val {
if ch > 255 {
buf.WriteRune(ch)
continue
}
+ // If \% or \_ is present, we want to keep them as is, and don't want to escape \ again
+ if ch == '\\' && idx+1 < len(val) && (val[idx+1] == '%' || val[idx+1] == '_') {
+ buf.WriteRune(ch)
+ continue
+ }
if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape {
buf.WriteRune(ch)
} else {
@@ -616,7 +627,13 @@ func encodeBytesASCII(val []byte, b BinWriter) {
}
// SQLEncodeMap specifies how to escape binary data with '\'.
-// Complies to http://dev.mysql.com/doc/refman/5.1/en/string-syntax.html
+// Complies to https://dev.mysql.com/doc/refman/5.7/en/string-literals.html
+// Handling escaping of % and _ is different than other characters.
+// When escaped in a like clause, they are supposed to be treated as literals
+// Everywhere else, they evaluate to strings '\%' and '\_' respectively.
+// In Vitess, the way we are choosing to handle this behaviour is to always
+// preserve the escaping of % and _ as is in all the places and handle it like MySQL
+// in our evaluation engine for Like.
var SQLEncodeMap [256]byte
// SQLDecodeMap is the reverse of SQLEncodeMap
diff --git a/go/sqltypes/value_test.go b/go/sqltypes/value_test.go
index a6d91c814d3..82aea752480 100644
--- a/go/sqltypes/value_test.go
+++ b/go/sqltypes/value_test.go
@@ -86,6 +86,14 @@ func TestNewValue(t *testing.T) {
inType: Uint64,
inVal: "1",
outVal: TestValue(Uint64, "1"),
+ }, {
+ inType: Uint64,
+ inVal: "01",
+ outVal: TestValue(Uint64, "01"),
+ }, {
+ inType: Int64,
+ inVal: "01",
+ outVal: TestValue(Int64, "01"),
}, {
inType: Float32,
inVal: "1.00",
@@ -491,6 +499,9 @@ func TestHexAndBitToBytes(t *testing.T) {
}, {
in: MakeTrusted(BitNum, []byte("0b1001000110100")),
out: []byte{0x12, 0x34},
+ }, {
+ in: MakeTrusted(BitNum, []byte("0b11101010100101010010101010101010101010101000100100100100100101001101010101010101000001")),
+ out: []byte{0x3a, 0xa5, 0x4a, 0xaa, 0xaa, 0xa2, 0x49, 0x25, 0x35, 0x55, 0x41},
}}
for _, tcase := range tcases {
diff --git a/go/stats/export.go b/go/stats/export.go
index 17218443c87..0a335517a14 100644
--- a/go/stats/export.go
+++ b/go/stats/export.go
@@ -52,18 +52,13 @@ var (
// CommonTags is a comma-separated list of common tags for stats backends
var CommonTags []string
-func init() {
- registerFlags()
-}
-
-func registerFlags() {
- pflag.BoolVar(&emitStats, "emit_stats", emitStats, "If set, emit stats to push-based monitoring and stats backends")
- pflag.DurationVar(&statsEmitPeriod, "stats_emit_period", statsEmitPeriod, "Interval between emitting stats to all registered backends")
- pflag.StringVar(&statsBackend, "stats_backend", statsBackend, "The name of the registered push-based monitoring/stats backend to use")
- pflag.StringVar(&combineDimensions, "stats_combine_dimensions", combineDimensions, `List of dimensions to be combined into a single "all" value in exported stats vars`)
- pflag.StringVar(&dropVariables, "stats_drop_variables", dropVariables, `Variables to be dropped from the list of exported variables.`)
- pflag.StringSliceVar(&CommonTags, "stats_common_tags", CommonTags, `Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2`)
-
+func RegisterFlags(fs *pflag.FlagSet) {
+ fs.BoolVar(&emitStats, "emit_stats", emitStats, "If set, emit stats to push-based monitoring and stats backends")
+ fs.DurationVar(&statsEmitPeriod, "stats_emit_period", statsEmitPeriod, "Interval between emitting stats to all registered backends")
+ fs.StringVar(&statsBackend, "stats_backend", statsBackend, "The name of the registered push-based monitoring/stats backend to use")
+ fs.StringVar(&combineDimensions, "stats_combine_dimensions", combineDimensions, `List of dimensions to be combined into a single "all" value in exported stats vars`)
+ fs.StringVar(&dropVariables, "stats_drop_variables", dropVariables, `Variables to be dropped from the list of exported variables.`)
+ fs.StringSliceVar(&CommonTags, "stats_common_tags", CommonTags, `Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2`)
}
// StatsAllStr is the consolidated name if a dimension gets combined.
diff --git a/go/stats/prometheusbackend/collectors.go b/go/stats/prometheusbackend/collectors.go
index 206e8dfcec3..a1126c0d211 100644
--- a/go/stats/prometheusbackend/collectors.go
+++ b/go/stats/prometheusbackend/collectors.go
@@ -74,7 +74,7 @@ func newCountersWithSingleLabelCollector(c *stats.CountersWithSingleLabel, name
desc: prometheus.NewDesc(
name,
c.Help(),
- []string{labelName},
+ []string{normalizeMetric(labelName)},
nil),
vt: vt}
@@ -111,7 +111,7 @@ func newGaugesWithSingleLabelCollector(g *stats.GaugesWithSingleLabel, name stri
desc: prometheus.NewDesc(
name,
g.Help(),
- []string{labelName},
+ []string{normalizeMetric(labelName)},
nil),
vt: vt}
@@ -266,7 +266,7 @@ func newTimingsCollector(t *stats.Timings, name string) {
desc: prometheus.NewDesc(
name,
t.Help(),
- []string{t.Label()},
+ []string{normalizeMetric(t.Label())},
nil),
}
diff --git a/go/stats/prometheusbackend/prometheusbackend_test.go b/go/stats/prometheusbackend/prometheusbackend_test.go
index 438e678bb3e..888dd630941 100644
--- a/go/stats/prometheusbackend/prometheusbackend_test.go
+++ b/go/stats/prometheusbackend/prometheusbackend_test.go
@@ -357,6 +357,26 @@ func testMetricsHandler(t *testing.T) *httptest.ResponseRecorder {
return response
}
+func TestPrometheusLabels(t *testing.T) {
+ m1 := stats.NewCountersWithSingleLabel("ThisIsMetric1", "helpstring1", "ThisIsALabel")
+ m1.Add("labelvalue1", 420)
+
+ m2 := stats.NewCountersWithMultiLabels("ThisIsMetric2", "helpstring2", []string{"ThisIsALabel"})
+ m2.Add([]string{"labelvalue2"}, 420)
+
+ response := testMetricsHandler(t)
+
+ expect := []string{
+ "namespace_this_is_metric1{this_is_a_label=\"labelvalue1\"} 420",
+ "namespace_this_is_metric2{this_is_a_label=\"labelvalue2\"} 420",
+ }
+ for _, line := range expect {
+ if !strings.Contains(response.Body.String(), line) {
+ t.Fatalf("Expected result to contain %s, got %s", line, response.Body.String())
+ }
+ }
+}
+
func TestMain(m *testing.M) {
Init(namespace)
os.Exit(m.Run())
diff --git a/go/stats/statsd/statsd_test.go b/go/stats/statsd/statsd_test.go
index 982ad321f0e..c615da3cdfd 100644
--- a/go/stats/statsd/statsd_test.go
+++ b/go/stats/statsd/statsd_test.go
@@ -55,8 +55,8 @@ func TestStatsdCounter(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.counter_name:1|c"
- assert.Equal(t, result, expected)
+ expected := "test.counter_name:1|c\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -84,8 +84,8 @@ func TestStatsdGauge(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauge_name:10.000000|g"
- assert.Equal(t, result, expected)
+ expected := "test.gauge_name:10|g\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -113,8 +113,8 @@ func TestStatsdGaugeFloat64(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauge_name_f64:3.140000|g"
- assert.Equal(t, result, expected)
+ expected := "test.gauge_name_f64:3.14|g\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -143,8 +143,8 @@ func TestStatsdGaugeFunc(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauge_func_name:2.000000|g"
- assert.Equal(t, result, expected)
+ expected := "test.gauge_func_name:2|g\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -172,8 +172,8 @@ func TestStatsdCounterDuration(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.counter_duration_name:1.000000|ms"
- assert.Equal(t, result, expected)
+ expected := "test.counter_duration_name:1.000000|ms\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -203,11 +203,12 @@ func TestStatsdCountersWithSingleLabel(t *testing.T) {
result := strings.Split(string(bytes[:n]), "\n")
sort.Strings(result)
expected := []string{
+ "",
"test.counter_with_single_label_name:0|c|#label:tag2",
"test.counter_with_single_label_name:2|c|#label:tag1",
}
for i, res := range result {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
@@ -236,8 +237,8 @@ func TestStatsdCountersWithMultiLabels(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.counter_with_multiple_label_name:1|c|#label1:foo,label2:bar"
- assert.Equal(t, result, expected)
+ expected := "test.counter_with_multiple_label_name:1|c|#label1:foo,label2:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -271,11 +272,12 @@ func TestStatsdCountersFuncWithMultiLabels(t *testing.T) {
result := strings.Split(string(bytes[:n]), "\n")
sort.Strings(result)
expected := []string{
+ "",
"test.counter_func_with_multiple_labels_name:1|c|#label1:foo,label2:bar",
"test.counter_func_with_multiple_labels_name:2|c|#label1:bar,label2:baz",
}
for i, res := range result {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
@@ -304,8 +306,8 @@ func TestStatsdGaugesWithMultiLabels(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauges_with_multiple_label_name:3.000000|g|#label1:foo,label2:bar"
- assert.Equal(t, result, expected)
+ expected := "test.gauges_with_multiple_label_name:3|g|#label1:foo,label2:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -339,11 +341,12 @@ func TestStatsdGaugesFuncWithMultiLabels(t *testing.T) {
result := strings.Split(string(bytes[:n]), "\n")
sort.Strings(result)
expected := []string{
- "test.gauges_func_with_multiple_labels_name:1.000000|g|#label1:foo,label2:bar",
- "test.gauges_func_with_multiple_labels_name:2.000000|g|#label1:bar,label2:baz",
+ "",
+ "test.gauges_func_with_multiple_labels_name:1|g|#label1:foo,label2:bar",
+ "test.gauges_func_with_multiple_labels_name:2|g|#label1:bar,label2:baz",
}
for i, res := range result {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
@@ -372,8 +375,8 @@ func TestStatsdGaugesWithSingleLabel(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauges_with_single_label_name:1.000000|g|#label1:bar"
- assert.Equal(t, result, expected)
+ expected := "test.gauges_with_single_label_name:1|g|#label1:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -401,8 +404,8 @@ func TestStatsdMultiTimings(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.multi_timings_name:10.000000|ms|#label1:foo,label2:bar"
- assert.Equal(t, result, expected)
+ expected := "test.multi_timings_name:10.000000|ms|#label1:foo,label2:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -430,8 +433,8 @@ func TestStatsdTimings(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.timings_name:2.000000|ms|#label1:foo"
- assert.Equal(t, result, expected)
+ expected := "test.timings_name:2.000000|ms|#label1:foo\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -462,12 +465,13 @@ func TestStatsdHistogram(t *testing.T) {
}
result := string(bytes[:n])
expected := []string{
- "test.histogram_name:2.000000|h",
- "test.histogram_name:3.000000|h",
- "test.histogram_name:6.000000|h",
+ "test.histogram_name:2|h",
+ "test.histogram_name:3|h",
+ "test.histogram_name:6|h",
+ "",
}
for i, res := range strings.Split(result, "\n") {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
diff --git a/go/test/dbg/dbg.go b/go/test/dbg/dbg.go
new file mode 100644
index 00000000000..94ce7a385c2
--- /dev/null
+++ b/go/test/dbg/dbg.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2023 The Vitess Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dbg
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/kr/pretty"
+ "github.com/kr/text"
+)
+
+type params struct {
+ pos token.Position
+ fn string
+ params []string
+}
+
+func (p *params) Position() string {
+ if p == nil {
+ return ""
+ }
+ return p.pos.String()
+}
+
+func (p *params) ShortPosition() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s:%d", path.Base(p.pos.Filename), p.pos.Line)
+}
+
+func (p *params) Arg(n int) string {
+ if p == nil || n >= len(p.params) {
+ return "arg"
+ }
+ return p.params[n]
+}
+
+func (p *params) Fn() string {
+ if p == nil || p.fn == "" {
+ return "?"
+ }
+ return p.fn
+}
+
+type file struct {
+ once sync.Once
+ fset *token.FileSet
+ path string
+ calls map[int]*params
+}
+
+type cache struct {
+ mu sync.Mutex
+ parsed map[string]*file
+ fset *token.FileSet
+}
+
+func (f *file) parse() {
+ a, err := parser.ParseFile(f.fset, f.path, nil, 0)
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "[dbg] failed to parse %q: %v\n", f.path, err)
+ return
+ }
+
+ f.calls = map[int]*params{}
+
+ var curfn string
+ ast.Inspect(a, func(node ast.Node) bool {
+ switch n := node.(type) {
+ case *ast.FuncDecl:
+ var buf strings.Builder
+ if n.Recv != nil && len(n.Recv.List) == 1 {
+ buf.WriteByte('(')
+ _ = format.Node(&buf, f.fset, n.Recv.List[0].Type)
+ buf.WriteString(").")
+ }
+ buf.WriteString(n.Name.String())
+ curfn = buf.String()
+
+ case *ast.CallExpr:
+ if sel, ok := n.Fun.(*ast.SelectorExpr); ok {
+ if pkg, ok := sel.X.(*ast.Ident); ok {
+ if pkg.Name == "dbg" && (sel.Sel.Name == "P" || sel.Sel.Name == "V") {
+ var p = params{
+ pos: f.fset.Position(n.Pos()),
+ fn: curfn,
+ }
+
+ for _, arg := range n.Args {
+ var buf strings.Builder
+ _ = format.Node(&buf, f.fset, arg)
+ p.params = append(p.params, buf.String())
+ }
+
+ f.calls[p.pos.Line] = &p
+ return false
+ }
+ }
+ }
+ }
+ return true
+ })
+}
+
+func (f *file) resolve(lineno int) *params {
+ f.once.Do(f.parse)
+ return f.calls[lineno]
+}
+
+func (c *cache) resolve(filename string, lineno int) *params {
+ var f *file
+
+ c.mu.Lock()
+ f = c.parsed[filename]
+ if f == nil {
+ f = &file{fset: c.fset, path: filename}
+ c.parsed[filename] = f
+ }
+ c.mu.Unlock()
+
+ return f.resolve(lineno)
+}
+
+var defaultCache = cache{
+ fset: token.NewFileSet(),
+ parsed: map[string]*file{},
+}
+
+// V prints the given argument in compact debug form and returns it unchanged
+func V[Val any](v Val) Val {
+ var p *params
+ if _, f, lineno, ok := runtime.Caller(1); ok {
+ p = defaultCache.resolve(f, lineno)
+ }
+ _, _ = fmt.Fprintf(os.Stdout, "[%s]: %s = %# v\n", p.ShortPosition(), p.Arg(0), pretty.Formatter(v))
+ return v
+}
+
+// P prints all the arguments passed to the function in verbose debug form
+func P(vals ...any) {
+ var p *params
+ if _, f, lineno, ok := runtime.Caller(1); ok {
+ p = defaultCache.resolve(f, lineno)
+ }
+
+ var buf bytes.Buffer
+ _, _ = fmt.Fprintf(&buf, "%s @ %s\n", p.Position(), p.Fn())
+ for i, v := range vals {
+ indent, _ := fmt.Fprintf(&buf, " [%d] %s = ", i, p.Arg(i))
+
+ w := text.NewIndentWriter(&buf, nil, bytes.Repeat([]byte{' '}, indent))
+ fmt.Fprintf(w, "%# v\n", pretty.Formatter(v))
+ }
+ _, _ = buf.WriteTo(os.Stdout)
+}
diff --git a/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go b/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go
new file mode 100644
index 00000000000..f93dfa475b6
--- /dev/null
+++ b/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlctld
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+)
+
+func waitForReplica(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ pMsgs := backup.ReadRowsFromPrimary(t)
+ for {
+ rMsgs := backup.ReadRowsFromReplica(t)
+ if len(pMsgs) == len(rMsgs) {
+ // success
+ return
+ }
+ select {
+ case <-ctx.Done():
+ assert.FailNow(t, "timeout waiting for replica to catch up")
+ return
+ case <-time.After(time.Second):
+ //
+ }
+ }
+}
+
+// TestIncrementalBackupMysqlctld - tests incremental backups using myslctld
+func TestIncrementalBackupMysqlctld(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ // setup cluster for the testing
+ code, err := backup.LaunchCluster(backup.Mysqlctld, "xbstream", 0, nil)
+ require.NoError(t, err, "setup failed with status code %d", code)
+ defer backup.TearDownCluster()
+
+ backup.InitTestTable(t)
+
+ rowsPerPosition := map[string]int{}
+ backupPositions := []string{}
+
+ recordRowsPerPosition := func(t *testing.T) {
+ pos := backup.GetReplicaPosition(t)
+ msgs := backup.ReadRowsFromReplica(t)
+ if _, ok := rowsPerPosition[pos]; !ok {
+ backupPositions = append(backupPositions, pos)
+ rowsPerPosition[pos] = len(msgs)
+ }
+ }
+
+ var fullBackupPos mysql.Position
+ t.Run("full backup", func(t *testing.T) {
+ backup.InsertRowOnPrimary(t, "before-full-backup")
+ waitForReplica(t)
+ manifest, _ := backup.TestReplicaFullBackup(t)
+ fullBackupPos = manifest.Position
+ require.False(t, fullBackupPos.IsZero())
+ //
+ msgs := backup.ReadRowsFromReplica(t)
+ pos := mysql.EncodePosition(fullBackupPos)
+ backupPositions = append(backupPositions, pos)
+ rowsPerPosition[pos] = len(msgs)
+ })
+
+ lastBackupPos := fullBackupPos
+ backup.InsertRowOnPrimary(t, "before-incremental-backups")
+
+ tt := []struct {
+ name string
+ writeBeforeBackup bool
+ fromFullPosition bool
+ autoPosition bool
+ expectError string
+ }{
+ {
+ name: "first incremental backup",
+ },
+ {
+ name: "make writes, succeed",
+ writeBeforeBackup: true,
+ },
+ {
+ name: "fail, no binary logs to backup",
+ expectError: "no binary logs to backup",
+ },
+ {
+ name: "make writes again, succeed",
+ writeBeforeBackup: true,
+ },
+ {
+ name: "auto position, succeed",
+ writeBeforeBackup: true,
+ autoPosition: true,
+ },
+ {
+ name: "fail auto position, no binary logs to backup",
+ autoPosition: true,
+ expectError: "no binary logs to backup",
+ },
+ {
+ name: "auto position, make writes again, succeed",
+ writeBeforeBackup: true,
+ autoPosition: true,
+ },
+ {
+ name: "from full backup position",
+ fromFullPosition: true,
+ },
+ }
+ var fromFullPositionBackups []string
+ for _, tc := range tt {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.writeBeforeBackup {
+ backup.InsertRowOnPrimary(t, "")
+ }
+ // we wait for 1 second because backups ar ewritten to a directory named after the current timestamp,
+ // in 1 second resolution. We want to aoid two backups that have the same pathname. Realistically this
+ // is only ever a problem in this endtoend test, not in production.
+ // Also, we gie the replica a chance to catch up.
+ time.Sleep(1100 * time.Millisecond)
+ waitForReplica(t)
+ recordRowsPerPosition(t)
+ // configure --incremental-from-pos to either:
+ // - auto
+ // - explicit last backup pos
+ // - back in history to the original full backup
+ var incrementalFromPos mysql.Position
+ if !tc.autoPosition {
+ incrementalFromPos = lastBackupPos
+ if tc.fromFullPosition {
+ incrementalFromPos = fullBackupPos
+ }
+ }
+ manifest, backupName := backup.TestReplicaIncrementalBackup(t, incrementalFromPos, tc.expectError)
+ if tc.expectError != "" {
+ return
+ }
+ defer func() {
+ lastBackupPos = manifest.Position
+ }()
+ if tc.fromFullPosition {
+ fromFullPositionBackups = append(fromFullPositionBackups, backupName)
+ }
+ require.False(t, manifest.FromPosition.IsZero())
+ require.NotEqual(t, manifest.Position, manifest.FromPosition)
+ require.True(t, manifest.Position.GTIDSet.Contains(manifest.FromPosition.GTIDSet))
+
+ gtidPurgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, backup.GetReplicaGtidPurged(t))
+ require.NoError(t, err)
+ fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet)
+
+ expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet)
+ if !incrementalFromPos.IsZero() {
+ expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet)
+ }
+ require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v", expectFromPosition, fromPositionIncludingPurged)
+ })
+ }
+
+ testRestores := func(t *testing.T) {
+ for _, r := range rand.Perm(len(backupPositions)) {
+ pos := backupPositions[r]
+ testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos])
+ t.Run(testName, func(t *testing.T) {
+ restoreToPos, err := mysql.DecodePosition(pos)
+ require.NoError(t, err)
+ backup.TestReplicaRestoreToPos(t, restoreToPos, "")
+ msgs := backup.ReadRowsFromReplica(t)
+ count, ok := rowsPerPosition[pos]
+ require.True(t, ok)
+ assert.Equalf(t, count, len(msgs), "messages: %v", msgs)
+ })
+ }
+ }
+ t.Run("PITR", func(t *testing.T) {
+ testRestores(t)
+ })
+ t.Run("remove full position backups", func(t *testing.T) {
+ // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again.
+ for _, backupName := range fromFullPositionBackups {
+ backup.RemoveBackup(t, backupName)
+ }
+ })
+ t.Run("PITR-2", func(t *testing.T) {
+ testRestores(t)
+ })
+}
diff --git a/go/test/endtoend/backup/transform/backup_transform_utils.go b/go/test/endtoend/backup/transform/backup_transform_utils.go
deleted file mode 100644
index 8418f7e999a..00000000000
--- a/go/test/endtoend/backup/transform/backup_transform_utils.go
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transform
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "os"
- "os/exec"
- "path"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/test/endtoend/cluster"
- "vitess.io/vitess/go/vt/log"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
-)
-
-// test main part of the testcase
-var (
- primary *cluster.Vttablet
- replica1 *cluster.Vttablet
- replica2 *cluster.Vttablet
- testTablets []*cluster.Vttablet
- localCluster *cluster.LocalProcessCluster
- newInitDBFile string
- cell = cluster.DefaultCell
- hostname = "localhost"
- keyspaceName = "ks"
- dbPassword = "VtDbaPass"
- shardKsName = fmt.Sprintf("%s/%s", keyspaceName, shardName)
- dbCredentialFile string
- shardName = "0"
- commonTabletArg = []string{
- "--vreplication_healthcheck_topology_refresh", "1s",
- "--vreplication_healthcheck_retry_delay", "1s",
- "--vreplication_retry_delay", "1s",
- "--degraded_threshold", "5s",
- "--lock_tables_timeout", "5s",
- "--watch_replication_stream",
- "--enable_replication_reporter",
- "--serving_state_grace_period", "1s"}
-)
-
-// TestMainSetup sets up the basic test cluster
-func TestMainSetup(m *testing.M, useMysqlctld bool) {
- defer cluster.PanicHandler(nil)
- flag.Parse()
-
- exitCode, err := func() (int, error) {
- localCluster = cluster.NewCluster(cell, hostname)
- defer localCluster.Teardown()
-
- // Start topo server
- err := localCluster.StartTopo()
- if err != nil {
- return 1, err
- }
-
- // Start keyspace
- localCluster.Keyspaces = []cluster.Keyspace{
- {
- Name: keyspaceName,
- Shards: []cluster.Shard{
- {
- Name: shardName,
- },
- },
- },
- }
- shard := &localCluster.Keyspaces[0].Shards[0]
- // changing password for mysql user
- dbCredentialFile = cluster.WriteDbCredentialToTmp(localCluster.TmpDirectory)
- initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"))
- sql := string(initDb)
- newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql")
- sql = sql + cluster.GetPasswordUpdateSQL(localCluster)
- os.WriteFile(newInitDBFile, []byte(sql), 0666)
-
- extraArgs := []string{"--db-credentials-file", dbCredentialFile}
- commonTabletArg = append(commonTabletArg, "--db-credentials-file", dbCredentialFile)
-
- // start mysql process for all replicas and primary
- var mysqlProcs []*exec.Cmd
- for i := 0; i < 3; i++ {
- tabletType := "replica"
- tablet := localCluster.NewVttabletInstance(tabletType, 0, cell)
- tablet.VttabletProcess = localCluster.VtprocessInstanceFromVttablet(tablet, shard.Name, keyspaceName)
- tablet.VttabletProcess.DbPassword = dbPassword
- tablet.VttabletProcess.ExtraArgs = commonTabletArg
- tablet.VttabletProcess.SupportsBackup = true
- tablet.VttabletProcess.EnableSemiSync = true
-
- if useMysqlctld {
- tablet.MysqlctldProcess = *cluster.MysqlCtldProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory)
- tablet.MysqlctldProcess.InitDBFile = newInitDBFile
- tablet.MysqlctldProcess.ExtraArgs = extraArgs
- tablet.MysqlctldProcess.Password = tablet.VttabletProcess.DbPassword
- err := tablet.MysqlctldProcess.Start()
- if err != nil {
- return 1, err
- }
-
- shard.Vttablets = append(shard.Vttablets, tablet)
- continue
- }
-
- tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory)
- tablet.MysqlctlProcess.InitDBFile = newInitDBFile
- tablet.MysqlctlProcess.ExtraArgs = extraArgs
- proc, err := tablet.MysqlctlProcess.StartProcess()
- if err != nil {
- return 1, err
- }
- mysqlProcs = append(mysqlProcs, proc)
-
- shard.Vttablets = append(shard.Vttablets, tablet)
- }
- for _, proc := range mysqlProcs {
- if err := proc.Wait(); err != nil {
- return 1, err
- }
- }
-
- // initialize tablets
- primary = shard.Vttablets[0]
- replica1 = shard.Vttablets[1]
- replica2 = shard.Vttablets[2]
- testTablets = []*cluster.Vttablet{primary, replica1, replica2}
-
- for _, tablet := range testTablets {
- if err := localCluster.VtctlclientProcess.InitTablet(tablet, cell, keyspaceName, hostname, shard.Name); err != nil {
- return 1, err
- }
- }
- vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", localCluster.VtctldProcess.GrpcPort, localCluster.TmpDirectory)
- _, err = vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync")
- if err != nil {
- return 1, err
- }
-
- // create database for primary and replicas
- for _, tablet := range testTablets {
- if err := tablet.VttabletProcess.CreateDB(keyspaceName); err != nil {
- return 1, err
- }
- if err := tablet.VttabletProcess.Setup(); err != nil {
- return 1, err
- }
- }
-
- // initialize primary and start replication
- if err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil {
- return 1, err
- }
- primary.Type = topodatapb.TabletType_PRIMARY.String()
- return m.Run(), nil
- }()
-
- if err != nil {
- log.Error(err.Error())
- os.Exit(1)
- } else {
- os.Exit(exitCode)
- }
-}
-
-// create query for test table creation
-var vtInsertTest = `create table vt_insert_test (
- id bigint auto_increment,
- msg varchar(64),
- primary key (id)
- ) Engine=InnoDB`
-
-// TestBackupTransformImpl tests backups with transform hooks
-func TestBackupTransformImpl(t *testing.T) {
- // insert data in primary, validate same in replica
- defer cluster.PanicHandler(t)
- verifyInitialReplication(t)
-
- // restart the replica1 tablet with transform hook parameter
- replica1.VttabletProcess.TearDown()
- replica1.VttabletProcess.ExtraArgs = []string{
- "--db-credentials-file", dbCredentialFile,
- "--backup_storage_hook", "test_backup_transform",
- "--backup_storage_compress=false",
- "--restore_from_backup",
- "--backup_storage_implementation", "file",
- "--file_backup_storage_root", localCluster.VtctldProcess.FileBackupStorageRoot}
- replica1.VttabletProcess.ServingStatus = "SERVING"
- err := replica1.VttabletProcess.Setup()
- require.Nil(t, err)
-
- if err := cluster.WaitForTabletSetup(&localCluster.VtctlclientProcess, 2, []string{"replica", "primary"}); err != nil {
- require.Nil(t, err)
- }
-
- // take backup, it should not give any error
- err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
- require.Nil(t, err)
-
- // insert data in primary
- _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
- require.Nil(t, err)
-
- // validate backup_list, expecting 1 backup available
- backups := localCluster.VerifyBackupCount(t, shardKsName, 1)
-
- backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backups[0]
-
- // validate that MANIFEST has TransformHook
- // every file should start with 'header'
- validateManifestFile(t, backupLocation)
-
- // restore replica2 from backup, should not give any error
- // Note: we don't need to pass in the backup_storage_transform parameter,
- // as it is read from the MANIFEST.
- // clear replica2
-
- // Stop the tablet
- replica2.VttabletProcess.TearDown()
- // Remove the data
- if replica2.MysqlctlProcess.TabletUID > 0 {
- err = replica2.MysqlctlProcess.Stop()
- require.NoError(t, err)
- replica2.MysqlctlProcess.CleanupFiles(replica2.TabletUID)
- err = replica2.MysqlctlProcess.Start()
- require.Nil(t, err)
- } else {
- err = replica2.MysqlctldProcess.Stop()
- require.NoError(t, err)
- replica2.MysqlctldProcess.CleanupFiles(replica2.TabletUID)
- require.NoError(t, err)
- err = replica2.MysqlctldProcess.Start()
- require.Nil(t, err)
- }
-
- // Start from the backup
- err = localCluster.VtctlclientProcess.InitTablet(replica2, cell, keyspaceName, hostname, shardName)
- require.Nil(t, err)
- replica2.VttabletProcess.ExtraArgs = []string{
- "--db-credentials-file", dbCredentialFile,
- "--restore_from_backup",
- "--backup_storage_implementation", "file",
- "--file_backup_storage_root", localCluster.VtctldProcess.FileBackupStorageRoot}
- replica2.VttabletProcess.ServingStatus = ""
- err = replica2.VttabletProcess.Setup()
- require.Nil(t, err)
- err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 30*time.Second)
- require.Nil(t, err)
- defer replica2.VttabletProcess.TearDown()
- // We restart replication here because semi-sync will not be set correctly on tablet startup since
- // we deprecated enable_semi_sync. StartReplication RPC fixes the semi-sync settings by consulting the
- // durability policies set.
- err = localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica2.Alias)
- require.NoError(t, err)
- err = localCluster.VtctlclientProcess.ExecuteCommand("StartReplication", replica2.Alias)
- require.NoError(t, err)
-
- // validate that semi-sync is enabled for replica, disable for rdOnly
- if replica2.Type == "replica" {
- verifySemiSyncStatus(t, replica2, "ON")
- } else if replica2.Type == "rdonly" {
- verifySemiSyncStatus(t, replica2, "OFF")
- }
-
- // validate that new replica has all the data
- cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
-
- // Remove all backups
- localCluster.RemoveAllBackups(t, shardKsName)
-
-}
-
-// TestBackupTransformErrorImpl validates backup behavior with transform hook
-// when the hook encounters an error
-func TestBackupTransformErrorImpl(t *testing.T) {
- // restart the replica with transform hook parameter
- defer cluster.PanicHandler(t)
- err := replica1.VttabletProcess.TearDown()
- require.Nil(t, err)
-
- replica1.VttabletProcess.ExtraArgs = []string{
- "--db-credentials-file", dbCredentialFile,
- "--backup_storage_hook", "test_backup_error",
- "--restore_from_backup",
- "--backup_storage_implementation", "file",
- "--file_backup_storage_root", localCluster.VtctldProcess.FileBackupStorageRoot}
- replica1.VttabletProcess.ServingStatus = "SERVING"
- err = replica1.VttabletProcess.Setup()
- require.Nil(t, err)
-
- // create backup, it should fail
- out, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("Backup", replica1.Alias)
- require.NotNil(t, err)
- require.Containsf(t, out, "backup is not usable, aborting it", "unexpected error received %v", err)
-
- // validate there is no backup left
- localCluster.VerifyBackupCount(t, shardKsName, 0)
-}
-
-// validateManifestFile reads manifest and validates that it
-// has a TransformHook, SkipCompress and FileEntries. It also
-// validates that backup_files available in FileEntries have
-// 'header' as their first line.
-func validateManifestFile(t *testing.T, backupLocation string) {
-
- // reading manifest
- data, err := os.ReadFile(backupLocation + "/MANIFEST")
- require.Nilf(t, err, "error while reading MANIFEST %v", err)
- manifest := make(map[string]any)
-
- // parsing manifest
- err = json.Unmarshal(data, &manifest)
- require.Nilf(t, err, "error while parsing MANIFEST %v", err)
-
- // validate manifest
- transformHook := manifest["TransformHook"]
- require.Equalf(t, "test_backup_transform", transformHook, "invalid transformHook in MANIFEST")
- skipCompress := manifest["SkipCompress"]
- assert.Equalf(t, skipCompress, true, "invalid value of skipCompress")
-
- // validate backup files
- fielEntries := manifest["FileEntries"]
- fileArr, ok := fielEntries.([]any)
- require.True(t, ok)
- for i := range fileArr {
- f, err := os.Open(fmt.Sprintf("%s/%d", backupLocation, i))
- require.Nilf(t, err, "error while opening backup_file %d: %v", i, err)
- var fileHeader string
- _, err = fmt.Fscanln(f, &fileHeader)
- f.Close()
-
- require.Nilf(t, err, "error while reading backup_file %d: %v", i, err)
- require.Equalf(t, "header", fileHeader, "wrong file contents for %d", i)
- }
-
-}
-
-// verifySemiSyncStatus validates the replication status in tablet.
-func verifySemiSyncStatus(t *testing.T, vttablet *cluster.Vttablet, expectedStatus string) {
- status, err := vttablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", keyspaceName)
- require.Nil(t, err)
- assert.Equal(t, expectedStatus, status)
- status, err = vttablet.VttabletProcess.GetDBStatus("rpl_semi_sync_slave_status", keyspaceName)
- require.Nil(t, err)
- assert.Equal(t, expectedStatus, status)
-}
-
-// verifyInitialReplication generates a record on the primary and verifies that the record
-// exists on all tablets in the shard. We also check the PRIMARY as with lossless semi-sync
-// it's possible that a GTID is applied on a replica but not (yet) on the source:
-// http://my-replication-life.blogspot.com/2013/09/loss-less-semi-synchronous-replication.html
-func verifyInitialReplication(t *testing.T) {
- // confirm that semi-sync is enabled for the replica tablets
- healthyReplicaCount := 0
- for _, tablet := range testTablets {
- if tablet.Type == "replica" {
- verifySemiSyncStatus(t, tablet, "ON")
- healthyReplicaCount++
- }
- }
-
- if healthyReplicaCount < 2 {
- log.Errorf("Not enough healthy replicas to guarantee safe backups when semi-sync is enabled! Should have at least two, currently have %d",
- healthyReplicaCount)
- }
-
- _, err := primary.VttabletProcess.QueryTablet(vtInsertTest, keyspaceName, true)
- require.Nil(t, err)
- _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test1')", keyspaceName, true)
- require.Nil(t, err)
-
- for _, tablet := range testTablets {
- cluster.VerifyRowsInTablet(t, tablet, keyspaceName, 1)
- }
-}
diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go
index f67b5fadeed..3730a1fa586 100644
--- a/go/test/endtoend/backup/vtbackup/backup_only_test.go
+++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package vtbackup
import (
+ "context"
"fmt"
"os"
"path"
@@ -24,20 +25,18 @@ import (
"testing"
"time"
- "vitess.io/vitess/go/vt/mysqlctl"
-
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
-
- "github.com/stretchr/testify/assert"
-
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/mysqlctl"
)
var (
vtInsertTest = `
- create table vt_insert_test (
+ create table if not exists vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
@@ -57,15 +56,21 @@ func TestTabletInitialBackup(t *testing.T) {
// - list the backups, remove them
defer cluster.PanicHandler(t)
- vtBackup(t, true, false)
+ vtBackup(t, true, false, false)
verifyBackupCount(t, shardKsName, 1)
// Initialize the tablets
initTablets(t, false, false)
// Restore the Tablets
+
restore(t, primary, "replica", "NOT_SERVING")
+ // Vitess expects that the user has set the database into ReadWrite mode before calling
+ // TabletExternallyReparented
err := localCluster.VtctlclientProcess.ExecuteCommand(
+ "SetReadWrite", primary.Alias)
+ require.Nil(t, err)
+ err = localCluster.VtctlclientProcess.ExecuteCommand(
"TabletExternallyReparented", primary.Alias)
require.Nil(t, err)
restore(t, replica1, "replica", "SERVING")
@@ -75,6 +80,7 @@ func TestTabletInitialBackup(t *testing.T) {
tearDown(t, true)
}
+
func TestTabletBackupOnly(t *testing.T) {
// Test Backup Flow
// TestTabletBackupOnly will:
@@ -126,7 +132,7 @@ func firstBackupTest(t *testing.T, tabletType string) {
// backup the replica
log.Infof("taking backup %s", time.Now())
- vtBackup(t, false, true)
+ vtBackup(t, false, true, true)
log.Infof("done taking backup %s", time.Now())
// check that the backup shows up in the listing
@@ -137,7 +143,7 @@ func firstBackupTest(t *testing.T, tabletType string) {
require.Nil(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2)
- // eventhough we change the value of compression it won't effect
+ // even though we change the value of compression it won't affect
// decompression since it gets its value from MANIFEST file, created
// as part of backup.
mysqlctl.CompressionEngineName = "lz4"
@@ -151,32 +157,37 @@ func firstBackupTest(t *testing.T, tabletType string) {
//check the new replica has the data
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
- // check that the restored replica has the right local_metadata
- result, err := replica2.VttabletProcess.QueryTabletWithDB("select * from local_metadata", "_vt")
- require.Nil(t, err)
- require.NotNil(t, result)
- require.NotEmpty(t, result.Rows)
- assert.Equal(t, replica2.Alias, result.Rows[0][1].ToString(), "Alias")
- assert.Equal(t, "ks.0", result.Rows[1][1].ToString(), "ClusterAlias")
- assert.Equal(t, cell, result.Rows[2][1].ToString(), "DataCenter")
- if tabletType == "replica" {
- assert.Equal(t, "neutral", result.Rows[3][1].ToString(), "PromotionRule")
- } else {
- assert.Equal(t, "must_not", result.Rows[3][1].ToString(), "PromotionRule")
- }
-
removeBackups(t)
verifyBackupCount(t, shardKsName, 0)
}
-func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup bool) {
+func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disableRedoLog bool) {
+ mysqlSocket, err := os.CreateTemp("", "vtbackup_test_mysql.sock")
+ require.Nil(t, err)
+ defer os.Remove(mysqlSocket.Name())
+
// Take the back using vtbackup executable
- extraArgs := []string{"--allow_first_backup", "--db-credentials-file", dbCredentialFile}
+ extraArgs := []string{
+ "--allow_first_backup",
+ "--db-credentials-file", dbCredentialFile,
+ "--mysql_socket", mysqlSocket.Name(),
+ }
if restartBeforeBackup {
extraArgs = append(extraArgs, "--restart_before_backup")
}
+ if disableRedoLog {
+ extraArgs = append(extraArgs, "--disable-redo-log")
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ if !initialBackup && disableRedoLog {
+ go verifyDisableEnableRedoLogs(ctx, t, mysqlSocket.Name())
+ }
+
log.Infof("starting backup tablet %s", time.Now())
- err := localCluster.StartVtbackup(newInitDBFile, initialBackup, keyspaceName, shardName, cell, extraArgs...)
+ err = localCluster.StartVtbackup(newInitDBFile, initialBackup, keyspaceName, shardName, cell, extraArgs...)
require.Nil(t, err)
}
@@ -260,7 +271,6 @@ func restore(t *testing.T, tablet *cluster.Vttablet, tabletType string, waitForS
}
func resetTabletDirectory(t *testing.T, tablet cluster.Vttablet, initMysql bool) {
-
extraArgs := []string{"--db-credentials-file", dbCredentialFile}
tablet.MysqlctlProcess.ExtraArgs = extraArgs
@@ -280,7 +290,6 @@ func resetTabletDirectory(t *testing.T, tablet cluster.Vttablet, initMysql bool)
err = tablet.MysqlctlProcess.Start()
require.Nil(t, err)
}
-
}
func tearDown(t *testing.T, initMysql bool) {
@@ -311,3 +320,50 @@ func tearDown(t *testing.T, initMysql bool) {
require.Nil(t, err)
}
}
+
+func verifyDisableEnableRedoLogs(ctx context.Context, t *testing.T, mysqlSocket string) {
+ params := cluster.NewConnParams(0, dbPassword, mysqlSocket, keyspaceName)
+
+ for {
+ select {
+ case <-time.After(100 * time.Millisecond):
+ // Connect to vtbackup mysqld.
+ conn, err := mysql.Connect(ctx, ¶ms)
+ if err != nil {
+ // Keep trying, vtbackup mysqld may not be ready yet.
+ continue
+ }
+
+ // Check if server supports disable/enable redo log.
+ qr, err := conn.ExecuteFetch("SELECT 1 FROM performance_schema.global_status WHERE variable_name = 'innodb_redo_log_enabled'", 1, false)
+ require.Nil(t, err)
+ // If not, there's nothing to test.
+ if len(qr.Rows) == 0 {
+ return
+ }
+
+ // MY-013600
+ // https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html#error_er_ib_wrn_redo_disabled
+ qr, err = conn.ExecuteFetch("SELECT 1 FROM performance_schema.error_log WHERE error_code = 'MY-013600'", 1, false)
+ require.Nil(t, err)
+ if len(qr.Rows) != 1 {
+ // Keep trying, possible we haven't disabled yet.
+ continue
+ }
+
+ // MY-013601
+ // https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html#error_er_ib_wrn_redo_enabled
+ qr, err = conn.ExecuteFetch("SELECT 1 FROM performance_schema.error_log WHERE error_code = 'MY-013601'", 1, false)
+ require.Nil(t, err)
+ if len(qr.Rows) != 1 {
+ // Keep trying, possible we haven't disabled yet.
+ continue
+ }
+
+ // Success
+ return
+ case <-ctx.Done():
+ require.Fail(t, "Failed to verify disable/enable redo log.")
+ }
+ }
+}
diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go
index ce0720e77c4..39dfc834728 100644
--- a/go/test/endtoend/backup/vtbackup/main_test.go
+++ b/go/test/endtoend/backup/vtbackup/main_test.go
@@ -25,6 +25,7 @@ import (
"testing"
"vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/utils"
"vitess.io/vitess/go/vt/log"
)
@@ -89,8 +90,12 @@ func TestMain(m *testing.M) {
dbCredentialFile = cluster.WriteDbCredentialToTmp(localCluster.TmpDirectory)
initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"))
sql := string(initDb)
+ // The original init_db.sql does not have any passwords. Here we update the init file with passwords
+ sql, err = utils.GetInitDBSQL(sql, cluster.GetPasswordUpdateSQL(localCluster), "")
+ if err != nil {
+ return 1, err
+ }
newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql")
- sql = sql + cluster.GetPasswordUpdateSQL(localCluster)
err = os.WriteFile(newInitDBFile, []byte(sql), 0666)
if err != nil {
return 1, err
@@ -111,7 +116,6 @@ func TestMain(m *testing.M) {
tablet.VttabletProcess.DbPassword = dbPassword
tablet.VttabletProcess.ExtraArgs = commonTabletArg
tablet.VttabletProcess.SupportsBackup = true
- tablet.VttabletProcess.EnableSemiSync = true
tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory)
tablet.MysqlctlProcess.InitDBFile = newInitDBFile
@@ -135,6 +139,16 @@ func TestMain(m *testing.M) {
}
}
+ if localCluster.VtTabletMajorVersion >= 16 {
+ // If vttablets are any lower than version 16, then they are running the replication manager.
+ // Running VTOrc and replication manager sometimes creates the situation where VTOrc has set up semi-sync on the primary,
+ // but the replication manager starts replication on the replica without setting semi-sync. This hangs the primary.
+ // Even if VTOrc fixes it, since there is no ongoing traffic, the state remains blocked.
+ if err := localCluster.StartVTOrc(keyspaceName); err != nil {
+ return 1, err
+ }
+ }
+
return m.Run(), nil
}()
diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
index e862e1cd52c..dc785ed992d 100644
--- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go
+++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
@@ -28,9 +28,13 @@ import (
"testing"
"time"
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/textutil"
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"github.com/stretchr/testify/assert"
@@ -214,6 +218,11 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp
if err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil {
return 1, err
}
+
+ if err := localCluster.StartVTOrc(keyspaceName); err != nil {
+ return 1, err
+ }
+
return 0, nil
}
@@ -378,6 +387,14 @@ type restoreMethod func(t *testing.T, tablet *cluster.Vttablet)
// 13. verify that don't have the data added after the first backup
// 14. remove the backups
func primaryBackup(t *testing.T) {
+ // Having the VTOrc in this test causes a lot of flakiness. For example when we delete the tablet `replica2` which
+ // is the current primary and then try to restore from backup the old primary (`primary.Alias`), but before that sometimes the VTOrc
+ // promotes the `replica1` to primary right after we delete the replica2 (current primary).
+ // This can result in unexpected behavior. Therefore, disabling the VTOrc in this test to remove flakiness.
+ localCluster.DisableVTOrcRecoveries(t)
+ defer func() {
+ localCluster.EnableVTOrcRecoveries(t)
+ }()
verifyInitialReplication(t)
output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("Backup", primary.Alias)
@@ -405,7 +422,6 @@ func primaryBackup(t *testing.T) {
// Verify that we have all the new data -- we should have 2 records now...
// And only 1 record after we restore using the first backup timestamp
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
- cluster.VerifyLocalMetadata(t, replica2, keyspaceName, shardName, cell)
err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", "--", "--allow_primary=true", primary.Alias)
require.Nil(t, err)
@@ -430,14 +446,13 @@ func primaryBackup(t *testing.T) {
// Restore the older/first backup -- using the timestamp we saved -- on the original primary tablet (primary)
err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", "--", "--backup_timestamp", firstBackupTimestamp, primary.Alias)
require.Nil(t, err)
-
+ verifyRestorePositionAndTimeStats(t, primary.VttabletProcess.GetVars())
// Re-init the shard -- making the original primary tablet (primary) primary again -- for subsequent tests
err = localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID)
require.Nil(t, err)
// Verify that we don't have the record created after the older/first backup
cluster.VerifyRowsInTablet(t, primary, keyspaceName, 1)
- cluster.VerifyLocalMetadata(t, primary, keyspaceName, shardName, cell)
verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups)
require.Nil(t, err)
@@ -763,15 +778,9 @@ func terminatedRestore(t *testing.T) {
// Args:
// tablet_type: 'replica' or 'rdonly'.
func vtctlBackup(t *testing.T, tabletType string) {
- // Start vtorc before running backups
- vtorcProcess := localCluster.NewVTOrcProcess(cluster.VTOrcConfiguration{})
- err := vtorcProcess.Setup()
- require.NoError(t, err)
- localCluster.VTOrcProcesses = append(localCluster.VTOrcProcesses, vtorcProcess)
-
// StopReplication on replica1. We verify that the replication works fine later in
// verifyInitialReplication. So this will also check that VTOrc is running.
- err = localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica1.Alias)
+ err := localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica1.Alias)
require.Nil(t, err)
verifyInitialReplication(t)
@@ -789,14 +798,7 @@ func vtctlBackup(t *testing.T, tabletType string) {
require.Nil(t, err)
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
- cluster.VerifyLocalMetadata(t, replica2, keyspaceName, shardName, cell)
verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups)
-
- // Stop VTOrc
- err = localCluster.VTOrcProcesses[0].TearDown()
- localCluster.VTOrcProcesses = nil
- require.NoError(t, err)
-
err = replica2.VttabletProcess.TearDown()
require.Nil(t, err)
@@ -807,11 +809,17 @@ func vtctlBackup(t *testing.T, tabletType string) {
}
+func InitTestTable(t *testing.T) {
+ _, err := primary.VttabletProcess.QueryTablet("DROP TABLE IF EXISTS vt_insert_test", keyspaceName, true)
+ require.Nil(t, err)
+ _, err = primary.VttabletProcess.QueryTablet(vtInsertTest, keyspaceName, true)
+ require.Nil(t, err)
+}
+
// This will create schema in primary, insert some data to primary and verify the same data in replica
func verifyInitialReplication(t *testing.T) {
- _, err := primary.VttabletProcess.QueryTablet(vtInsertTest, keyspaceName, true)
- require.Nil(t, err)
- _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test1')", keyspaceName, true)
+ InitTestTable(t)
+ _, err := primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test1')", keyspaceName, true)
require.Nil(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 1)
}
@@ -840,11 +848,15 @@ func restoreWaitForBackup(t *testing.T, tabletType string, cDetails *Compression
require.Nil(t, err)
}
+func RemoveBackup(t *testing.T, backupName string) {
+ err := localCluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backupName)
+ require.Nil(t, err)
+}
+
func verifyAfterRemovingBackupNoBackupShouldBePresent(t *testing.T, backups []string) {
// Remove the backup
for _, backup := range backups {
- err := localCluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backup)
- require.Nil(t, err)
+ RemoveBackup(t, backup)
}
// Now, there should not be no backup
@@ -917,3 +929,135 @@ func terminateRestore(t *testing.T) {
}
assert.True(t, found, "Restore message not found")
}
+
+func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, tabletType string) (backups []string, destroy func(t *testing.T)) {
+ restoreWaitForBackup(t, tabletType, nil, true)
+ verifyInitialReplication(t)
+
+ err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
+ require.Nil(t, err)
+
+ backups = localCluster.VerifyBackupCount(t, shardKsName, 1)
+
+ err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 25*time.Second)
+ require.Nil(t, err)
+
+ err = replica2.VttabletProcess.TearDown()
+ require.Nil(t, err)
+
+ err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", replica2.Alias)
+ require.Nil(t, err)
+
+ destroy = func(t *testing.T) {
+ verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups)
+ }
+ return backups, destroy
+}
+
+func GetReplicaPosition(t *testing.T) string {
+ pos, _ := cluster.GetPrimaryPosition(t, *replica1, hostname)
+ return pos
+}
+
+func GetReplicaGtidPurged(t *testing.T) string {
+ query := "select @@global.gtid_purged as gtid_purged"
+ rs, err := replica1.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ row := rs.Named().Row()
+ require.NotNil(t, row)
+ return row.AsString("gtid_purged", "")
+}
+
+func InsertRowOnPrimary(t *testing.T, hint string) {
+ if hint == "" {
+ hint = textutil.RandomHash()[:12]
+ }
+ query, err := sqlparser.ParseAndBind("insert into vt_insert_test (msg) values (%a)", sqltypes.StringBindVariable(hint))
+ require.NoError(t, err)
+ _, err = primary.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+}
+
+func ReadRowsFromTablet(t *testing.T, tablet *cluster.Vttablet) (msgs []string) {
+ query := "select msg from vt_insert_test"
+ rs, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ for _, row := range rs.Named().Rows {
+ msg, err := row.ToString("msg")
+ require.NoError(t, err)
+ msgs = append(msgs, msg)
+ }
+ return msgs
+}
+
+func ReadRowsFromPrimary(t *testing.T) (msgs []string) {
+ return ReadRowsFromTablet(t, primary)
+}
+
+func ReadRowsFromReplica(t *testing.T) (msgs []string) {
+ return ReadRowsFromTablet(t, replica1)
+}
+
+func readManifestFile(t *testing.T, backupLocation string) (manifest *mysqlctl.BackupManifest) {
+ // reading manifest
+ data, err := os.ReadFile(backupLocation + "/MANIFEST")
+ require.NoErrorf(t, err, "error while reading MANIFEST %v", err)
+
+ // parsing manifest
+ err = json.Unmarshal(data, &manifest)
+ require.NoErrorf(t, err, "error while parsing MANIFEST %v", err)
+ require.NotNil(t, manifest)
+ return manifest
+}
+
+func TestReplicaFullBackup(t *testing.T) (manifest *mysqlctl.BackupManifest, destroy func(t *testing.T)) {
+ backups, destroy := vtctlBackupReplicaNoDestroyNoWrites(t, "replica")
+
+ backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backups[len(backups)-1]
+ return readManifestFile(t, backupLocation), destroy
+}
+
+func TestReplicaIncrementalBackup(t *testing.T, incrementalFromPos mysql.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) {
+ incrementalFromPosArg := "auto"
+ if !incrementalFromPos.IsZero() {
+ incrementalFromPosArg = mysql.EncodePosition(incrementalFromPos)
+ }
+ output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("Backup", "--", "--incremental_from_pos", incrementalFromPosArg, replica1.Alias)
+ if expectError != "" {
+ require.Errorf(t, err, "expected: %v", expectError)
+ require.Contains(t, output, expectError)
+ return nil, ""
+ }
+ require.NoErrorf(t, err, "output: %v", output)
+
+ backups, err := localCluster.ListBackups(shardKsName)
+ require.NoError(t, err)
+ backupName = backups[len(backups)-1]
+ backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backupName
+ return readManifestFile(t, backupLocation), backupName
+}
+
+func TestReplicaRestoreToPos(t *testing.T, restoreToPos mysql.Position, expectError string) {
+ require.False(t, restoreToPos.IsZero())
+ restoreToPosArg := mysql.EncodePosition(restoreToPos)
+ output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica1.Alias)
+ if expectError != "" {
+ require.Errorf(t, err, "expected: %v", expectError)
+ require.Contains(t, output, expectError)
+ return
+ }
+ require.NoErrorf(t, err, "output: %v", output)
+}
+
+func verifyRestorePositionAndTimeStats(t *testing.T, vars map[string]any) {
+ require.Contains(t, vars, "RestoredBackupTime")
+ backupTime := vars["RestoredBackupTime"].(string)
+ require.NotEqual(t, "", backupTime)
+
+ require.Contains(t, vars, "RestorePosition")
+ backupPosition := vars["RestorePosition"].(string)
+ require.NotEqual(t, "", backupPosition)
+ rp, err := mysql.DecodePosition(backupPosition)
+ require.NoError(t, err)
+ require.False(t, rp.IsZero())
+}
diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go
index 36868656ccb..8d749c73bbf 100644
--- a/go/test/endtoend/cellalias/cell_alias_test.go
+++ b/go/test/endtoend/cellalias/cell_alias_test.go
@@ -28,6 +28,7 @@ import (
"os"
"os/exec"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -116,6 +117,12 @@ func TestMain(m *testing.M) {
return 1, err
}
+ vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", localCluster.VtctldProcess.GrpcPort, localCluster.TmpDirectory)
+ _, err = vtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", keyspaceName, "--durability-policy=semi_sync")
+ if err != nil {
+ return 1, err
+ }
+
shard1Primary = localCluster.NewVttabletInstance("primary", 0, cell1)
shard1Replica = localCluster.NewVttabletInstance("replica", 0, cell2)
shard1Rdonly = localCluster.NewVttabletInstance("rdonly", 0, cell2)
@@ -139,7 +146,6 @@ func TestMain(m *testing.M) {
hostname,
localCluster.TmpDirectory,
commonTabletArg,
- true,
localCluster.DefaultCharset,
)
tablet.VttabletProcess.SupportsBackup = true
@@ -155,10 +161,6 @@ func TestMain(m *testing.M) {
}
}
- if err := localCluster.VtctlProcess.CreateKeyspace(keyspaceName); err != nil {
- return 1, err
- }
-
shard1 := cluster.Shard{
Name: "-80",
Vttablets: []*cluster.Vttablet{shard1Primary, shard1Replica, shard1Rdonly},
@@ -204,6 +206,10 @@ func TestMain(m *testing.M) {
return 1, err
}
+ if err := localCluster.StartVTOrc(keyspaceName); err != nil {
+ return 1, err
+ }
+
if err := localCluster.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil {
return 1, err
}
@@ -334,12 +340,9 @@ func TestAddAliasWhileVtgateUp(t *testing.T) {
func waitTillAllTabletsAreHealthyInVtgate(t *testing.T, vtgateInstance cluster.VtgateProcess, shards ...string) {
for _, shard := range shards {
- err := vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shard), 1)
- require.Nil(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1)
- require.Nil(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard), 1)
- require.Nil(t, err)
+ require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shard), 1, 30*time.Second))
+ require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1, 30*time.Second))
+ require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard), 1, 30*time.Second))
}
}
diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go
index cb6fbb4fd40..b0401f56324 100644
--- a/go/test/endtoend/cluster/cluster_process.go
+++ b/go/test/endtoend/cluster/cluster_process.go
@@ -33,11 +33,13 @@ import (
"strings"
"sync"
"syscall"
+ "testing"
"time"
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/test/endtoend/filelock"
"vitess.io/vitess/go/vt/grpcclient"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
@@ -116,8 +118,6 @@ type LocalProcessCluster struct {
VtctldExtraArgs []string
- EnableSemiSync bool
-
// mutex added to handle the parallel teardowns
mx *sync.Mutex
teardownCompleted bool
@@ -251,6 +251,22 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) {
return
}
+// StartVTOrc starts a VTOrc instance
+func (cluster *LocalProcessCluster) StartVTOrc(keyspace string) error {
+ // Start vtorc
+ vtorcProcess := cluster.NewVTOrcProcess(VTOrcConfiguration{})
+ err := vtorcProcess.Setup()
+ if err != nil {
+ log.Error(err.Error())
+ return err
+ }
+ if keyspace != "" {
+ vtorcProcess.ExtraArgs = append(vtorcProcess.ExtraArgs, fmt.Sprintf(`--clusters_to_watch="%s"`, keyspace))
+ }
+ cluster.VTOrcProcesses = append(cluster.VTOrcProcesses, vtorcProcess)
+ return nil
+}
+
// StartUnshardedKeyspace starts unshared keyspace with shard name as "0"
func (cluster *LocalProcessCluster) StartUnshardedKeyspace(keyspace Keyspace, replicaCount int, rdonly bool) error {
return cluster.StartKeyspace(keyspace, []string{"0"}, replicaCount, rdonly)
@@ -358,7 +374,6 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames
cluster.Hostname,
cluster.TmpDirectory,
cluster.VtTabletExtraArgs,
- cluster.EnableSemiSync,
cluster.DefaultCharset)
tablet.Alias = tablet.VttabletProcess.TabletPath
if cluster.ReusingVTDATAROOT {
@@ -427,6 +442,13 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames
}
log.Infof("Done creating keyspace: %v ", keyspace.Name)
+
+ err = cluster.StartVTOrc(keyspace.Name)
+ if err != nil {
+ log.Errorf("Error starting VTOrc - %v", err)
+ return err
+ }
+
return
}
@@ -499,7 +521,6 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard
cluster.Hostname,
cluster.TmpDirectory,
cluster.VtTabletExtraArgs,
- cluster.EnableSemiSync,
cluster.DefaultCharset)
tablet.Alias = tablet.VttabletProcess.TabletPath
if cluster.ReusingVTDATAROOT {
@@ -613,7 +634,6 @@ func (cluster *LocalProcessCluster) SetupCluster(keyspace *Keyspace, shards []Sh
cluster.Hostname,
cluster.TmpDirectory,
cluster.VtTabletExtraArgs,
- cluster.EnableSemiSync,
cluster.DefaultCharset)
}
@@ -668,8 +688,8 @@ func (cluster *LocalProcessCluster) NewVtgateInstance() *VtgateProcess {
return vtgateProcInstance
}
-// NewCluster instantiates a new cluster
-func NewCluster(cell string, hostname string) *LocalProcessCluster {
+// NewBareCluster instantiates a new cluster and does not assume existence of any of the vitess processes
+func NewBareCluster(cell string, hostname string) *LocalProcessCluster {
cluster := &LocalProcessCluster{Cell: cell, Hostname: hostname, mx: new(sync.Mutex), DefaultCharset: "utf8mb4"}
go cluster.CtrlCHandler()
@@ -688,12 +708,18 @@ func NewCluster(cell string, hostname string) *LocalProcessCluster {
_ = os.Setenv("VTDATAROOT", cluster.CurrentVTDATAROOT)
log.Infof("Created cluster on %s. ReusingVTDATAROOT=%v", cluster.CurrentVTDATAROOT, cluster.ReusingVTDATAROOT)
+ rand.Seed(time.Now().UTC().UnixNano())
+ return cluster
+}
+
+// NewCluster instantiates a new cluster
+func NewCluster(cell string, hostname string) *LocalProcessCluster {
+ cluster := NewBareCluster(cell, hostname)
+
err := cluster.populateVersionInfo()
if err != nil {
log.Errorf("Error populating version information - %v", err)
}
-
- rand.Seed(time.Now().UTC().UnixNano())
return cluster
}
@@ -757,14 +783,14 @@ func (cluster *LocalProcessCluster) WaitForTabletsToHealthyInVtgate() (err error
rdonlyTabletCount++
}
}
- if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1); err != nil {
+ if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1, 2*time.Minute); err != nil {
return err
}
if replicaTabletCount > 0 {
- err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), replicaTabletCount)
+ err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), replicaTabletCount, 2*time.Minute)
}
if rdonlyTabletCount > 0 {
- err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), rdonlyTabletCount)
+ err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), rdonlyTabletCount, 2*time.Minute)
}
if err != nil {
return err
@@ -782,7 +808,7 @@ func (cluster *LocalProcessCluster) ExecOnTablet(ctx context.Context, vttablet *
return nil, err
}
- tablet, err := cluster.vtctlclientGetTablet(vttablet)
+ tablet, err := cluster.VtctlclientGetTablet(vttablet)
if err != nil {
return nil, err
}
@@ -825,7 +851,7 @@ func (cluster *LocalProcessCluster) ExecOnVTGate(ctx context.Context, addr strin
// returns the responses. It returns an error if the stream ends with fewer than
// `count` responses.
func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vttablet *Vttablet, count int) (responses []*querypb.StreamHealthResponse, err error) {
- tablet, err := cluster.vtctlclientGetTablet(vttablet)
+ tablet, err := cluster.VtctlclientGetTablet(vttablet)
if err != nil {
return nil, err
}
@@ -857,7 +883,7 @@ func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vtta
return responses, nil
}
-func (cluster *LocalProcessCluster) vtctlclientGetTablet(tablet *Vttablet) (*topodatapb.Tablet, error) {
+func (cluster *LocalProcessCluster) VtctlclientGetTablet(tablet *Vttablet) (*topodatapb.Tablet, error) {
result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", "--", tablet.Alias)
if err != nil {
return nil, err
@@ -982,7 +1008,7 @@ func (cluster *LocalProcessCluster) waitForMySQLProcessToExit(mysqlctlProcessLis
}
// StartVtbackup starts a vtbackup
-func (cluster *LocalProcessCluster) StartVtbackup(newInitDBFile string, initalBackup bool,
+func (cluster *LocalProcessCluster) StartVtbackup(newInitDBFile string, initialBackup bool,
keyspace string, shard string, cell string, extraArgs ...string) error {
log.Info("Starting vtbackup")
cluster.VtbackupProcess = *VtbackupProcessInstance(
@@ -995,7 +1021,7 @@ func (cluster *LocalProcessCluster) StartVtbackup(newInitDBFile string, initalBa
cluster.Hostname,
cluster.TmpDirectory,
cluster.TopoPort,
- initalBackup)
+ initialBackup)
cluster.VtbackupProcess.ExtraArgs = extraArgs
return cluster.VtbackupProcess.Setup()
@@ -1027,24 +1053,49 @@ func (cluster *LocalProcessCluster) GetAndReservePort() int {
return cluster.nextPortForProcess
}
+// portFileTimeout determines when we see the content of a port file as
+// stale. After this time, we assume we can start with the default base
+// port again.
+const portFileTimeout = 1 * time.Hour
+
// getPort checks if we have recent used port info in /tmp/todaytime.port
// If no, then use a random port and save that port + 200 in the above file
// If yes, then return that port, and save port + 200 in the same file
// here, assumptions is 200 ports might be consumed for all tests in a package
func getPort() int {
- tmpPortFileName := path.Join(os.TempDir(), time.Now().Format("01022006.port"))
+ portFile, err := os.OpenFile(path.Join(os.TempDir(), "endtoend.port"), os.O_CREATE|os.O_RDWR, 0644)
+ if err != nil {
+ panic(err)
+ }
+
+ filelock.Lock(portFile)
+ defer filelock.Unlock(portFile)
+
+ fileInfo, err := portFile.Stat()
+ if err != nil {
+ panic(err)
+ }
+
+ portBytes, err := io.ReadAll(portFile)
+ if err != nil {
+ panic(err)
+ }
+
var port int
- if _, err := os.Stat(tmpPortFileName); os.IsNotExist(err) {
+ if len(portBytes) == 0 || time.Now().After(fileInfo.ModTime().Add(portFileTimeout)) {
port = getVtStartPort()
} else {
- result, _ := os.ReadFile(tmpPortFileName)
- cport, err := strconv.Atoi(string(result))
- if err != nil || cport > 60000 || cport == 0 {
- cport = getVtStartPort()
+ parsedPort, err := strconv.ParseInt(string(portBytes), 10, 64)
+ if err != nil {
+ panic(err)
}
- port = cport
+ port = int(parsedPort)
}
- os.WriteFile(tmpPortFileName, []byte(fmt.Sprintf("%d", port+200)), 0666)
+
+ portFile.Truncate(0)
+ portFile.Seek(0, 0)
+ portFile.WriteString(fmt.Sprintf("%v", port+200))
+ portFile.Close()
return port
}
@@ -1068,7 +1119,7 @@ func getRandomNumber(maxNumber int32, baseNumber int) int {
func getVtStartPort() int {
osVtPort := os.Getenv("VTPORTSTART")
if osVtPort != "" {
- cport, err := strconv.Atoi(string(osVtPort))
+ cport, err := strconv.Atoi(osVtPort)
if err == nil {
return cport
}
@@ -1136,7 +1187,6 @@ func (cluster *LocalProcessCluster) VtprocessInstanceFromVttablet(tablet *Vttabl
cluster.Hostname,
cluster.TmpDirectory,
cluster.VtTabletExtraArgs,
- cluster.EnableSemiSync,
cluster.DefaultCharset)
}
@@ -1156,7 +1206,6 @@ func (cluster *LocalProcessCluster) StartVttablet(tablet *Vttablet, servingStatu
hostname,
cluster.TmpDirectory,
cluster.VtTabletExtraArgs,
- cluster.EnableSemiSync,
cluster.DefaultCharset)
tablet.VttabletProcess.SupportsBackup = supportBackup
@@ -1205,3 +1254,17 @@ func (cluster *LocalProcessCluster) GetVTParams(dbname string) mysql.ConnParams
}
return params
}
+
+// DisableVTOrcRecoveries stops all VTOrcs from running any recoveries
+func (cluster *LocalProcessCluster) DisableVTOrcRecoveries(t *testing.T) {
+ for _, vtorc := range cluster.VTOrcProcesses {
+ vtorc.DisableGlobalRecoveries(t)
+ }
+}
+
+// EnableVTOrcRecoveries allows all VTOrcs to run any recoveries
+func (cluster *LocalProcessCluster) EnableVTOrcRecoveries(t *testing.T) {
+ for _, vtorc := range cluster.VTOrcProcesses {
+ vtorc.EnableGlobalRecoveries(t)
+ }
+}
diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go
index 1134e51d40d..0a820a63540 100644
--- a/go/test/endtoend/cluster/cluster_util.go
+++ b/go/test/endtoend/cluster/cluster_util.go
@@ -26,6 +26,7 @@ import (
"testing"
"time"
+ "github.com/buger/jsonparser"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -41,6 +42,8 @@ var (
tmClient = tmc.NewClient()
dbCredentialFile string
InsertTabletTemplateKsID = `insert into %s (id, msg) values (%d, '%s') /* id:%d */`
+ defaultOperationTimeout = 60 * time.Second
+ defeaultRetryDelay = 1 * time.Second
)
// Restart restarts vttablet and mysql.
@@ -95,16 +98,20 @@ func GetPrimaryPosition(t *testing.T, vttablet Vttablet, hostname string) (strin
// This is used to check that replication has caught up with the changes on primary.
func VerifyRowsInTabletForTable(t *testing.T, vttablet *Vttablet, ksName string, expectedRows int, tableName string) {
timeout := time.Now().Add(1 * time.Minute)
+ lastNumRowsFound := 0
for time.Now().Before(timeout) {
// ignoring the error check, if the newly created table is not replicated, then there might be error and we should ignore it
// but eventually it will catch up and if not caught up in required time, testcase will fail
qr, _ := vttablet.VttabletProcess.QueryTablet("select * from "+tableName, ksName, true)
- if qr != nil && len(qr.Rows) == expectedRows {
- return
+ if qr != nil {
+ if len(qr.Rows) == expectedRows {
+ return
+ }
+ lastNumRowsFound = len(qr.Rows)
}
time.Sleep(300 * time.Millisecond)
}
- assert.Fail(t, "expected rows not found.")
+ require.Equalf(t, expectedRows, lastNumRowsFound, "unexpected number of rows in %s (%s.%s)", vttablet.Alias, ksName, tableName)
}
// VerifyRowsInTablet Verify total number of rows in a tablet
@@ -121,20 +128,6 @@ func PanicHandler(t *testing.T) {
require.Nilf(t, err, "panic occured in testcase %v", t.Name())
}
-// VerifyLocalMetadata Verify Local Metadata of a tablet
-func VerifyLocalMetadata(t *testing.T, tablet *Vttablet, ksName string, shardName string, cell string) {
- qr, err := tablet.VttabletProcess.QueryTablet("select * from _vt.local_metadata", ksName, false)
- require.Nil(t, err)
- assert.Equal(t, fmt.Sprintf("%v", qr.Rows[0][1]), fmt.Sprintf(`BLOB("%s")`, tablet.Alias))
- assert.Equal(t, fmt.Sprintf("%v", qr.Rows[1][1]), fmt.Sprintf(`BLOB("%s.%s")`, ksName, shardName))
- assert.Equal(t, fmt.Sprintf("%v", qr.Rows[2][1]), fmt.Sprintf(`BLOB("%s")`, cell))
- if tablet.Type == "replica" {
- assert.Equal(t, fmt.Sprintf("%v", qr.Rows[3][1]), `BLOB("neutral")`)
- } else if tablet.Type == "rdonly" {
- assert.Equal(t, fmt.Sprintf("%v", qr.Rows[3][1]), `BLOB("must_not")`)
- }
-}
-
// ListBackups Lists back preset in shard
func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) {
output, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("ListBackups", shardKsName)
@@ -186,12 +179,22 @@ func getTablet(tabletGrpcPort int, hostname string) *topodatapb.Tablet {
func filterResultForWarning(input string) string {
lines := strings.Split(input, "\n")
var result string
- for _, line := range lines {
- if strings.Contains(line, "WARNING: vtctl should only be used for VDiff workflows") {
+ for i, line := range lines {
+ if strings.Contains(line, "WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.") {
continue
}
- result = result + line + "\n"
+
+ if strings.Contains(line, "Failed to read in config") && strings.Contains(line, `Config File "vtconfig" Not Found in`) {
+ continue
+ }
+
+ result += line
+
+ if i < len(lines)-1 {
+ result += "\n"
+ }
}
+
return result
}
@@ -262,8 +265,8 @@ func NewConnParams(port int, password, socketPath, keyspace string) mysql.ConnPa
UnixSocket: socketPath,
Pass: password,
}
-
- if keyspace != "" {
+ cp.DbName = keyspace
+ if keyspace != "" && keyspace != "_vt" {
cp.DbName = "vt_" + keyspace
}
@@ -391,3 +394,48 @@ func WaitForTabletSetup(vtctlClientProcess *VtctlClientProcess, expectedTablets
return fmt.Errorf("all %d tablet are not in expected state %s", expectedTablets, expectedStatus)
}
+
+// WaitForHealthyShard waits for the given shard info record in the topo
+// server to list a tablet (alias and uid) as the primary serving tablet
+// for the shard. This is done using "vtctldclient GetShard" and parsing
+// its JSON output. All other watchers should then also see this shard
+// info status as well.
+func WaitForHealthyShard(vtctldclient *VtctldClientProcess, keyspace, shard string) error {
+ var (
+ tmr = time.NewTimer(defaultOperationTimeout)
+ res string
+ err error
+ json []byte
+ cell string
+ uid int64
+ )
+ for {
+ res, err = vtctldclient.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace, shard))
+ if err != nil {
+ return err
+ }
+ json = []byte(res)
+
+ cell, err = jsonparser.GetString(json, "shard", "primary_alias", "cell")
+ if err != nil && err != jsonparser.KeyPathNotFoundError {
+ return err
+ }
+ uid, err = jsonparser.GetInt(json, "shard", "primary_alias", "uid")
+ if err != nil && err != jsonparser.KeyPathNotFoundError {
+ return err
+ }
+
+ if cell != "" && uid > 0 {
+ return nil
+ }
+
+ select {
+ case <-tmr.C:
+ return fmt.Errorf("timed out waiting for the %s/%s shard to become healthy in the topo after %v; last seen status: %s; last seen error: %v",
+ keyspace, shard, defaultOperationTimeout, res, err)
+ default:
+ }
+
+ time.Sleep(defeaultRetryDelay)
+ }
+}
diff --git a/go/test/endtoend/cluster/mysqlctl_process.go b/go/test/endtoend/cluster/mysqlctl_process.go
index 97ccaf80f86..32e8f27e050 100644
--- a/go/test/endtoend/cluster/mysqlctl_process.go
+++ b/go/test/endtoend/cluster/mysqlctl_process.go
@@ -30,6 +30,7 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/tlstest"
)
@@ -65,7 +66,16 @@ func (mysqlctl *MysqlctlProcess) InitDb() (err error) {
// Start executes mysqlctl command to start mysql instance
func (mysqlctl *MysqlctlProcess) Start() (err error) {
- tmpProcess, err := mysqlctl.StartProcess()
+ tmpProcess, err := mysqlctl.startProcess(true)
+ if err != nil {
+ return err
+ }
+ return tmpProcess.Wait()
+}
+
+// StartProvideInit executes mysqlctl command to start mysql instance
+func (mysqlctl *MysqlctlProcess) StartProvideInit(init bool) (err error) {
+ tmpProcess, err := mysqlctl.startProcess(init)
if err != nil {
return err
}
@@ -74,6 +84,10 @@ func (mysqlctl *MysqlctlProcess) Start() (err error) {
// StartProcess starts the mysqlctl and returns the process reference
func (mysqlctl *MysqlctlProcess) StartProcess() (*exec.Cmd, error) {
+ return mysqlctl.startProcess(true)
+}
+
+func (mysqlctl *MysqlctlProcess) startProcess(init bool) (*exec.Cmd, error) {
tmpProcess := exec.Command(
mysqlctl.Binary,
"--log_dir", mysqlctl.LogDirectory,
@@ -91,13 +105,22 @@ func (mysqlctl *MysqlctlProcess) StartProcess() (*exec.Cmd, error) {
if mysqlctl.SecureTransport {
// Set up EXTRA_MY_CNF for ssl
sslPath := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/ssl_%010d", mysqlctl.TabletUID))
+ os.MkdirAll(sslPath, 0755)
+
+ // create certificates
+ clientServerKeyPair := tlstest.CreateClientServerCertPairs(sslPath)
+
+ // use the certificate values in template to create cnf file
sslPathData := struct {
- Dir string
+ Dir string
+ ServerCert string
+ ServerKey string
}{
- Dir: sslPath,
+ Dir: sslPath,
+ ServerCert: clientServerKeyPair.ServerCert,
+ ServerKey: clientServerKeyPair.ServerKey,
}
- os.MkdirAll(sslPath, 0755)
extraMyCNF := path.Join(sslPath, "ssl.cnf")
fout, err := os.Create(extraMyCNF)
if err != nil {
@@ -107,21 +130,21 @@ func (mysqlctl *MysqlctlProcess) StartProcess() (*exec.Cmd, error) {
template.Must(template.New(fmt.Sprintf("%010d", mysqlctl.TabletUID)).Parse(`
ssl_ca={{.Dir}}/ca-cert.pem
-ssl_cert={{.Dir}}/server-001-cert.pem
-ssl_key={{.Dir}}/server-001-key.pem
+ssl_cert={{.ServerCert}}
+ssl_key={{.ServerKey}}
`)).Execute(fout, sslPathData)
if err := fout.Close(); err != nil {
return nil, err
}
- tlstest.CreateClientServerCertPairs(sslPath)
-
tmpProcess.Env = append(tmpProcess.Env, "EXTRA_MY_CNF="+extraMyCNF)
tmpProcess.Env = append(tmpProcess.Env, "VTDATAROOT="+os.Getenv("VTDATAROOT"))
}
- tmpProcess.Args = append(tmpProcess.Args, "init", "--",
- "--init_db_sql_file", mysqlctl.InitDBFile)
+ if init {
+ tmpProcess.Args = append(tmpProcess.Args, "init", "--",
+ "--init_db_sql_file", mysqlctl.InitDBFile)
+ }
}
tmpProcess.Args = append(tmpProcess.Args, "start")
log.Infof("Starting mysqlctl with command: %v", tmpProcess.Args)
@@ -184,6 +207,14 @@ func (mysqlctl *MysqlctlProcess) StopProcess() (*exec.Cmd, error) {
return tmpProcess, tmpProcess.Start()
}
+func (mysqlctl *MysqlctlProcess) BasePath() string {
+ return path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", mysqlctl.TabletUID))
+}
+
+func (mysqlctl *MysqlctlProcess) BinaryLogsPath() string {
+ return path.Join(mysqlctl.BasePath(), "bin-logs")
+}
+
// CleanupFiles clean the mysql files to make sure we can start the same process again
func (mysqlctl *MysqlctlProcess) CleanupFiles(tabletUID int) {
os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/data", tabletUID)))
@@ -193,14 +224,28 @@ func (mysqlctl *MysqlctlProcess) CleanupFiles(tabletUID int) {
os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/innodb", tabletUID)))
}
+// Connect returns a new connection to the underlying MySQL server
+func (mysqlctl *MysqlctlProcess) Connect(ctx context.Context, username string) (*mysql.Conn, error) {
+ params := mysql.ConnParams{
+ Uname: username,
+ UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", mysqlctl.TabletUID), "/mysql.sock"),
+ }
+
+ return mysql.Connect(ctx, ¶ms)
+}
+
// MysqlCtlProcessInstanceOptionalInit returns a Mysqlctl handle for mysqlctl process
// configured with the given Config.
func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirectory string, initMySQL bool) *MysqlctlProcess {
+ initFile, err := getInitDBFileUsed()
+ if err != nil {
+ log.Errorf("Couldn't find init db file - %v", err)
+ }
mysqlctl := &MysqlctlProcess{
Name: "mysqlctl",
Binary: "mysqlctl",
LogDirectory: tmpDirectory,
- InitDBFile: path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"),
+ InitDBFile: initFile,
}
mysqlctl.MySQLPort = mySQLPort
mysqlctl.TabletUID = tabletUID
@@ -209,6 +254,23 @@ func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirect
return mysqlctl
}
+func getInitDBFileUsed() (string, error) {
+ versionStr, err := mysqlctl.GetVersionString()
+ if err != nil {
+ return "", err
+ }
+ flavor, _, err := mysqlctl.ParseVersionString(versionStr)
+ if err != nil {
+ return "", err
+ }
+ if flavor == mysqlctl.FlavorMySQL || flavor == mysqlctl.FlavorPercona {
+ return path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"), nil
+ }
+ // Non-MySQL instances for example MariaDB, will use init_testserver_db.sql which does not contain super_read_only global variable.
+ // Even though MariaDB support is deprecated (https://github.com/vitessio/vitess/issues/9518) but we still support migration scenario.
+ return path.Join(os.Getenv("VTROOT"), "go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql"), nil
+}
+
// MysqlCtlProcessInstance returns a Mysqlctl handle for mysqlctl process
// configured with the given Config.
func MysqlCtlProcessInstance(tabletUID int, mySQLPort int, tmpDirectory string) *MysqlctlProcess {
diff --git a/go/test/endtoend/cluster/mysqlctld_process.go b/go/test/endtoend/cluster/mysqlctld_process.go
index 9876dd38797..d71f2e3b1c8 100644
--- a/go/test/endtoend/cluster/mysqlctld_process.go
+++ b/go/test/endtoend/cluster/mysqlctld_process.go
@@ -85,7 +85,7 @@ func (mysqlctld *MysqlctldProcess) Start() error {
tempProcess.Stdout = os.Stdout
tempProcess.Stderr = os.Stderr
- log.Infof("%v %v", strings.Join(tempProcess.Args, " "))
+ log.Infof("%v", strings.Join(tempProcess.Args, " "))
err := tempProcess.Start()
if err != nil {
diff --git a/go/test/endtoend/cluster/topo_process.go b/go/test/endtoend/cluster/topo_process.go
index bb5901db9ce..7326aa57a52 100644
--- a/go/test/endtoend/cluster/topo_process.go
+++ b/go/test/endtoend/cluster/topo_process.go
@@ -103,6 +103,7 @@ func (topo *TopoProcess) SetupEtcd() (err error) {
topo.exit = make(chan error)
go func() {
topo.exit <- topo.proc.Wait()
+ close(topo.exit)
}()
timeout := time.Now().Add(60 * time.Second)
@@ -227,6 +228,7 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) {
topo.exit = make(chan error)
go func() {
topo.exit <- topo.proc.Wait()
+ close(topo.exit)
}()
timeout := time.Now().Add(60 * time.Second)
@@ -289,8 +291,9 @@ func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoo
case <-time.After(10 * time.Second):
topo.proc.Process.Kill()
+ err := <-topo.exit
topo.proc = nil
- return <-topo.exit
+ return err
}
}
diff --git a/go/test/endtoend/cluster/vtbackup_process.go b/go/test/endtoend/cluster/vtbackup_process.go
index 73253d3be3b..b7beed67936 100644
--- a/go/test/endtoend/cluster/vtbackup_process.go
+++ b/go/test/endtoend/cluster/vtbackup_process.go
@@ -92,6 +92,14 @@ func (vtbackup *VtbackupProcess) Setup() (err error) {
return
}
+ vtbackup.exit = make(chan error)
+ go func() {
+ if vtbackup.proc != nil {
+ vtbackup.exit <- vtbackup.proc.Wait()
+ close(vtbackup.exit)
+ }
+ }()
+
return nil
}
@@ -111,8 +119,9 @@ func (vtbackup *VtbackupProcess) TearDown() error {
case <-time.After(10 * time.Second):
vtbackup.proc.Process.Kill()
+ err := <-vtbackup.exit
vtbackup.proc = nil
- return <-vtbackup.exit
+ return err
}
}
diff --git a/go/test/endtoend/cluster/vtctlclient_process.go b/go/test/endtoend/cluster/vtctlclient_process.go
index 369ffcb09c7..1bf02d7f513 100644
--- a/go/test/endtoend/cluster/vtctlclient_process.go
+++ b/go/test/endtoend/cluster/vtctlclient_process.go
@@ -219,7 +219,7 @@ func (vtctlclient *VtctlClientProcess) ExecuteCommandWithOutput(args ...string)
}
time.Sleep(retryDelay)
}
- return filterResultWhenRunsForCoverage(resultStr), err
+ return filterResultForWarning(filterResultWhenRunsForCoverage(resultStr)), err
}
// VtctlClientProcessInstance returns a VtctlProcess handle for vtctlclient process
diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go
index 074234fb97c..5e85f172ce1 100644
--- a/go/test/endtoend/cluster/vtctld_process.go
+++ b/go/test/endtoend/cluster/vtctld_process.go
@@ -58,8 +58,6 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error)
"--topo_global_server_address", vtctld.CommonArg.TopoGlobalAddress,
"--topo_global_root", vtctld.CommonArg.TopoGlobalRoot,
"--cell", cell,
- "--workflow_manager_init",
- "--workflow_manager_use_election",
"--service_map", vtctld.ServiceMap,
"--backup_storage_implementation", vtctld.BackupStorageImplementation,
"--file_backup_storage_root", vtctld.FileBackupStorageRoot,
@@ -87,6 +85,7 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error)
vtctld.exit = make(chan error)
go func() {
vtctld.exit <- vtctld.proc.Wait()
+ close(vtctld.exit)
}()
timeout := time.Now().Add(60 * time.Second)
@@ -138,8 +137,9 @@ func (vtctld *VtctldProcess) TearDown() error {
case <-time.After(10 * time.Second):
vtctld.proc.Process.Kill()
+ err := <-vtctld.exit
vtctld.proc = nil
- return <-vtctld.exit
+ return err
}
}
diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go
index 54166f47fac..f037f53a055 100644
--- a/go/test/endtoend/cluster/vtgate_process.go
+++ b/go/test/endtoend/cluster/vtgate_process.go
@@ -138,6 +138,7 @@ func (vtgate *VtgateProcess) Setup() (err error) {
go func() {
if vtgate.proc != nil {
vtgate.exit <- vtgate.proc.Wait()
+ close(vtgate.exit)
}
}()
@@ -200,11 +201,11 @@ func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string, endPointsCou
// WaitForStatusOfTabletInShard function waits till status of a tablet in shard is 1
// endPointsCount: how many endpoints to wait for
-func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int) error {
+func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int, timeout time.Duration) error {
log.Infof("Waiting for healthy status of %d %s tablets in cell %s",
endPointsCount, name, vtgate.Cell)
- timeout := time.Now().Add(30 * time.Second)
- for time.Now().Before(timeout) {
+ deadline := time.Now().Add(timeout)
+ for time.Now().Before(deadline) {
if vtgate.GetStatusForTabletOfShard(name, endPointsCount) {
return nil
}
@@ -236,8 +237,9 @@ func (vtgate *VtgateProcess) TearDown() error {
case <-time.After(30 * time.Second):
vtgate.proc.Process.Kill()
+ err := <-vtgate.exit
vtgate.proc = nil
- return <-vtgate.exit
+ return err
}
}
diff --git a/go/test/endtoend/cluster/vtgr_process.go b/go/test/endtoend/cluster/vtgr_process.go
index f2205a93232..1960e469489 100644
--- a/go/test/endtoend/cluster/vtgr_process.go
+++ b/go/test/endtoend/cluster/vtgr_process.go
@@ -77,6 +77,7 @@ func (vtgr *VtgrProcess) Start(alias string) (err error) {
go func() {
if vtgr.proc != nil {
vtgr.exit <- vtgr.proc.Wait()
+ close(vtgr.exit)
}
}()
@@ -97,8 +98,9 @@ func (vtgr *VtgrProcess) TearDown() error {
return nil
case <-time.After(10 * time.Second):
- _ = vtgr.proc.Process.Kill()
+ vtgr.proc.Process.Kill()
+ err := <-vtgr.exit
vtgr.proc = nil
- return <-vtgr.exit
+ return err
}
}
diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go
index 14f475fa48c..57347bb68e0 100644
--- a/go/test/endtoend/cluster/vtorc_process.go
+++ b/go/test/endtoend/cluster/vtorc_process.go
@@ -27,6 +27,7 @@ import (
"path"
"strings"
"syscall"
+ "testing"
"time"
"vitess.io/vitess/go/vt/log"
@@ -36,14 +37,15 @@ import (
// vtorc as a separate process for testing
type VTOrcProcess struct {
VtctlProcess
- Port int
- LogDir string
- ExtraArgs []string
- ConfigPath string
- Config VTOrcConfiguration
- WebPort int
- proc *exec.Cmd
- exit chan error
+ Port int
+ LogDir string
+ LogFileName string
+ ExtraArgs []string
+ ConfigPath string
+ Config VTOrcConfiguration
+ WebPort int
+ proc *exec.Cmd
+ exit chan error
}
type VTOrcConfiguration struct {
@@ -123,7 +125,10 @@ func (orc *VTOrcProcess) Setup() (err error) {
orc.proc.Args = append(orc.proc.Args, orc.ExtraArgs...)
orc.proc.Args = append(orc.proc.Args, "--alsologtostderr")
- errFile, _ := os.Create(path.Join(orc.LogDir, fmt.Sprintf("orc-stderr-%d.txt", timeNow)))
+ if orc.LogFileName == "" {
+ orc.LogFileName = fmt.Sprintf("orc-stderr-%d.txt", timeNow)
+ }
+ errFile, _ := os.Create(path.Join(orc.LogDir, orc.LogFileName))
orc.proc.Stderr = errFile
orc.proc.Env = append(orc.proc.Env, os.Environ()...)
@@ -139,6 +144,7 @@ func (orc *VTOrcProcess) Setup() (err error) {
go func() {
if orc.proc != nil {
orc.exit <- orc.proc.Wait()
+ close(orc.exit)
}
}()
@@ -160,8 +166,9 @@ func (orc *VTOrcProcess) TearDown() error {
case <-time.After(30 * time.Second):
_ = orc.proc.Process.Kill()
+ err := <-orc.exit
orc.proc = nil
- return <-orc.exit
+ return err
}
}
@@ -191,10 +198,46 @@ func (orc *VTOrcProcess) MakeAPICall(endpoint string) (status int, response stri
url := fmt.Sprintf("http://localhost:%d/%s", orc.Port, endpoint)
resp, err := http.Get(url)
if err != nil {
- return resp.StatusCode, "", err
+ if resp != nil {
+ status = resp.StatusCode
+ }
+ return status, "", err
}
- defer resp.Body.Close()
+ defer func() {
+ if resp != nil && resp.Body != nil {
+ resp.Body.Close()
+ }
+ }()
respByte, _ := io.ReadAll(resp.Body)
return resp.StatusCode, string(respByte), err
}
+
+// MakeAPICallRetry is used to make an API call and retries until success
+func (orc *VTOrcProcess) MakeAPICallRetry(t *testing.T, url string) {
+ t.Helper()
+ timeout := time.After(10 * time.Second)
+ for {
+ select {
+ case <-timeout:
+ t.Fatal("timed out waiting for api to work")
+ return
+ default:
+ status, _, err := orc.MakeAPICall(url)
+ if err == nil && status == 200 {
+ return
+ }
+ time.Sleep(1 * time.Second)
+ }
+ }
+}
+
+// DisableGlobalRecoveries stops VTOrc from running any recoveries
+func (orc *VTOrcProcess) DisableGlobalRecoveries(t *testing.T) {
+ orc.MakeAPICallRetry(t, "/api/disable-global-recoveries")
+}
+
+// EnableGlobalRecoveries allows VTOrc to run any recoveries
+func (orc *VTOrcProcess) EnableGlobalRecoveries(t *testing.T) {
+ orc.MakeAPICallRetry(t, "/api/enable-global-recoveries")
+}
diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go
index 46f55f579e3..68dfddb831e 100644
--- a/go/test/endtoend/cluster/vttablet_process.go
+++ b/go/test/endtoend/cluster/vttablet_process.go
@@ -21,6 +21,7 @@ import (
"bufio"
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -28,6 +29,7 @@ import (
"os/exec"
"path"
"reflect"
+ "strconv"
"strings"
"syscall"
"testing"
@@ -64,7 +66,6 @@ type VttabletProcess struct {
VerifyURL string
QueryzURL string
StatusDetailsURL string
- EnableSemiSync bool
SupportsBackup bool
ServingStatus string
DbPassword string
@@ -72,6 +73,7 @@ type VttabletProcess struct {
VreplicationTabletType string
DbFlavor string
Charset string
+ ConsolidationsURL string
//Extra Args to be set before starting the vttablet process
ExtraArgs []string
@@ -117,9 +119,6 @@ func (vttablet *VttabletProcess) Setup() (err error) {
if vttablet.SupportsBackup {
vttablet.proc.Args = append(vttablet.proc.Args, "--restore_from_backup")
}
- if vttablet.EnableSemiSync {
- vttablet.proc.Args = append(vttablet.proc.Args, "--enable_semi_sync")
- }
if vttablet.DbFlavor != "" {
vttablet.proc.Args = append(vttablet.proc.Args, fmt.Sprintf("--db_flavor=%s", vttablet.DbFlavor))
}
@@ -142,6 +141,7 @@ func (vttablet *VttabletProcess) Setup() (err error) {
go func() {
if vttablet.proc != nil {
vttablet.exit <- vttablet.proc.Wait()
+ close(vttablet.exit)
}
}()
@@ -204,6 +204,41 @@ func (vttablet *VttabletProcess) GetStatusDetails() string {
return string(respByte)
}
+// GetConsolidations gets consolidations
+func (vttablet *VttabletProcess) GetConsolidations() (map[string]int, error) {
+ resp, err := http.Get(vttablet.ConsolidationsURL)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get consolidations: %v", err)
+ }
+ defer resp.Body.Close()
+
+ result := make(map[string]int)
+
+ scanner := bufio.NewScanner(resp.Body)
+ for scanner.Scan() {
+ line := scanner.Text()
+ splits := strings.SplitN(line, ":", 2)
+ if len(splits) != 2 {
+ return nil, fmt.Errorf("failed to split consolidations line: %v", err)
+ }
+ // Discard "Length: [N]" lines.
+ if splits[0] == "Length" {
+ continue
+ }
+ countS := splits[0]
+ countI64, err := strconv.ParseInt(countS, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse consolidations count: %v", err)
+ }
+ result[strings.TrimSpace(splits[1])] = int(countI64)
+ }
+ if err := scanner.Err(); err != nil && !errors.Is(err, io.EOF) {
+ return nil, fmt.Errorf("failed to read consolidations: %v", err)
+ }
+
+ return result, nil
+}
+
// WaitForStatus waits till desired status of tablet is reached
func (vttablet *VttabletProcess) WaitForStatus(status string, howLong time.Duration) bool {
ticker := time.NewTicker(howLong)
@@ -361,12 +396,10 @@ func (vttablet *VttabletProcess) TearDownWithTimeout(timeout time.Duration) erro
return nil
case <-time.After(timeout):
- proc := vttablet.proc
- if proc != nil {
- vttablet.proc.Process.Kill()
- vttablet.proc = nil
- }
- return <-vttablet.exit
+ vttablet.proc.Process.Kill()
+ err := <-vttablet.exit
+ vttablet.proc = nil
+ return err
}
}
@@ -556,7 +589,7 @@ func (vttablet *VttabletProcess) IsShutdown() bool {
// VttabletProcessInstance returns a VttabletProcess handle for vttablet process
// configured with the given Config.
// The process must be manually started by calling setup()
-func VttabletProcessInstance(port, grpcPort, tabletUID int, cell, shard, keyspace string, vtctldPort int, tabletType string, topoPort int, hostname, tmpDirectory string, extraArgs []string, enableSemiSync bool, charset string) *VttabletProcess {
+func VttabletProcessInstance(port, grpcPort, tabletUID int, cell, shard, keyspace string, vtctldPort int, tabletType string, topoPort int, hostname, tmpDirectory string, extraArgs []string, charset string) *VttabletProcess {
vtctl := VtctlProcessInstance(topoPort, hostname)
vttablet := &VttabletProcess{
Name: "vttablet",
@@ -576,7 +609,6 @@ func VttabletProcessInstance(port, grpcPort, tabletUID int, cell, shard, keyspac
GrpcPort: grpcPort,
VtctldAddress: fmt.Sprintf("http://%s:%d", hostname, vtctldPort),
ExtraArgs: extraArgs,
- EnableSemiSync: enableSemiSync,
SupportsBackup: true,
ServingStatus: "NOT_SERVING",
BackupStorageImplementation: "file",
@@ -592,6 +624,7 @@ func VttabletProcessInstance(port, grpcPort, tabletUID int, cell, shard, keyspac
vttablet.VerifyURL = fmt.Sprintf("http://%s:%d/debug/vars", hostname, port)
vttablet.QueryzURL = fmt.Sprintf("http://%s:%d/queryz", hostname, port)
vttablet.StatusDetailsURL = fmt.Sprintf("http://%s:%d/debug/status_details", hostname, port)
+ vttablet.ConsolidationsURL = fmt.Sprintf("http://%s:%d/debug/consolidations", hostname, port)
return vttablet
}
diff --git a/go/test/endtoend/clustertest/main_test.go b/go/test/endtoend/clustertest/main_test.go
index 0be66d56af8..35da40a3edb 100644
--- a/go/test/endtoend/clustertest/main_test.go
+++ b/go/test/endtoend/clustertest/main_test.go
@@ -107,9 +107,10 @@ func testURL(t *testing.T, url string, testCaseName string) {
// getStatusForUrl returns the status code for the URL
func getStatusForURL(url string) int {
- resp, _ := http.Get(url)
- if resp != nil {
- return resp.StatusCode
+ resp, err := http.Get(url)
+ if err != nil {
+ return 0
}
- return 0
+ defer resp.Body.Close()
+ return resp.StatusCode
}
diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go
index 36fcb51d97d..0ba4af1ee41 100644
--- a/go/test/endtoend/clustertest/vtctld_test.go
+++ b/go/test/endtoend/clustertest/vtctld_test.go
@@ -48,12 +48,12 @@ func TestVtctldProcess(t *testing.T) {
url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
testURL(t, url, "keyspace url")
- healthCheckURL := fmt.Sprintf("http://%s:%d/debug/health/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
+ healthCheckURL := fmt.Sprintf("http://%s:%d/debug/health", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
testURL(t, healthCheckURL, "vtctld health check url")
url = fmt.Sprintf("http://%s:%d/api/topodata/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
-
testTopoDataAPI(t, url)
+
testListAllTablets(t)
testTabletStatus(t)
testExecuteAsDba(t)
@@ -62,13 +62,15 @@ func TestVtctldProcess(t *testing.T) {
func testTopoDataAPI(t *testing.T, url string) {
resp, err := http.Get(url)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, resp.StatusCode, 200)
resultMap := make(map[string]any)
- respByte, _ := io.ReadAll(resp.Body)
+ respByte, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
err = json.Unmarshal(respByte, &resultMap)
- require.Nil(t, err)
+ require.NoError(t, err)
errorValue := reflect.ValueOf(resultMap["Error"])
assert.Empty(t, errorValue.String())
@@ -83,7 +85,7 @@ func testTopoDataAPI(t *testing.T, url string) {
func testListAllTablets(t *testing.T) {
// first w/o any filters, aside from cell
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ListAllTablets", clusterInstance.Cell)
- require.Nil(t, err)
+ require.NoError(t, err)
tablets := getAllTablets()
@@ -104,7 +106,7 @@ func testListAllTablets(t *testing.T) {
"ListAllTablets", "--", "--keyspace", clusterInstance.Keyspaces[0].Name,
"--tablet_type", "primary",
clusterInstance.Cell)
- require.Nil(t, err)
+ require.NoError(t, err)
// We should only return a single primary tablet per shard in the first keyspace
tabletsFromCMD = strings.Split(result, "\n")
@@ -115,9 +117,10 @@ func testListAllTablets(t *testing.T) {
func testTabletStatus(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("http://%s:%d", clusterInstance.Hostname, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort))
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
respByte, err := io.ReadAll(resp.Body)
- require.Nil(t, err)
+ require.NoError(t, err)
result := string(respByte)
log.Infof("Tablet status response: %v", result)
assert.True(t, strings.Contains(result, `Alias: \n", uuid)
-
- strategySetting, err := schema.ParseDDLStrategy(ddlStrategy)
- assert.NoError(t, err)
-
- if !strategySetting.Strategy.IsDirect() {
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
- fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
- }
-
- if expectHint != "" {
- checkMigratedTable(t, tableName, expectHint)
- }
- return uuid
-}
-
-func testOnlineDDLStatementForTable(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string) (uuid string) {
- return testOnlineDDLStatement(t, alterStatement, ddlStrategy, executeStrategy, tableName, expectHint, expectError)
-}
-
-func testOnlineDDLStatementForView(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string) (uuid string) {
- return testOnlineDDLStatement(t, alterStatement, ddlStrategy, executeStrategy, viewName, expectHint, expectError)
-}
-
-// testRevertMigration reverts a given migration
-func testRevertMigration(t *testing.T, revertUUID string) (uuid string) {
- revertQuery := fmt.Sprintf("revert vitess_migration '%s'", revertUUID)
- r := onlineddl.VtgateExecQuery(t, &vtParams, revertQuery, "")
-
- row := r.Named().Row()
- require.NotNil(t, row)
-
- uuid = row["uuid"].ToString()
-
- fmt.Println("# Generated UUID (for debug purposes):")
- fmt.Printf("<%s>\n", uuid)
-
- _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
- return uuid
-}
-
-// checkTable checks the number of tables in the first two shards.
-func checkTable(t *testing.T, showTableName string, expectExists bool) bool {
- expectCount := 0
- if expectExists {
- expectCount = 1
- }
- for i := range clusterInstance.Keyspaces[0].Shards {
- if !checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) {
- return false
- }
- }
- return true
-}
-
-// checkTablesCount checks the number of tables in the given tablet
-func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) bool {
- query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName)
- queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true)
- require.Nil(t, err)
- return assert.Equal(t, expectCount, len(queryResult.Rows))
-}
-
-// checkMigratedTables checks the CREATE STATEMENT of a table after migration
-func checkMigratedTable(t *testing.T, tableName, expectHint string) {
- for i := range clusterInstance.Keyspaces[0].Shards {
- createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName)
- assert.Contains(t, createStatement, expectHint)
- }
-}
-
-// getCreateTableStatement returns the CREATE TABLE statement for a given table
-func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) {
- queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true)
- require.Nil(t, err)
-
- assert.Equal(t, len(queryResult.Rows), 1)
- assert.GreaterOrEqual(t, len(queryResult.Rows[0]), 2) // table name, create statement; views have even more columns
- statement = queryResult.Rows[0][1].ToString()
- return statement
-}
-
-func generateInsert(t *testing.T, conn *mysql.Conn) error {
- id := rand.Int31n(int32(maxTableRows))
- query := fmt.Sprintf(insertRowStatement, id)
- qr, err := conn.ExecuteFetch(query, 1000, true)
-
- func() {
- writeMetrics.mu.Lock()
- defer writeMetrics.mu.Unlock()
-
- writeMetrics.insertsAttempts++
- if err != nil {
- writeMetrics.insertsFailures++
- return
- }
- assert.Less(t, qr.RowsAffected, uint64(2))
- if qr.RowsAffected == 0 {
- writeMetrics.insertsNoops++
- return
- }
- writeMetrics.inserts++
- }()
- return err
-}
-
-func generateUpdate(t *testing.T, conn *mysql.Conn) error {
- id := rand.Int31n(int32(maxTableRows))
- query := fmt.Sprintf(updateRowStatement, id)
- qr, err := conn.ExecuteFetch(query, 1000, true)
-
- func() {
- writeMetrics.mu.Lock()
- defer writeMetrics.mu.Unlock()
-
- writeMetrics.updatesAttempts++
- if err != nil {
- writeMetrics.updatesFailures++
- return
- }
- assert.Less(t, qr.RowsAffected, uint64(2))
- if qr.RowsAffected == 0 {
- writeMetrics.updatesNoops++
- return
- }
- writeMetrics.updates++
- }()
- return err
-}
-
-func generateDelete(t *testing.T, conn *mysql.Conn) error {
- id := rand.Int31n(int32(maxTableRows))
- query := fmt.Sprintf(deleteRowStatement, id)
- qr, err := conn.ExecuteFetch(query, 1000, true)
-
- func() {
- writeMetrics.mu.Lock()
- defer writeMetrics.mu.Unlock()
-
- writeMetrics.deletesAttempts++
- if err != nil {
- writeMetrics.deletesFailures++
- return
- }
- assert.Less(t, qr.RowsAffected, uint64(2))
- if qr.RowsAffected == 0 {
- writeMetrics.deletesNoops++
- return
- }
- writeMetrics.deletes++
- }()
- return err
-}
-
-func initTable(t *testing.T) {
- log.Infof("initTable begin")
- defer log.Infof("initTable complete")
-
- ctx := context.Background()
- conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
- defer conn.Close()
-
- writeMetrics.Clear()
- _, err = conn.ExecuteFetch(truncateStatement, 1000, true)
- require.Nil(t, err)
-
- for i := 0; i < maxTableRows/2; i++ {
- generateInsert(t, conn)
- }
- for i := 0; i < maxTableRows/4; i++ {
- generateUpdate(t, conn)
- }
- for i := 0; i < maxTableRows/4; i++ {
- generateDelete(t, conn)
- }
-}
-
-func testSelectTableMetrics(t *testing.T) {
- writeMetrics.mu.Lock()
- defer writeMetrics.mu.Unlock()
-
- log.Infof("%s", writeMetrics.String())
-
- ctx := context.Background()
- conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
- defer conn.Close()
-
- rs, err := conn.ExecuteFetch(selectCountRowsStatement, 1000, true)
- require.Nil(t, err)
-
- row := rs.Named().Row()
- require.NotNil(t, row)
- log.Infof("testSelectTableMetrics, row: %v", row)
- numRows := row.AsInt64("num_rows", 0)
- sumUpdates := row.AsInt64("sum_updates", 0)
-
- assert.NotZero(t, numRows)
- assert.NotZero(t, sumUpdates)
- assert.NotZero(t, writeMetrics.inserts)
- assert.NotZero(t, writeMetrics.deletes)
- assert.NotZero(t, writeMetrics.updates)
- assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows)
- assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1
-}
diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
index 644def2d4c2..f829af51f8e 100644
--- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
+++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
@@ -94,35 +94,7 @@ var (
tableName = `stress_test`
viewBaseTableName = `view_base_table_test`
viewName = `view_test`
- partitionedTableName = `part_test`
- createStatement = `
- CREATE TABLE stress_test (
- id bigint(20) not null,
- rand_val varchar(32) null default '',
- hint_col varchar(64) not null default 'just-created',
- created_timestamp timestamp not null default current_timestamp,
- updates int unsigned not null default 0,
- PRIMARY KEY (id),
- key created_idx(created_timestamp),
- key updates_idx(updates)
- ) ENGINE=InnoDB
- `
- createIfNotExistsStatement = `
- CREATE TABLE IF NOT EXISTS stress_test (
- id bigint(20) not null,
- PRIMARY KEY (id)
- ) ENGINE=InnoDB
- `
- dropStatement = `
- DROP TABLE stress_test
- `
- dropIfExistsStatement = `
- DROP TABLE IF EXISTS stress_test
- `
- alterHintStatement = `
- ALTER TABLE stress_test modify hint_col varchar(64) not null default '%s'
- `
- insertRowStatement = `
+ insertRowStatement = `
INSERT IGNORE INTO stress_test (id, rand_val) VALUES (%d, left(md5(rand()), 8))
`
updateRowStatement = `
@@ -139,43 +111,6 @@ var (
TRUNCATE TABLE stress_test
`
- createViewBaseTableStatement = `
- CREATE TABLE view_base_table_test (id INT PRIMARY KEY)
- `
- createViewStatement = `
- CREATE VIEW view_test AS SELECT 'success_create' AS msg FROM view_base_table_test
- `
- createOrReplaceViewStatement = `
- CREATE OR REPLACE VIEW view_test AS SELECT 'success_replace' AS msg FROM view_base_table_test
- `
- alterViewStatement = `
- ALTER VIEW view_test AS SELECT 'success_alter' AS msg FROM view_base_table_test
- `
- dropViewStatement = `
- DROP VIEW view_test
- `
- dropViewIfExistsStatement = `
- DROP VIEW IF EXISTS view_test
- `
- createPartitionedTableStatement = `
- CREATE TABLE part_test (
- id INT NOT NULL,
- ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
- primary key (id)
- )
- PARTITION BY RANGE (id) (
- PARTITION p1 VALUES LESS THAN (10),
- PARTITION p2 VALUES LESS THAN (20),
- PARTITION p3 VALUES LESS THAN (30),
- PARTITION p4 VALUES LESS THAN (40),
- PARTITION p5 VALUES LESS THAN (50),
- PARTITION p6 VALUES LESS THAN (60)
- )
- `
- populatePartitionedTableStatement = `
- INSERT INTO part_test (id) VALUES (2),(11),(23),(37),(41),(53)
- `
-
writeMetrics WriteMetrics
)
@@ -184,6 +119,16 @@ const (
maxConcurrency = 5
)
+type revertibleTestCase struct {
+ name string
+ fromSchema string
+ toSchema string
+ // expectProblems bool
+ removedUniqueKeyNames string
+ droppedNoDefaultColumnNames string
+ expandedColumnNames string
+}
+
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
@@ -260,6 +205,262 @@ func TestSchemaChange(t *testing.T) {
shards = clusterInstance.Keyspaces[0].Shards
require.Equal(t, 1, len(shards))
+ t.Run("revertible", testRevertible)
+ t.Run("revert", testRevert)
+}
+
+func testRevertible(t *testing.T) {
+
+ var testCases = []revertibleTestCase{
+ {
+ name: "identical schemas",
+ fromSchema: `id int primary key, i1 int not null default 0`,
+ toSchema: `id int primary key, i2 int not null default 0`,
+ },
+ {
+ name: "different schemas, nothing to note",
+ fromSchema: `id int primary key, i1 int not null default 0, unique key i1_uidx(i1)`,
+ toSchema: `id int primary key, i1 int not null default 0, i2 int not null default 0, unique key i1_uidx(i1)`,
+ },
+ {
+ name: "removed non-nullable unique key",
+ fromSchema: `id int primary key, i1 int not null default 0, unique key i1_uidx(i1)`,
+ toSchema: `id int primary key, i2 int not null default 0`,
+ removedUniqueKeyNames: `i1_uidx`,
+ },
+ {
+ name: "removed nullable unique key",
+ fromSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1)`,
+ toSchema: `id int primary key, i2 int default null`,
+ removedUniqueKeyNames: `i1_uidx`,
+ },
+ {
+ name: "expanding unique key removes unique constraint",
+ fromSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1)`,
+ toSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1, id)`,
+ removedUniqueKeyNames: `i1_uidx`,
+ },
+ {
+ name: "reducing unique key does not remove unique constraint",
+ fromSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1, id)`,
+ toSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1)`,
+ removedUniqueKeyNames: ``,
+ },
+ {
+ name: "remove column without default",
+ fromSchema: `id int primary key, i1 int not null`,
+ toSchema: `id int primary key, i2 int not null default 0`,
+ droppedNoDefaultColumnNames: `i1`,
+ },
+ {
+ name: "expanded: nullable",
+ fromSchema: `id int primary key, i1 int not null, i2 int default null`,
+ toSchema: `id int primary key, i1 int default null, i2 int not null`,
+ expandedColumnNames: `i1`,
+ },
+ {
+ name: "expanded: longer text",
+ fromSchema: `id int primary key, i1 int default null, v1 varchar(40) not null, v2 varchar(5), v3 varchar(3)`,
+ toSchema: `id int primary key, i1 int not null, v1 varchar(100) not null, v2 char(3), v3 char(5)`,
+ expandedColumnNames: `v1,v3`,
+ },
+ {
+ name: "expanded: int numeric precision and scale",
+ fromSchema: `id int primary key, i1 int, i2 tinyint, i3 mediumint, i4 bigint`,
+ toSchema: `id int primary key, i1 int, i2 mediumint, i3 int, i4 tinyint`,
+ expandedColumnNames: `i2,i3`,
+ },
+ {
+ name: "expanded: floating point",
+ fromSchema: `id int primary key, i1 int, n2 bigint, n3 bigint, n4 float, n5 double`,
+ toSchema: `id int primary key, i1 int, n2 float, n3 double, n4 double, n5 float`,
+ expandedColumnNames: `n2,n3,n4`,
+ },
+ {
+ name: "expanded: decimal numeric precision and scale",
+ fromSchema: `id int primary key, i1 int, d1 decimal(10,2), d2 decimal (10,2), d3 decimal (10,2)`,
+ toSchema: `id int primary key, i1 int, d1 decimal(11,2), d2 decimal (9,1), d3 decimal (10,3)`,
+ expandedColumnNames: `d1,d3`,
+ },
+ {
+ name: "expanded: signed, unsigned",
+ fromSchema: `id int primary key, i1 bigint signed, i2 int unsigned, i3 bigint unsigned`,
+ toSchema: `id int primary key, i1 int signed, i2 int signed, i3 int signed`,
+ expandedColumnNames: `i2,i3`,
+ },
+ {
+ name: "expanded: signed, unsigned: range",
+ fromSchema: `id int primary key, i1 int signed, i2 bigint signed, i3 int signed`,
+ toSchema: `id int primary key, i1 int unsigned, i2 int unsigned, i3 bigint unsigned`,
+ expandedColumnNames: `i1,i3`,
+ },
+ {
+ name: "expanded: datetime precision",
+ fromSchema: `id int primary key, dt1 datetime, ts1 timestamp, ti1 time, dt2 datetime(3), dt3 datetime(6), ts2 timestamp(3)`,
+ toSchema: `id int primary key, dt1 datetime(3), ts1 timestamp(6), ti1 time(3), dt2 datetime(6), dt3 datetime(3), ts2 timestamp`,
+ expandedColumnNames: `dt1,ts1,ti1,dt2`,
+ },
+ {
+ name: "expanded: strange data type changes",
+ fromSchema: `id int primary key, dt1 datetime, ts1 timestamp, i1 int, d1 date, e1 enum('a', 'b')`,
+ toSchema: `id int primary key, dt1 char(32), ts1 varchar(32), i1 tinytext, d1 char(2), e1 varchar(2)`,
+ expandedColumnNames: `dt1,ts1,i1,d1,e1`,
+ },
+ {
+ name: "expanded: temporal types",
+ fromSchema: `id int primary key, t1 time, t2 timestamp, t3 date, t4 datetime, t5 time, t6 date`,
+ toSchema: `id int primary key, t1 datetime, t2 datetime, t3 timestamp, t4 timestamp, t5 timestamp, t6 datetime`,
+ expandedColumnNames: `t1,t2,t3,t5,t6`,
+ },
+ {
+ name: "expanded: character sets",
+ fromSchema: `id int primary key, c1 char(3) charset utf8, c2 char(3) charset utf8mb4, c3 char(3) charset ascii, c4 char(3) charset utf8mb4, c5 char(3) charset utf8, c6 char(3) charset latin1`,
+ toSchema: `id int primary key, c1 char(3) charset utf8mb4, c2 char(3) charset utf8, c3 char(3) charset utf8, c4 char(3) charset ascii, c5 char(3) charset utf8, c6 char(3) charset utf8mb4`,
+ expandedColumnNames: `c1,c3,c6`,
+ },
+ {
+ name: "expanded: enum",
+ fromSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a', 'b'), e3 enum('a', 'b'), e4 enum('a', 'b'), e5 enum('a', 'b'), e6 enum('a', 'b'), e7 enum('a', 'b'), e8 enum('a', 'b')`,
+ toSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a'), e3 enum('a', 'b', 'c'), e4 enum('a', 'x'), e5 enum('a', 'x', 'b'), e6 enum('b'), e7 varchar(1), e8 tinyint`,
+ expandedColumnNames: `e3,e4,e5,e6,e7,e8`,
+ },
+ {
+ name: "expanded: set",
+ fromSchema: `id int primary key, e1 set('a', 'b'), e2 set('a', 'b'), e3 set('a', 'b'), e4 set('a', 'b'), e5 set('a', 'b'), e6 set('a', 'b'), e7 set('a', 'b'), e8 set('a', 'b')`,
+ toSchema: `id int primary key, e1 set('a', 'b'), e2 set('a'), e3 set('a', 'b', 'c'), e4 set('a', 'x'), e5 set('a', 'x', 'b'), e6 set('b'), e7 varchar(1), e8 tinyint`,
+ expandedColumnNames: `e3,e4,e5,e6,e7,e8`,
+ },
+ }
+
+ var (
+ createTableWrapper = `CREATE TABLE onlineddl_test(%s)`
+ dropTableStatement = `
+ DROP TABLE onlineddl_test
+ `
+ tableName = "onlineddl_test"
+ ddlStrategy = "online --declarative --allow-zero-in-date"
+ )
+
+ removeBackticks := func(s string) string {
+ return strings.Replace(s, "`", "", -1)
+ }
+
+ for _, testcase := range testCases {
+ t.Run(testcase.name, func(t *testing.T) {
+
+ t.Run("ensure table dropped", func(t *testing.T) {
+ // A preparation step, to clean up anything from the previous test case
+ uuid := testOnlineDDLStatement(t, dropTableStatement, ddlStrategy, "vtgate", tableName, "")
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, false)
+ })
+
+ t.Run("create from-table", func(t *testing.T) {
+ // A preparation step, to re-create the base table
+ fromStatement := fmt.Sprintf(createTableWrapper, testcase.fromSchema)
+ uuid := testOnlineDDLStatement(t, fromStatement, ddlStrategy, "vtgate", tableName, "")
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, true)
+ })
+ var uuid string
+ t.Run("run migration", func(t *testing.T) {
+ // This is the migration we will test, and see whether it is revertible or not (and why not).
+ toStatement := fmt.Sprintf(createTableWrapper, testcase.toSchema)
+ uuid = testOnlineDDLStatement(t, toStatement, ddlStrategy, "vtgate", tableName, "")
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, true)
+ })
+ t.Run("check migration", func(t *testing.T) {
+ // All right, the actual test
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ removedUniqueKeyNames := row.AsString("removed_unique_key_names", "")
+ droppedNoDefaultColumnNames := row.AsString("dropped_no_default_column_names", "")
+ expandedColumnNames := row.AsString("expanded_column_names", "")
+
+ assert.Equal(t, testcase.removedUniqueKeyNames, removeBackticks(removedUniqueKeyNames))
+ assert.Equal(t, testcase.droppedNoDefaultColumnNames, removeBackticks(droppedNoDefaultColumnNames))
+ assert.Equal(t, testcase.expandedColumnNames, removeBackticks(expandedColumnNames))
+ }
+ })
+ })
+ }
+}
+
+func testRevert(t *testing.T) {
+
+ var (
+ partitionedTableName = `part_test`
+ createStatement = `
+ CREATE TABLE stress_test (
+ id bigint(20) not null,
+ rand_val varchar(32) null default '',
+ hint_col varchar(64) not null default 'just-created',
+ created_timestamp timestamp not null default current_timestamp,
+ updates int unsigned not null default 0,
+ PRIMARY KEY (id),
+ key created_idx(created_timestamp),
+ key updates_idx(updates)
+ ) ENGINE=InnoDB
+ `
+ createIfNotExistsStatement = `
+ CREATE TABLE IF NOT EXISTS stress_test (
+ id bigint(20) not null,
+ PRIMARY KEY (id)
+ ) ENGINE=InnoDB
+ `
+ dropStatement = `
+ DROP TABLE stress_test
+ `
+ dropIfExistsStatement = `
+ DROP TABLE IF EXISTS stress_test
+ `
+ alterHintStatement = `
+ ALTER TABLE stress_test modify hint_col varchar(64) not null default '%s'
+ `
+ createViewBaseTableStatement = `
+ CREATE TABLE view_base_table_test (id INT PRIMARY KEY)
+ `
+ createViewStatement = `
+ CREATE VIEW view_test AS SELECT 'success_create' AS msg FROM view_base_table_test
+ `
+ createOrReplaceViewStatement = `
+ CREATE OR REPLACE VIEW view_test AS SELECT 'success_replace' AS msg FROM view_base_table_test
+ `
+ alterViewStatement = `
+ ALTER VIEW view_test AS SELECT 'success_alter' AS msg FROM view_base_table_test
+ `
+ dropViewStatement = `
+ DROP VIEW view_test
+ `
+ dropViewIfExistsStatement = `
+ DROP VIEW IF EXISTS view_test
+ `
+ createPartitionedTableStatement = `
+ CREATE TABLE part_test (
+ id INT NOT NULL,
+ ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ primary key (id)
+ )
+ PARTITION BY RANGE (id) (
+ PARTITION p1 VALUES LESS THAN (10),
+ PARTITION p2 VALUES LESS THAN (20),
+ PARTITION p3 VALUES LESS THAN (30),
+ PARTITION p4 VALUES LESS THAN (40),
+ PARTITION p5 VALUES LESS THAN (50),
+ PARTITION p6 VALUES LESS THAN (60)
+ )
+ `
+ populatePartitionedTableStatement = `
+ INSERT INTO part_test (id) VALUES (2),(11),(23),(37),(41),(53)
+ `
+ )
+
+ populatePartitionedTable := func(t *testing.T) {
+ onlineddl.VtgateExecQuery(t, &vtParams, populatePartitionedTableStatement, "")
+ }
+
mysqlVersion = onlineddl.GetMySQLVersion(t, clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet())
require.NotEmpty(t, mysqlVersion)
@@ -347,6 +548,7 @@ func TestSchemaChange(t *testing.T) {
// ALTER VIEW
t.Run("ALTER VIEW where view exists", func(t *testing.T) {
// The view exists
+ checkTable(t, viewName, true)
uuid := testOnlineDDLStatementForView(t, alterViewStatement, ddlStrategy, "vtgate", "success_alter")
uuids = append(uuids, uuid)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
@@ -627,7 +829,8 @@ func TestSchemaChange(t *testing.T) {
checkMigratedTable(t, tableName, alterHints[0])
testSelectTableMetrics(t)
})
- t.Run("postponed revert", func(t *testing.T) {
+ testPostponedRevert := func(t *testing.T, expectStatuses ...schema.OnlineDDLStatus) {
+ require.NotEmpty(t, expectStatuses)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var wg sync.WaitGroup
@@ -636,25 +839,59 @@ func TestSchemaChange(t *testing.T) {
defer wg.Done()
runMultipleConnections(ctx, t)
}()
- uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy+" -postpone-completion")
+ uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy+" --postpone-completion")
uuids = append(uuids, uuid)
// Should be still running!
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, expectStatuses...)
// Issue a complete and wait for successful completion
onlineddl.CheckCompleteMigration(t, &vtParams, shards, uuid, true)
- // This part may take a while, because we depend on vreplicatoin polling
+ // This part may take a while, because we depend on vreplication polling
status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 60*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
cancel() // will cause runMultipleConnections() to terminate
wg.Wait()
+ }
+ t.Run("postponed revert", func(t *testing.T) {
+ testPostponedRevert(t, schema.OnlineDDLStatusRunning)
checkMigratedTable(t, tableName, alterHints[1])
testSelectTableMetrics(t)
})
+ t.Run("postponed revert view", func(t *testing.T) {
+ t.Run("CREATE VIEW again", func(t *testing.T) {
+ // The view does not exist
+ uuid := testOnlineDDLStatementForView(t, createViewStatement, ddlStrategy, "vtgate", "success_create")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewName, true)
+ testRevertedUUID(t, uuid, "")
+ })
+ t.Run("ALTER VIEW, postpone completion", func(t *testing.T) {
+ // Technically this test better fits in `onlineddl_scheduler_test.go`, but since we've already laid the grounds here, this is where it landed.
+ // The view exists
+ checkTable(t, viewName, true)
+ uuid := testOnlineDDLStatementForView(t, alterViewStatement, ddlStrategy+" --postpone-completion", "vtgate", "success_create")
+ uuids = append(uuids, uuid)
+
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ // Issue a complete and wait for successful completion
+ onlineddl.CheckCompleteMigration(t, &vtParams, shards, uuid, true)
+ // This part may take a while, because we depend on vreplication polling
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 60*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewName, true)
+ testRevertedUUID(t, uuid, "")
+ })
+ // now verify that the revert for ALTER VIEW respects `--postpone-completion`
+ testPostponedRevert(t, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ checkTable(t, viewName, true)
+ })
+
// INSTANT DDL
t.Run("INSTANT DDL: add column", func(t *testing.T) {
- uuid := testOnlineDDLStatementForTable(t, "alter table stress_test add column i_instant int not null default 0", ddlStrategy+" --fast-over-revertible", "vtgate", "i_instant")
+ uuid := testOnlineDDLStatementForTable(t, "alter table stress_test add column i_instant int not null default 0", ddlStrategy+" --prefer-instant-ddl", "vtgate", "i_instant")
uuids = append(uuids, uuid)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
checkTable(t, tableName, true)
@@ -1075,7 +1312,3 @@ func testSelectTableMetrics(t *testing.T) {
assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows)
assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1
}
-
-func populatePartitionedTable(t *testing.T) {
- onlineddl.VtgateExecQuery(t, &vtParams, populatePartitionedTableStatement, "")
-}
diff --git a/go/test/endtoend/onlineddl/revertible/onlineddl_revertible_test.go b/go/test/endtoend/onlineddl/revertible/onlineddl_revertible_test.go
deleted file mode 100644
index 4835b2f6118..00000000000
--- a/go/test/endtoend/onlineddl/revertible/onlineddl_revertible_test.go
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package singleton
-
-import (
- "flag"
- "fmt"
- "os"
- "path"
- "strings"
- "testing"
- "time"
-
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/vt/schema"
-
- "vitess.io/vitess/go/test/endtoend/cluster"
- "vitess.io/vitess/go/test/endtoend/onlineddl"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-var (
- clusterInstance *cluster.LocalProcessCluster
- shards []cluster.Shard
- vtParams mysql.ConnParams
-
- hostname = "localhost"
- keyspaceName = "ks"
- cell = "zone1"
- schemaChangeDirectory = ""
- tableName = `onlineddl_test`
- createTableWrapper = `CREATE TABLE onlineddl_test(%s)`
- dropTableStatement = `
- DROP TABLE onlineddl_test
- `
- ddlStrategy = "online -declarative -allow-zero-in-date"
-)
-
-type testCase struct {
- name string
- fromSchema string
- toSchema string
- // expectProblems bool
- removedUniqueKeyNames string
- droppedNoDefaultColumnNames string
- expandedColumnNames string
-}
-
-var testCases = []testCase{
- {
- name: "identical schemas",
- fromSchema: `id int primary key, i1 int not null default 0`,
- toSchema: `id int primary key, i2 int not null default 0`,
- },
- {
- name: "different schemas, nothing to note",
- fromSchema: `id int primary key, i1 int not null default 0, unique key i1_uidx(i1)`,
- toSchema: `id int primary key, i1 int not null default 0, i2 int not null default 0, unique key i1_uidx(i1)`,
- },
- {
- name: "removed non-nullable unique key",
- fromSchema: `id int primary key, i1 int not null default 0, unique key i1_uidx(i1)`,
- toSchema: `id int primary key, i2 int not null default 0`,
- removedUniqueKeyNames: `i1_uidx`,
- },
- {
- name: "removed nullable unique key",
- fromSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1)`,
- toSchema: `id int primary key, i2 int default null`,
- removedUniqueKeyNames: `i1_uidx`,
- },
- {
- name: "expanding unique key removes unique constraint",
- fromSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1)`,
- toSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1, id)`,
- removedUniqueKeyNames: `i1_uidx`,
- },
- {
- name: "reducing unique key does not remove unique constraint",
- fromSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1, id)`,
- toSchema: `id int primary key, i1 int default null, unique key i1_uidx(i1)`,
- removedUniqueKeyNames: ``,
- },
- {
- name: "remove column without default",
- fromSchema: `id int primary key, i1 int not null`,
- toSchema: `id int primary key, i2 int not null default 0`,
- droppedNoDefaultColumnNames: `i1`,
- },
- {
- name: "expanded: nullable",
- fromSchema: `id int primary key, i1 int not null, i2 int default null`,
- toSchema: `id int primary key, i1 int default null, i2 int not null`,
- expandedColumnNames: `i1`,
- },
- {
- name: "expanded: longer text",
- fromSchema: `id int primary key, i1 int default null, v1 varchar(40) not null, v2 varchar(5), v3 varchar(3)`,
- toSchema: `id int primary key, i1 int not null, v1 varchar(100) not null, v2 char(3), v3 char(5)`,
- expandedColumnNames: `v1,v3`,
- },
- {
- name: "expanded: int numeric precision and scale",
- fromSchema: `id int primary key, i1 int, i2 tinyint, i3 mediumint, i4 bigint`,
- toSchema: `id int primary key, i1 int, i2 mediumint, i3 int, i4 tinyint`,
- expandedColumnNames: `i2,i3`,
- },
- {
- name: "expanded: floating point",
- fromSchema: `id int primary key, i1 int, n2 bigint, n3 bigint, n4 float, n5 double`,
- toSchema: `id int primary key, i1 int, n2 float, n3 double, n4 double, n5 float`,
- expandedColumnNames: `n2,n3,n4`,
- },
- {
- name: "expanded: decimal numeric precision and scale",
- fromSchema: `id int primary key, i1 int, d1 decimal(10,2), d2 decimal (10,2), d3 decimal (10,2)`,
- toSchema: `id int primary key, i1 int, d1 decimal(11,2), d2 decimal (9,1), d3 decimal (10,3)`,
- expandedColumnNames: `d1,d3`,
- },
- {
- name: "expanded: signed, unsigned",
- fromSchema: `id int primary key, i1 bigint signed, i2 int unsigned, i3 bigint unsigned`,
- toSchema: `id int primary key, i1 int signed, i2 int signed, i3 int signed`,
- expandedColumnNames: `i2,i3`,
- },
- {
- name: "expanded: signed, unsigned: range",
- fromSchema: `id int primary key, i1 int signed, i2 bigint signed, i3 int signed`,
- toSchema: `id int primary key, i1 int unsigned, i2 int unsigned, i3 bigint unsigned`,
- expandedColumnNames: `i1,i3`,
- },
- {
- name: "expanded: datetime precision",
- fromSchema: `id int primary key, dt1 datetime, ts1 timestamp, ti1 time, dt2 datetime(3), dt3 datetime(6), ts2 timestamp(3)`,
- toSchema: `id int primary key, dt1 datetime(3), ts1 timestamp(6), ti1 time(3), dt2 datetime(6), dt3 datetime(3), ts2 timestamp`,
- expandedColumnNames: `dt1,ts1,ti1,dt2`,
- },
- {
- name: "expanded: strange data type changes",
- fromSchema: `id int primary key, dt1 datetime, ts1 timestamp, i1 int, d1 date, e1 enum('a', 'b')`,
- toSchema: `id int primary key, dt1 char(32), ts1 varchar(32), i1 tinytext, d1 char(2), e1 varchar(2)`,
- expandedColumnNames: `dt1,ts1,i1,d1,e1`,
- },
- {
- name: "expanded: temporal types",
- fromSchema: `id int primary key, t1 time, t2 timestamp, t3 date, t4 datetime, t5 time, t6 date`,
- toSchema: `id int primary key, t1 datetime, t2 datetime, t3 timestamp, t4 timestamp, t5 timestamp, t6 datetime`,
- expandedColumnNames: `t1,t2,t3,t5,t6`,
- },
- {
- name: "expanded: character sets",
- fromSchema: `id int primary key, c1 char(3) charset utf8, c2 char(3) charset utf8mb4, c3 char(3) charset ascii, c4 char(3) charset utf8mb4, c5 char(3) charset utf8, c6 char(3) charset latin1`,
- toSchema: `id int primary key, c1 char(3) charset utf8mb4, c2 char(3) charset utf8, c3 char(3) charset utf8, c4 char(3) charset ascii, c5 char(3) charset utf8, c6 char(3) charset utf8mb4`,
- expandedColumnNames: `c1,c3,c6`,
- },
- {
- name: "expanded: enum",
- fromSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a', 'b'), e3 enum('a', 'b'), e4 enum('a', 'b'), e5 enum('a', 'b'), e6 enum('a', 'b'), e7 enum('a', 'b'), e8 enum('a', 'b')`,
- toSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a'), e3 enum('a', 'b', 'c'), e4 enum('a', 'x'), e5 enum('a', 'x', 'b'), e6 enum('b'), e7 varchar(1), e8 tinyint`,
- expandedColumnNames: `e3,e4,e5,e6,e7,e8`,
- },
- {
- name: "expanded: set",
- fromSchema: `id int primary key, e1 set('a', 'b'), e2 set('a', 'b'), e3 set('a', 'b'), e4 set('a', 'b'), e5 set('a', 'b'), e6 set('a', 'b'), e7 set('a', 'b'), e8 set('a', 'b')`,
- toSchema: `id int primary key, e1 set('a', 'b'), e2 set('a'), e3 set('a', 'b', 'c'), e4 set('a', 'x'), e5 set('a', 'x', 'b'), e6 set('b'), e7 varchar(1), e8 tinyint`,
- expandedColumnNames: `e3,e4,e5,e6,e7,e8`,
- },
-}
-
-func TestMain(m *testing.M) {
- defer cluster.PanicHandler(nil)
- flag.Parse()
-
- exitcode, err := func() (int, error) {
- clusterInstance = cluster.NewCluster(cell, hostname)
- schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID()))
- defer os.RemoveAll(schemaChangeDirectory)
- defer clusterInstance.Teardown()
-
- if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) {
- _ = os.Mkdir(schemaChangeDirectory, 0700)
- }
-
- clusterInstance.VtctldExtraArgs = []string{
- "--schema_change_dir", schemaChangeDirectory,
- "--schema_change_controller", "local",
- "--schema_change_check_interval", "1"}
-
- clusterInstance.VtTabletExtraArgs = []string{
- "--enable-lag-throttler",
- "--throttle_threshold", "1s",
- "--heartbeat_enable",
- "--heartbeat_interval", "250ms",
- "--heartbeat_on_demand_duration", "5s",
- "--watch_replication_stream",
- }
- clusterInstance.VtGateExtraArgs = []string{}
-
- if err := clusterInstance.StartTopo(); err != nil {
- return 1, err
- }
-
- // Start keyspace
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- }
-
- // No need for replicas in this stress test
- if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil {
- return 1, err
- }
-
- vtgateInstance := clusterInstance.NewVtgateInstance()
- // Start vtgate
- if err := vtgateInstance.Setup(); err != nil {
- return 1, err
- }
- // ensure it is torn down during cluster TearDown
- clusterInstance.VtgateProcess = *vtgateInstance
- vtParams = mysql.ConnParams{
- Host: clusterInstance.Hostname,
- Port: clusterInstance.VtgateMySQLPort,
- }
-
- return m.Run(), nil
- }()
- if err != nil {
- fmt.Printf("%v\n", err)
- os.Exit(1)
- } else {
- os.Exit(exitcode)
- }
-}
-
-func removeBackticks(s string) string {
- return strings.Replace(s, "`", "", -1)
-}
-
-func TestSchemaChange(t *testing.T) {
- defer cluster.PanicHandler(t)
- shards = clusterInstance.Keyspaces[0].Shards
- require.Equal(t, 1, len(shards))
-
- for _, testcase := range testCases {
- t.Run(testcase.name, func(t *testing.T) {
-
- t.Run("ensure table dropped", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, dropTableStatement, ddlStrategy, "vtgate", "", "", false)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, false)
- })
-
- t.Run("create from-table", func(t *testing.T) {
- fromStatement := fmt.Sprintf(createTableWrapper, testcase.fromSchema)
- uuid := testOnlineDDLStatement(t, fromStatement, ddlStrategy, "vtgate", "", "", false)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, true)
- })
- var uuid string
- t.Run("run migration", func(t *testing.T) {
- toStatement := fmt.Sprintf(createTableWrapper, testcase.toSchema)
- uuid = testOnlineDDLStatement(t, toStatement, ddlStrategy, "vtgate", "", "", false)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, true)
- })
- t.Run("check migration", func(t *testing.T) {
- rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
- require.NotNil(t, rs)
- for _, row := range rs.Named().Rows {
- removedUniqueKeyNames := row.AsString("removed_unique_key_names", "")
- droppedNoDefaultColumnNames := row.AsString("dropped_no_default_column_names", "")
- expandedColumnNames := row.AsString("expanded_column_names", "")
-
- assert.Equal(t, testcase.removedUniqueKeyNames, removeBackticks(removedUniqueKeyNames))
- assert.Equal(t, testcase.droppedNoDefaultColumnNames, removeBackticks(droppedNoDefaultColumnNames))
- assert.Equal(t, testcase.expandedColumnNames, removeBackticks(expandedColumnNames))
- }
- })
- })
- }
-}
-
-// testOnlineDDLStatement runs an online DDL, ALTER statement
-func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string, skipWait bool) (uuid string) {
- strategySetting, err := schema.ParseDDLStrategy(ddlStrategy)
- require.NoError(t, err)
-
- if executeStrategy == "vtgate" {
- result := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, expectError)
- if result != nil {
- row := result.Named().Row()
- if row != nil {
- uuid = row.AsString("uuid", "")
- }
- }
- } else {
- output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true})
- if expectError == "" {
- assert.NoError(t, err)
- uuid = output
- } else {
- assert.Error(t, err)
- assert.Contains(t, output, expectError)
- }
- }
- uuid = strings.TrimSpace(uuid)
- fmt.Println("# Generated UUID (for debug purposes):")
- fmt.Printf("<%s>\n", uuid)
-
- if !strategySetting.Strategy.IsDirect() && !skipWait {
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
- fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
- }
-
- if expectError == "" && expectHint != "" {
- checkMigratedTable(t, tableName, expectHint)
- }
- return uuid
-}
-
-// checkTable checks the number of tables in the first two shards.
-func checkTable(t *testing.T, showTableName string, expectExists bool) bool {
- expectCount := 0
- if expectExists {
- expectCount = 1
- }
- for i := range clusterInstance.Keyspaces[0].Shards {
- if !checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) {
- return false
- }
- }
- return true
-}
-
-// checkTablesCount checks the number of tables in the given tablet
-func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) bool {
- query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName)
- queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true)
- require.Nil(t, err)
- return assert.Equal(t, expectCount, len(queryResult.Rows))
-}
-
-// checkMigratedTables checks the CREATE STATEMENT of a table after migration
-func checkMigratedTable(t *testing.T, tableName, expectHint string) {
- for i := range clusterInstance.Keyspaces[0].Shards {
- createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName)
- assert.Contains(t, createStatement, expectHint)
- }
-}
-
-// getCreateTableStatement returns the CREATE TABLE statement for a given table
-func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) {
- queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true)
- require.Nil(t, err)
-
- assert.Equal(t, len(queryResult.Rows), 1)
- assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement
- statement = queryResult.Rows[0][1].ToString()
- return statement
-}
diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
index 05e1217a670..2b068df643d 100644
--- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
+++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
@@ -17,15 +17,22 @@ limitations under the License.
package scheduler
import (
+ "context"
+ "errors"
"flag"
"fmt"
+ "io"
+ "math/rand"
"os"
"path"
"strings"
+ "sync"
"testing"
"time"
"vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/textutil"
+ "vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
@@ -36,60 +43,154 @@ import (
"github.com/stretchr/testify/require"
)
+const (
+ anyErrorIndicator = ""
+)
+
+type testOnlineDDLStatementParams struct {
+ ddlStatement string
+ ddlStrategy string
+ executeStrategy string
+ expectHint string
+ expectError string
+ skipWait bool
+ migrationContext string
+}
+
+type testRevertMigrationParams struct {
+ revertUUID string
+ executeStrategy string
+ ddlStrategy string
+ migrationContext string
+ expectError string
+ skipWait bool
+}
+
var (
clusterInstance *cluster.LocalProcessCluster
shards []cluster.Shard
vtParams mysql.ConnParams
- normalWaitTime = 20 * time.Second
- extendedWaitTime = 60 * time.Second
+ normalWaitTime = 20 * time.Second
+ extendedWaitTime = 60 * time.Second
+ ensureStateNotChangedTime = 5 * time.Second
hostname = "localhost"
keyspaceName = "ks"
cell = "zone1"
schemaChangeDirectory = ""
overrideVtctlParams *cluster.VtctlClientParams
- ddlStrategy = "vitess"
- t1Name = "t1_test"
- t2Name = "t2_test"
- createT1Statement = `
- CREATE TABLE t1_test (
- id bigint(20) not null,
- hint_col varchar(64) not null default 'just-created',
- PRIMARY KEY (id)
- ) ENGINE=InnoDB
- `
- createT2Statement = `
- CREATE TABLE t2_test (
- id bigint(20) not null,
- hint_col varchar(64) not null default 'just-created',
- PRIMARY KEY (id)
- ) ENGINE=InnoDB
- `
- createT1IfNotExistsStatement = `
- CREATE TABLE IF NOT EXISTS t1_test (
- id bigint(20) not null,
- hint_col varchar(64) not null default 'should_not_appear',
- PRIMARY KEY (id)
- ) ENGINE=InnoDB
- `
- trivialAlterT1Statement = `
- ALTER TABLE t1_test ENGINE=InnoDB;
- `
- trivialAlterT2Statement = `
- ALTER TABLE t2_test ENGINE=InnoDB;
- `
- dropT1Statement = `
- DROP TABLE IF EXISTS t1_test
- `
- dropT3Statement = `
- DROP TABLE IF EXISTS t3_test
- `
- dropT4Statement = `
- DROP TABLE IF EXISTS t4_test
- `
)
+type WriteMetrics struct {
+ mu sync.Mutex
+ insertsAttempts, insertsFailures, insertsNoops, inserts int64
+ updatesAttempts, updatesFailures, updatesNoops, updates int64
+ deletesAttempts, deletesFailures, deletesNoops, deletes int64
+}
+
+func (w *WriteMetrics) Clear() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.inserts = 0
+ w.updates = 0
+ w.deletes = 0
+
+ w.insertsAttempts = 0
+ w.insertsFailures = 0
+ w.insertsNoops = 0
+
+ w.updatesAttempts = 0
+ w.updatesFailures = 0
+ w.updatesNoops = 0
+
+ w.deletesAttempts = 0
+ w.deletesFailures = 0
+ w.deletesNoops = 0
+}
+
+func (w *WriteMetrics) String() string {
+ return fmt.Sprintf(`WriteMetrics: inserts-deletes=%d, updates-deletes=%d,
+insertsAttempts=%d, insertsFailures=%d, insertsNoops=%d, inserts=%d,
+updatesAttempts=%d, updatesFailures=%d, updatesNoops=%d, updates=%d,
+deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d,
+`,
+ w.inserts-w.deletes, w.updates-w.deletes,
+ w.insertsAttempts, w.insertsFailures, w.insertsNoops, w.inserts,
+ w.updatesAttempts, w.updatesFailures, w.updatesNoops, w.updates,
+ w.deletesAttempts, w.deletesFailures, w.deletesNoops, w.deletes,
+ )
+}
+
+func parseTableName(t *testing.T, sql string) (tableName string) {
+ // ddlStatement could possibly be composed of multiple DDL statements
+ tokenizer := sqlparser.NewStringTokenizer(sql)
+ for {
+ stmt, err := sqlparser.ParseNextStrictDDL(tokenizer)
+ if err != nil && errors.Is(err, io.EOF) {
+ break
+ }
+ require.NoErrorf(t, err, "parsing sql: [%v]", sql)
+ ddlStmt, ok := stmt.(sqlparser.DDLStatement)
+ require.True(t, ok)
+ tableName = ddlStmt.GetTable().Name.String()
+ if tableName == "" {
+ tbls := ddlStmt.AffectedTables()
+ require.NotEmpty(t, tbls)
+ tableName = tbls[0].Name.String()
+ }
+ require.NotEmptyf(t, tableName, "could not parse table name from SQL: %s", sqlparser.String(ddlStmt))
+ }
+ require.NotEmptyf(t, tableName, "could not parse table name from SQL: %s", sql)
+ return tableName
+}
+
+// testOnlineDDLStatement runs an online DDL, ALTER statement
+func TestParseTableName(t *testing.T) {
+ sqls := []string{
+ `ALTER TABLE t1_test ENGINE=InnoDB`,
+ `ALTER TABLE t1_test ENGINE=InnoDB;`,
+ `DROP TABLE IF EXISTS t1_test`,
+ `
+ ALTER TABLE stress_test ENGINE=InnoDB;
+ ALTER TABLE stress_test ENGINE=InnoDB;
+ ALTER TABLE stress_test ENGINE=InnoDB;
+ `,
+ }
+
+ for _, sql := range sqls {
+ t.Run(sql, func(t *testing.T) {
+ parseTableName(t, sql)
+ })
+ }
+}
+
+func waitForReadyToComplete(t *testing.T, uuid string, expected bool) {
+ ctx, cancel := context.WithTimeout(context.Background(), normalWaitTime)
+ defer cancel()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+ for {
+
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ readyToComplete := row.AsInt64("ready_to_complete", 0)
+ if expected == (readyToComplete > 0) {
+ // all good. This is what we waited for
+ return
+ }
+ }
+ select {
+ case <-ticker.C:
+ case <-ctx.Done():
+ }
+ require.NoError(t, ctx.Err())
+ }
+}
+
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
@@ -157,12 +258,102 @@ func TestMain(m *testing.M) {
}
func TestSchemaChange(t *testing.T) {
+ t.Run("scheduler", testScheduler)
+ t.Run("singleton", testSingleton)
+ t.Run("declarative", testDeclarative)
+ t.Run("foreign-keys", testForeignKeys)
+ t.Run("summary: validate sequential migration IDs", func(t *testing.T) {
+ onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards)
+ })
+ t.Run("summary: validate completed_timestamp", func(t *testing.T) {
+ onlineddl.ValidateCompletedTimestamp(t, &vtParams)
+ })
+}
+
+func testScheduler(t *testing.T) {
defer cluster.PanicHandler(t)
shards = clusterInstance.Keyspaces[0].Shards
require.Equal(t, 1, len(shards))
- var t1uuid string
- var t2uuid string
+ ddlStrategy := "vitess"
+
+ createParams := func(ddlStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string, skipWait bool) *testOnlineDDLStatementParams {
+ return &testOnlineDDLStatementParams{
+ ddlStatement: ddlStatement,
+ ddlStrategy: ddlStrategy,
+ executeStrategy: executeStrategy,
+ expectHint: expectHint,
+ expectError: expectError,
+ skipWait: skipWait,
+ }
+ }
+
+ createRevertParams := func(revertUUID string, ddlStrategy string, executeStrategy string, expectError string, skipWait bool) *testRevertMigrationParams {
+ return &testRevertMigrationParams{
+ revertUUID: revertUUID,
+ executeStrategy: executeStrategy,
+ ddlStrategy: ddlStrategy,
+ expectError: expectError,
+ skipWait: skipWait,
+ }
+ }
+
+ mysqlVersion := onlineddl.GetMySQLVersion(t, clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet())
+ require.NotEmpty(t, mysqlVersion)
+ _, capableOf, _ := mysql.GetFlavor(mysqlVersion, nil)
+
+ var (
+ t1uuid string
+ t2uuid string
+
+ t1Name = "t1_test"
+ t2Name = "t2_test"
+ createT1Statement = `
+ CREATE TABLE t1_test (
+ id bigint(20) not null,
+ hint_col varchar(64) not null default 'just-created',
+ PRIMARY KEY (id)
+ ) ENGINE=InnoDB
+ `
+ createT2Statement = `
+ CREATE TABLE t2_test (
+ id bigint(20) not null,
+ hint_col varchar(64) not null default 'just-created',
+ PRIMARY KEY (id)
+ ) ENGINE=InnoDB
+ `
+ createT1IfNotExistsStatement = `
+ CREATE TABLE IF NOT EXISTS t1_test (
+ id bigint(20) not null,
+ hint_col varchar(64) not null default 'should_not_appear',
+ PRIMARY KEY (id)
+ ) ENGINE=InnoDB
+ `
+ trivialAlterT1Statement = `
+ ALTER TABLE t1_test ENGINE=InnoDB;
+ `
+ trivialAlterT2Statement = `
+ ALTER TABLE t2_test ENGINE=InnoDB;
+ `
+ instantAlterT1Statement = `
+ ALTER TABLE t1_test ADD COLUMN i0 INT NOT NULL DEFAULT 0;
+ `
+ dropT1Statement = `
+ DROP TABLE IF EXISTS t1_test
+ `
+ dropT3Statement = `
+ DROP TABLE IF EXISTS t3_test
+ `
+ dropT4Statement = `
+ DROP TABLE IF EXISTS t4_test
+ `
+ alterExtraColumn = `
+ ALTER TABLE t1_test ADD COLUMN extra_column int NOT NULL DEFAULT 0
+ `
+ createViewDependsOnExtraColumn = `
+ CREATE VIEW t1_test_view AS SELECT id, extra_column FROM t1_test
+ `
+ )
testReadTimestamp := func(t *testing.T, uuid string, timestampColumn string) (timestamp string) {
rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
@@ -203,20 +394,20 @@ func TestSchemaChange(t *testing.T) {
// CREATE
t.Run("CREATE TABLEs t1, t1", func(t *testing.T) {
{ // The table does not exist
- t1uuid = testOnlineDDLStatement(t, createT1Statement, ddlStrategy, "vtgate", "just-created", "", false)
+ t1uuid = testOnlineDDLStatement(t, createParams(createT1Statement, ddlStrategy, "vtgate", "just-created", "", false))
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
checkTable(t, t1Name, true)
}
{
// The table does not exist
- t2uuid = testOnlineDDLStatement(t, createT2Statement, ddlStrategy, "vtgate", "just-created", "", false)
+ t2uuid = testOnlineDDLStatement(t, createParams(createT2Statement, ddlStrategy, "vtgate", "just-created", "", false))
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusComplete)
checkTable(t, t2Name, true)
}
testTableSequentialTimes(t, t1uuid, t2uuid)
})
t.Run("Postpone launch CREATE", func(t *testing.T) {
- t1uuid = testOnlineDDLStatement(t, createT1IfNotExistsStatement, ddlStrategy+" --postpone-launch", "vtgate", "", "", true) // skip wait
+ t1uuid = testOnlineDDLStatement(t, createParams(createT1IfNotExistsStatement, ddlStrategy+" --postpone-launch", "vtgate", "", "", true)) // skip wait
time.Sleep(2 * time.Second)
rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid)
require.NotNil(t, rs)
@@ -240,7 +431,7 @@ func TestSchemaChange(t *testing.T) {
})
})
t.Run("Postpone launch ALTER", func(t *testing.T) {
- t1uuid = testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy+" --postpone-launch", "vtgate", "", "", true) // skip wait
+ t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-launch", "vtgate", "", "", true)) // skip wait
time.Sleep(2 * time.Second)
rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid)
require.NotNil(t, rs)
@@ -287,8 +478,8 @@ func TestSchemaChange(t *testing.T) {
})
})
t.Run("ALTER both tables non-concurrent", func(t *testing.T) {
- t1uuid = testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy, "vtgate", "", "", true) // skip wait
- t2uuid = testOnlineDDLStatement(t, trivialAlterT2Statement, ddlStrategy, "vtgate", "", "", true) // skip wait
+ t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtgate", "", "", true)) // skip wait
+ t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy, "vtgate", "", "", true)) // skip wait
t.Run("wait for t1 complete", func(t *testing.T) {
status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
@@ -305,15 +496,15 @@ func TestSchemaChange(t *testing.T) {
testTableSequentialTimes(t, t1uuid, t2uuid)
})
t.Run("ALTER both tables non-concurrent, postponed", func(t *testing.T) {
- t1uuid = testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy+" -postpone-completion", "vtgate", "", "", true) // skip wait
- t2uuid = testOnlineDDLStatement(t, trivialAlterT2Statement, ddlStrategy+" -postpone-completion", "vtgate", "", "", true) // skip wait
+ t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" -postpone-completion", "vtgate", "", "", true)) // skip wait
+ t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy+" -postpone-completion", "vtgate", "", "", true)) // skip wait
testAllowConcurrent(t, "t1", t1uuid, 0)
t.Run("expect t1 running, t2 queued", func(t *testing.T) {
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
// now that t1 is running, let's unblock t2. We expect it to remain queued.
onlineddl.CheckCompleteMigration(t, &vtParams, shards, t2uuid, true)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// t1 should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
// non-concurrent -- should be queued!
@@ -335,17 +526,18 @@ func TestSchemaChange(t *testing.T) {
})
testTableSequentialTimes(t, t1uuid, t2uuid)
})
+
t.Run("ALTER both tables, elligible for concurrenct", func(t *testing.T) {
// ALTER TABLE is allowed to run concurrently when no other ALTER is busy with copy state. Our tables are tiny so we expect to find both migrations running
- t1uuid = testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true) // skip wait
- t2uuid = testOnlineDDLStatement(t, trivialAlterT2Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true) // skip wait
+ t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true)) // skip wait
+ t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true)) // skip wait
testAllowConcurrent(t, "t1", t1uuid, 1)
testAllowConcurrent(t, "t2", t2uuid, 1)
t.Run("expect both running", func(t *testing.T) {
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// both should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusRunning)
@@ -374,8 +566,8 @@ func TestSchemaChange(t *testing.T) {
onlineddl.ThrottleAllMigrations(t, &vtParams)
defer onlineddl.UnthrottleAllMigrations(t, &vtParams)
// ALTER TABLE is allowed to run concurrently when no other ALTER is busy with copy state. Our tables are tiny so we expect to find both migrations running
- t1uuid = testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true) // skip wait
- t2uuid = testOnlineDDLStatement(t, trivialAlterT2Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true) // skip wait
+ t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true)) // skip wait
+ t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true)) // skip wait
testAllowConcurrent(t, "t1", t1uuid, 1)
testAllowConcurrent(t, "t2", t2uuid, 1)
@@ -384,16 +576,22 @@ func TestSchemaChange(t *testing.T) {
// since all migrations are throttled, t1 migration is not ready_to_complete, hence
// t2 should not be running
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// both should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
})
+
+ t.Run("check ready to complete (before)", func(t *testing.T) {
+ for _, uuid := range []string{t1uuid, t2uuid} {
+ waitForReadyToComplete(t, uuid, false)
+ }
+ })
t.Run("unthrottle, expect t2 running", func(t *testing.T) {
onlineddl.UnthrottleAllMigrations(t, &vtParams)
// t1 should now be ready_to_complete, hence t2 should start running
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, extendedWaitTime, schema.OnlineDDLStatusRunning)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// both should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusRunning)
@@ -416,11 +614,17 @@ func TestSchemaChange(t *testing.T) {
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
})
+ t.Run("check ready to complete (after)", func(t *testing.T) {
+ for _, uuid := range []string{t1uuid, t2uuid} {
+ waitForReadyToComplete(t, uuid, true)
+ }
+ })
+
testTableCompletionTimes(t, t2uuid, t1uuid)
})
t.Run("REVERT both tables concurrent, postponed", func(t *testing.T) {
- t1uuid = testRevertMigration(t, t1uuid, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", true)
- t2uuid = testRevertMigration(t, t2uuid, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", true)
+ t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", true))
+ t2uuid = testRevertMigration(t, createRevertParams(t2uuid, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", true))
testAllowConcurrent(t, "t1", t1uuid, 1)
t.Run("expect both migrations to run", func(t *testing.T) {
@@ -428,12 +632,7 @@ func TestSchemaChange(t *testing.T) {
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
})
t.Run("test ready-to-complete", func(t *testing.T) {
- rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid)
- require.NotNil(t, rs)
- for _, row := range rs.Named().Rows {
- readyToComplete := row.AsInt64("ready_to_complete", 0)
- assert.Equal(t, int64(1), readyToComplete)
- }
+ waitForReadyToComplete(t, t1uuid, true)
})
t.Run("complete t2", func(t *testing.T) {
// now that both are running, let's unblock t2. We expect it to complete.
@@ -456,8 +655,8 @@ func TestSchemaChange(t *testing.T) {
})
})
t.Run("concurrent REVERT vs two non-concurrent DROPs", func(t *testing.T) {
- t1uuid = testRevertMigration(t, t1uuid, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", true)
- drop3uuid := testOnlineDDLStatement(t, dropT3Statement, ddlStrategy, "vtgate", "", "", true) // skip wait
+ t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", true))
+ drop3uuid := testOnlineDDLStatement(t, createParams(dropT3Statement, ddlStrategy, "vtgate", "", "", true)) // skip wait
testAllowConcurrent(t, "t1", t1uuid, 1)
testAllowConcurrent(t, "drop3", drop3uuid, 0)
@@ -466,7 +665,7 @@ func TestSchemaChange(t *testing.T) {
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
})
- drop1uuid := testOnlineDDLStatement(t, dropT1Statement, ddlStrategy, "vtgate", "", "", true) // skip wait
+ drop1uuid := testOnlineDDLStatement(t, createParams(dropT1Statement, ddlStrategy, "vtgate", "", "", true)) // skip wait
t.Run("drop3 complete", func(t *testing.T) {
// drop3 migration should not block. It can run concurrently to t1, and does not conflict
status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, drop3uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
@@ -494,9 +693,9 @@ func TestSchemaChange(t *testing.T) {
})
})
t.Run("non-concurrent REVERT vs three concurrent drops", func(t *testing.T) {
- t1uuid = testRevertMigration(t, t1uuid, ddlStrategy+" -postpone-completion", "vtgate", "", true)
- drop3uuid := testOnlineDDLStatement(t, dropT3Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true) // skip wait
- drop4uuid := testOnlineDDLStatement(t, dropT4Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true) // skip wait
+ t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" -postpone-completion", "vtgate", "", true))
+ drop3uuid := testOnlineDDLStatement(t, createParams(dropT3Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true)) // skip wait
+ drop4uuid := testOnlineDDLStatement(t, createParams(dropT4Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true)) // skip wait
testAllowConcurrent(t, "drop3", drop3uuid, 1)
t.Run("expect t1 migration to run", func(t *testing.T) {
@@ -504,7 +703,7 @@ func TestSchemaChange(t *testing.T) {
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
})
- drop1uuid := testOnlineDDLStatement(t, dropT1Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true) // skip wait
+ drop1uuid := testOnlineDDLStatement(t, createParams(dropT1Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true)) // skip wait
testAllowConcurrent(t, "drop1", drop1uuid, 1)
t.Run("t3drop complete", func(t *testing.T) {
// drop3 migration should not block. It can run concurrently to t1, and does not conflict
@@ -544,7 +743,7 @@ func TestSchemaChange(t *testing.T) {
checkTable(t, t1Name, false)
})
t.Run("revert t1 drop", func(t *testing.T) {
- revertDrop3uuid := testRevertMigration(t, drop1uuid, ddlStrategy+" -allow-concurrent", "vtgate", "", true)
+ revertDrop3uuid := testRevertMigration(t, createRevertParams(drop1uuid, ddlStrategy+" -allow-concurrent", "vtgate", "", true))
status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertDrop3uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, revertDrop3uuid, schema.OnlineDDLStatusComplete)
@@ -552,36 +751,31 @@ func TestSchemaChange(t *testing.T) {
})
})
t.Run("conflicting migration does not block other queued migrations", func(t *testing.T) {
- t1uuid = testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy, "vtgate", "", "", false) // skip wait
+ t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtgate", "", "", false)) // skip wait
t.Run("trivial t1 migration", func(t *testing.T) {
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
checkTable(t, t1Name, true)
})
- t1uuid = testRevertMigration(t, t1uuid, ddlStrategy+" -postpone-completion", "vtgate", "", true)
+ t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" -postpone-completion", "vtgate", "", true))
t.Run("expect t1 revert migration to run", func(t *testing.T) {
status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
})
- drop1uuid := testOnlineDDLStatement(t, dropT1Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true) // skip wait
+ drop1uuid := testOnlineDDLStatement(t, createParams(dropT1Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true)) // skip wait
t.Run("t1drop blocked", func(t *testing.T) {
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// drop1 migration should block. It can run concurrently to t1, but conflicts on table name
onlineddl.CheckMigrationStatus(t, &vtParams, shards, drop1uuid, schema.OnlineDDLStatusReady)
})
t.Run("t3 ready to complete", func(t *testing.T) {
- rs := onlineddl.ReadMigrations(t, &vtParams, drop1uuid)
- require.NotNil(t, rs)
- for _, row := range rs.Named().Rows {
- readyToComplete := row.AsInt64("ready_to_complete", 0)
- assert.Equal(t, int64(1), readyToComplete)
- }
+ waitForReadyToComplete(t, drop1uuid, true)
})
t.Run("t3drop complete", func(t *testing.T) {
// drop3 migration should not block. It can run concurrently to t1, and does not conflict
// even though t1drop is blocked! This is the heart of this test
- drop3uuid := testOnlineDDLStatement(t, dropT3Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", false)
+ drop3uuid := testOnlineDDLStatement(t, createParams(dropT3Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", false))
onlineddl.CheckMigrationStatus(t, &vtParams, shards, drop3uuid, schema.OnlineDDLStatusComplete)
})
t.Run("cancel drop1", func(t *testing.T) {
@@ -611,7 +805,7 @@ func TestSchemaChange(t *testing.T) {
defer func() { overrideVtctlParams = nil }()
// create a migration and cancel it. We don't let it complete. We want it in "failed" state
t.Run("start and fail migration", func(t *testing.T) {
- executedUUID := testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy+" -postpone-completion", "vtctl", "", "", true) // skip wait
+ executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" -postpone-completion", "vtctl", "", "", true)) // skip wait
require.Equal(t, uuid, executedUUID)
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
// let's cancel it
@@ -623,7 +817,7 @@ func TestSchemaChange(t *testing.T) {
// now, we submit the exact same migratoin again: same UUID, same migration context.
t.Run("resubmit migration", func(t *testing.T) {
- executedUUID := testOnlineDDLStatement(t, trivialAlterT1Statement, ddlStrategy, "vtctl", "", "", true) // skip wait
+ executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtctl", "", "", true)) // skip wait
require.Equal(t, uuid, executedUUID)
// expect it to complete
@@ -639,21 +833,1303 @@ func TestSchemaChange(t *testing.T) {
}
})
})
+
+ t.Run("Idempotent submission, retry failed migration in singleton context", func(t *testing.T) {
+ uuid := "00000000_1111_3333_3333_444444444444"
+ ddlStrategy := ddlStrategy + " --singleton-context"
+ overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"}
+ defer func() { overrideVtctlParams = nil }()
+ // create a migration and cancel it. We don't let it complete. We want it in "failed" state
+ t.Run("start and fail migration", func(t *testing.T) {
+ executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion", "vtctl", "", "", true)) // skip wait
+ require.Equal(t, uuid, executedUUID)
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
+ // let's cancel it
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true)
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled)
+ })
+
+ // now, we submit the exact same migratoin again: same UUID, same migration context.
+ t.Run("resubmit migration", func(t *testing.T) {
+ executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtctl", "", "", true)) // skip wait
+ require.Equal(t, uuid, executedUUID)
+
+ // expect it to complete
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ retries := row.AsInt64("retries", 0)
+ assert.Greater(t, retries, int64(0))
+ }
+ })
+ })
+
+ // INSTANT DDL
+ instantDDLCapable, err := capableOf(mysql.InstantAddLastColumnFlavorCapability)
+ require.NoError(t, err)
+ if instantDDLCapable {
+ t.Run("INSTANT DDL: postpone-completion", func(t *testing.T) {
+ t1uuid := testOnlineDDLStatement(t, createParams(instantAlterT1Statement, ddlStrategy+" --prefer-instant-ddl --postpone-completion", "vtgate", "", "", true))
+
+ t.Run("expect t1 queued", func(t *testing.T) {
+ // we want to validate that the migration remains queued even after some time passes. It must not move beyond 'queued'
+ time.Sleep(ensureStateNotChangedTime)
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ })
+ t.Run("complete t1", func(t *testing.T) {
+ // Issue a complete and wait for successful completion
+ onlineddl.CheckCompleteMigration(t, &vtParams, shards, t1uuid, true)
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
+ })
+ })
+ }
+ // 'mysql' strategy
+ t.Run("mysql strategy", func(t *testing.T) {
+ t.Run("declarative", func(t *testing.T) {
+ t1uuid = testOnlineDDLStatement(t, createParams(createT1Statement, "mysql --declarative", "vtgate", "just-created", "", false))
+
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, t1Name, true)
+ })
+
+ t.Run("fail postpone-completion", func(t *testing.T) {
+ t1uuid := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, "mysql --postpone-completion", "vtgate", "", "", true))
+
+ // --postpone-completion not supported in mysql strategy
+ time.Sleep(ensureStateNotChangedTime)
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusFailed)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusFailed)
+ })
+ t.Run("trivial", func(t *testing.T) {
+ t1uuid := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, "mysql", "vtgate", "", "", true))
+
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
+
+ rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ artifacts := row.AsString("artifacts", "-")
+ assert.Empty(t, artifacts)
+ }
+ })
+ t.Run("instant", func(t *testing.T) {
+ t1uuid := testOnlineDDLStatement(t, createParams(instantAlterT1Statement, "mysql", "vtgate", "", "", true))
+
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
+ })
+ })
+ // in-order-completion
+ t.Run("in-order-completion: multiple drops for nonexistent tables and views", func(t *testing.T) {
+ u, err := schema.CreateOnlineDDLUUID()
+ require.NoError(t, err)
+
+ sqls := []string{
+ fmt.Sprintf("drop table if exists t4_%s", u),
+ fmt.Sprintf("drop view if exists t1_%s", u),
+ fmt.Sprintf("drop table if exists t2_%s", u),
+ fmt.Sprintf("drop view if exists t3_%s", u),
+ }
+ sql := strings.Join(sqls, ";")
+ var vuuids []string
+ t.Run("drop multiple tables and views, in-order-completion", func(t *testing.T) {
+ uuidList := testOnlineDDLStatement(t, createParams(sql, ddlStrategy+" --allow-concurrent --in-order-completion", "vtctl", "", "", true)) // skip wait
+ vuuids = strings.Split(uuidList, "\n")
+ assert.Equal(t, 4, len(vuuids))
+ for _, uuid := range vuuids {
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ }
+ })
+ require.Equal(t, 4, len(vuuids))
+ for i := range vuuids {
+ if i > 0 {
+ testTableCompletionTimes(t, vuuids[i-1], vuuids[i])
+ }
+ }
+ })
+ t.Run("in-order-completion: two new views, one depends on the other", func(t *testing.T) {
+ u, err := schema.CreateOnlineDDLUUID()
+ require.NoError(t, err)
+ v2name := fmt.Sprintf("v2_%s", u)
+ createv2 := fmt.Sprintf("create view %s as select id from t1_test", v2name)
+ v1name := fmt.Sprintf("v1_%s", u)
+ createv1 := fmt.Sprintf("create view %s as select id from %s", v1name, v2name)
+
+ sql := fmt.Sprintf("%s; %s;", createv2, createv1)
+ var vuuids []string
+ t.Run("create two views, expect both complete", func(t *testing.T) {
+ uuidList := testOnlineDDLStatement(t, createParams(sql, ddlStrategy+" --allow-concurrent --in-order-completion", "vtctl", "", "", true)) // skip wait
+ vuuids = strings.Split(uuidList, "\n")
+ assert.Equal(t, 2, len(vuuids))
+ for _, uuid := range vuuids {
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ }
+ })
+ require.Equal(t, 2, len(vuuids))
+ testTableCompletionTimes(t, vuuids[0], vuuids[1])
+ })
+ t.Run("in-order-completion: new table column, new view depends on said column", func(t *testing.T) {
+ // The VIEW creation can only succeed when the ALTER has completed and the table has the new column
+ t1uuid = testOnlineDDLStatement(t, createParams(alterExtraColumn, ddlStrategy+" --allow-concurrent --postpone-completion --in-order-completion", "vtctl", "", "", true)) // skip wait
+ v1uuid := testOnlineDDLStatement(t, createParams(createViewDependsOnExtraColumn, ddlStrategy+" --allow-concurrent --postpone-completion --in-order-completion", "vtctl", "", "", true)) // skip wait
+
+ testAllowConcurrent(t, "t1", t1uuid, 1)
+ testAllowConcurrent(t, "v1", v1uuid, 1)
+ t.Run("expect table running, expect view ready", func(t *testing.T) {
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ time.Sleep(ensureStateNotChangedTime)
+ // nothing should change
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ })
+ t.Run("complete both", func(t *testing.T) {
+ onlineddl.CheckCompleteAllMigrations(t, &vtParams, len(shards)*2)
+ })
+ t.Run("expect table success", func(t *testing.T) {
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete)
+ })
+ t.Run("expect view success", func(t *testing.T) {
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, v1uuid, schema.OnlineDDLStatusComplete)
+ })
+ testTableCompletionTimes(t, t1uuid, v1uuid)
+ })
+}
+
+func testSingleton(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ shards = clusterInstance.Keyspaces[0].Shards
+ require.Equal(t, 1, len(shards))
+
+ createParams := func(ddlStatement string, ddlStrategy string, executeStrategy string, migrationContext string, expectHint string, expectError string, skipWait bool) *testOnlineDDLStatementParams {
+ return &testOnlineDDLStatementParams{
+ ddlStatement: ddlStatement,
+ ddlStrategy: ddlStrategy,
+ executeStrategy: executeStrategy,
+ migrationContext: migrationContext,
+ expectHint: expectHint,
+ expectError: expectError,
+ skipWait: skipWait,
+ }
+ }
+
+ createRevertParams := func(revertUUID string, ddlStrategy string, executeStrategy string, migrationContext string, expectError string, skipWait bool) *testRevertMigrationParams {
+ return &testRevertMigrationParams{
+ revertUUID: revertUUID,
+ executeStrategy: executeStrategy,
+ ddlStrategy: ddlStrategy,
+ migrationContext: migrationContext,
+ expectError: expectError,
+ skipWait: skipWait,
+ }
+ }
+
+ var (
+ tableName = `stress_test`
+ onlineSingletonDDLStrategy = "online --singleton"
+ onlineSingletonContextDDLStrategy = "online --singleton-context"
+ createStatement = `
+ CREATE TABLE stress_test (
+ id bigint(20) not null,
+ rand_val varchar(32) null default '',
+ hint_col varchar(64) not null default 'just-created',
+ created_timestamp timestamp not null default current_timestamp,
+ updates int unsigned not null default 0,
+ PRIMARY KEY (id),
+ key created_idx(created_timestamp),
+ key updates_idx(updates)
+ ) ENGINE=InnoDB
+ `
+ // We will run this query with "gh-ost --max-load=Threads_running=1"
+ alterTableThrottlingStatement = `
+ ALTER TABLE stress_test DROP COLUMN created_timestamp
+ `
+ multiAlterTableThrottlingStatement = `
+ ALTER TABLE stress_test ENGINE=InnoDB;
+ ALTER TABLE stress_test ENGINE=InnoDB;
+ ALTER TABLE stress_test ENGINE=InnoDB;
+ `
+ // A trivial statement which must succeed and does not change the schema
+ alterTableTrivialStatement = `
+ ALTER TABLE stress_test ENGINE=InnoDB
+ `
+ dropStatement = `
+ DROP TABLE stress_test
+`
+ dropIfExistsStatement = `
+DROP TABLE IF EXISTS stress_test
+`
+ dropNonexistentTableStatement = `
+ DROP TABLE IF EXISTS t_non_existent
+ `
+ multiDropStatements = `DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; DROP TABLE IF EXISTS t3;`
+ )
+
+ var uuids []string
+ // init-cleanup
+ t.Run("init DROP TABLE", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(dropIfExistsStatement, onlineSingletonDDLStrategy, "vtgate", "", "", "", false))
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, false)
+ })
+
+ // CREATE
+ t.Run("CREATE TABLE", func(t *testing.T) {
+ // The table does not exist
+ uuid := testOnlineDDLStatement(t, createParams(createStatement, onlineSingletonDDLStrategy, "vtgate", "", "", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, true)
+ })
+ t.Run("revert CREATE TABLE", func(t *testing.T) {
+ // The table existed, so it will now be dropped (renamed)
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1], onlineSingletonDDLStrategy, "vtgate", "", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, false)
+ })
+ t.Run("revert revert CREATE TABLE", func(t *testing.T) {
+ // Table was dropped (renamed) so it will now be restored
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1], onlineSingletonDDLStrategy, "vtgate", "", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, true)
+ })
+
+ var throttledUUID string
+ t.Run("throttled migration", func(t *testing.T) {
+ throttledUUID = testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "", false))
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning)
+ })
+ t.Run("failed singleton migration, vtgate", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "rejected", true))
+ assert.Empty(t, uuid)
+ })
+ t.Run("failed singleton migration, vtctl", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", true))
+ assert.Empty(t, uuid)
+ })
+ t.Run("failed revert migration", func(t *testing.T) {
+ uuid := testRevertMigration(t, createRevertParams(throttledUUID, onlineSingletonDDLStrategy, "vtgate", "", "rejected", true))
+ assert.Empty(t, uuid)
+ })
+ t.Run("terminate throttled migration", func(t *testing.T) {
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning)
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, throttledUUID, true)
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, throttledUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusCancelled)
+ })
+ t.Run("successful gh-ost alter, vtctl", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "gh-ost --singleton", "vtctl", "", "hint_col", "", false))
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
+ onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
+ })
+ t.Run("successful gh-ost alter, vtgate", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "gh-ost --singleton", "vtgate", "", "hint_col", "", false))
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
+ onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
+ })
+
+ t.Run("successful online alter, vtgate", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, onlineSingletonDDLStrategy, "vtgate", "", "hint_col", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
+ onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
+ checkTable(t, tableName, true)
+ })
+ t.Run("revert ALTER TABLE, vttablet", func(t *testing.T) {
+ // The table existed, so it will now be dropped (renamed)
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1], onlineSingletonDDLStrategy, "vtctl", "", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, true)
+ })
+
+ var throttledUUIDs []string
+ // singleton-context
+ t.Run("throttled migrations, singleton-context", func(t *testing.T) {
+ uuidList := testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "", false))
+ throttledUUIDs = strings.Split(uuidList, "\n")
+ assert.Equal(t, 3, len(throttledUUIDs))
+ for _, uuid := range throttledUUIDs {
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady, schema.OnlineDDLStatusRunning)
+ }
+ })
+ t.Run("failed migrations, singleton-context", func(t *testing.T) {
+ _ = testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", false))
+ })
+ t.Run("terminate throttled migrations", func(t *testing.T) {
+ for _, uuid := range throttledUUIDs {
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady, schema.OnlineDDLStatusRunning)
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true)
+ }
+ time.Sleep(2 * time.Second)
+ for _, uuid := range throttledUUIDs {
+ uuid = strings.TrimSpace(uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
+ }
+ })
+
+ t.Run("successful multiple statement, singleton-context, vtctl", func(t *testing.T) {
+ uuidList := testOnlineDDLStatement(t, createParams(multiDropStatements, onlineSingletonContextDDLStrategy, "vtctl", "", "", "", false))
+ uuidSlice := strings.Split(uuidList, "\n")
+ assert.Equal(t, 3, len(uuidSlice))
+ for _, uuid := range uuidSlice {
+ uuid = strings.TrimSpace(uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ }
+ })
+
+ //DROP
+
+ t.Run("online DROP TABLE", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(dropStatement, onlineSingletonDDLStrategy, "vtgate", "", "", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, false)
+ })
+ t.Run("revert DROP TABLE", func(t *testing.T) {
+ // This will recreate the table (well, actually, rename it back into place)
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1], onlineSingletonDDLStrategy, "vttablet", "", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, true)
+ })
+
+ t.Run("fail concurrent singleton, vtgate", func(t *testing.T) {
+ uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "vitess --postpone-completion --singleton", "vtgate", "", "hint_col", "", true))
+ uuids = append(uuids, uuid)
+ _ = testOnlineDDLStatement(t, createParams(dropNonexistentTableStatement, "vitess --singleton", "vtgate", "", "hint_col", "rejected", true))
+ onlineddl.CheckCompleteAllMigrations(t, &vtParams, len(shards))
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ })
+ t.Run("fail concurrent singleton-context with revert", func(t *testing.T) {
+ revertUUID := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1], "vitess --allow-concurrent --postpone-completion --singleton-context", "vtctl", "rev:ctx", "", false))
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusRunning)
+ // revert is running
+ _ = testOnlineDDLStatement(t, createParams(dropNonexistentTableStatement, "vitess --allow-concurrent --singleton-context", "vtctl", "migrate:ctx", "", "rejected", true))
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, revertUUID, true)
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, revertUUID, schema.OnlineDDLStatusCancelled)
+ })
+ t.Run("success concurrent singleton-context with no-context revert", func(t *testing.T) {
+ revertUUID := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1], "vitess --allow-concurrent --postpone-completion", "vtctl", "rev:ctx", "", false))
+ onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusRunning)
+ // revert is running but has no --singleton-context. Our next migration should be able to run.
+ uuid := testOnlineDDLStatement(t, createParams(dropNonexistentTableStatement, "vitess --allow-concurrent --singleton-context", "vtctl", "migrate:ctx", "", "", false))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckCancelMigration(t, &vtParams, shards, revertUUID, true)
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, revertUUID, schema.OnlineDDLStatusCancelled)
+ })
+}
+func testDeclarative(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ shards = clusterInstance.Keyspaces[0].Shards
+ require.Equal(t, 1, len(shards))
+
+ var (
+ tableName = `stress_test`
+ viewBaseTableName = `view_base_table_test`
+ viewName = `view_test`
+ migrationContext = "1111-2222-3333"
+ createStatement1 = `
+ CREATE TABLE stress_test (
+ id bigint(20) not null,
+ rand_val varchar(32) null default '',
+ hint_col varchar(64) not null default 'create1',
+ created_timestamp timestamp not null default current_timestamp,
+ updates int unsigned not null default 0,
+ PRIMARY KEY (id),
+ key created_idx(created_timestamp),
+ key updates_idx(updates)
+ ) ENGINE=InnoDB
+ `
+ createStatement2 = `
+ CREATE TABLE stress_test (
+ id bigint(20) not null,
+ rand_val varchar(32) null default '',
+ hint_col varchar(64) not null default 'create2',
+ created_timestamp timestamp not null default current_timestamp,
+ updates int unsigned not null default 0,
+ PRIMARY KEY (id),
+ key created_idx(created_timestamp),
+ key updates_idx(updates)
+ ) ENGINE=InnoDB
+ `
+ createIfNotExistsStatement = `
+ CREATE TABLE IF NOT EXISTS stress_test (
+ id bigint(20) not null,
+ PRIMARY KEY (id)
+ ) ENGINE=InnoDB
+ `
+ createStatementZeroDate = `
+ CREATE TABLE zerodate_test (
+ id bigint(20) not null,
+ hint_col varchar(64) not null default 'create_with_zero',
+ zero_datetime datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
+ PRIMARY KEY (id)
+ ) ENGINE=InnoDB
+ `
+ createStatementZeroDate2 = `
+ CREATE TABLE zerodate_test (
+ id bigint(20) not null,
+ i int not null default 0,
+ hint_col varchar(64) not null default 'create_with_zero2',
+ zero_datetime datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
+ zero_datetime2 datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
+ PRIMARY KEY (id)
+ ) ENGINE=InnoDB
+ `
+ dropZeroDateStatement = `
+ DROP TABLE zerodate_test
+ `
+ dropStatement = `
+ DROP TABLE stress_test
+ `
+ dropIfExistsStatement = `
+ DROP TABLE IF EXISTS stress_test
+ `
+ alterStatement = `
+ ALTER TABLE stress_test modify hint_col varchar(64) not null default 'this-should-fail'
+ `
+ trivialAlterStatement = `
+ ALTER TABLE stress_test ENGINE=InnoDB
+ `
+ dropViewBaseTableStatement = `
+ DROP TABLE IF EXISTS view_base_table_test
+ `
+ createViewBaseTableStatement = `
+ CREATE TABLE view_base_table_test (id INT PRIMARY KEY)
+ `
+ createViewStatement1 = `
+ CREATE VIEW view_test AS SELECT 'success_create1' AS msg FROM view_base_table_test
+ `
+ createViewStatement2 = `
+ CREATE VIEW view_test AS SELECT 'success_create2' AS msg FROM view_base_table_test
+ `
+ createOrReplaceViewStatement = `
+ CREATE OR REPLACE VIEW view_test AS SELECT 'success_replace' AS msg FROM view_base_table_test
+ `
+ alterViewStatement = `
+ ALTER VIEW view_test AS SELECT 'success_alter' AS msg FROM view_base_table_test
+ `
+ dropViewStatement = `
+ DROP VIEW view_test
+ `
+ dropViewIfExistsStatement = `
+ DROP VIEW IF EXISTS view_test
+ `
+ insertRowStatement = `
+ INSERT IGNORE INTO stress_test (id, rand_val) VALUES (%d, left(md5(rand()), 8))
+ `
+ updateRowStatement = `
+ UPDATE stress_test SET updates=updates+1 WHERE id=%d
+ `
+ deleteRowStatement = `
+ DELETE FROM stress_test WHERE id=%d AND updates=1
+ `
+ // We use CAST(SUM(updates) AS SIGNED) because SUM() returns a DECIMAL datatype, and we want to read a SIGNED INTEGER type
+ selectCountRowsStatement = `
+ SELECT COUNT(*) AS num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM stress_test
+ `
+ truncateStatement = `
+ TRUNCATE TABLE stress_test
+ `
+ writeMetrics WriteMetrics
+ maxTableRows = 4096
+ )
+
+ declarativeStrategy := "online -declarative"
+ var uuids []string
+
+ generateInsert := func(t *testing.T, conn *mysql.Conn) error {
+ id := rand.Int31n(int32(maxTableRows))
+ query := fmt.Sprintf(insertRowStatement, id)
+ qr, err := conn.ExecuteFetch(query, 1000, true)
+
+ func() {
+ writeMetrics.mu.Lock()
+ defer writeMetrics.mu.Unlock()
+
+ writeMetrics.insertsAttempts++
+ if err != nil {
+ writeMetrics.insertsFailures++
+ return
+ }
+ assert.Less(t, qr.RowsAffected, uint64(2))
+ if qr.RowsAffected == 0 {
+ writeMetrics.insertsNoops++
+ return
+ }
+ writeMetrics.inserts++
+ }()
+ return err
+ }
+
+ generateUpdate := func(t *testing.T, conn *mysql.Conn) error {
+ id := rand.Int31n(int32(maxTableRows))
+ query := fmt.Sprintf(updateRowStatement, id)
+ qr, err := conn.ExecuteFetch(query, 1000, true)
+
+ func() {
+ writeMetrics.mu.Lock()
+ defer writeMetrics.mu.Unlock()
+
+ writeMetrics.updatesAttempts++
+ if err != nil {
+ writeMetrics.updatesFailures++
+ return
+ }
+ assert.Less(t, qr.RowsAffected, uint64(2))
+ if qr.RowsAffected == 0 {
+ writeMetrics.updatesNoops++
+ return
+ }
+ writeMetrics.updates++
+ }()
+ return err
+ }
+
+ generateDelete := func(t *testing.T, conn *mysql.Conn) error {
+ id := rand.Int31n(int32(maxTableRows))
+ query := fmt.Sprintf(deleteRowStatement, id)
+ qr, err := conn.ExecuteFetch(query, 1000, true)
+
+ func() {
+ writeMetrics.mu.Lock()
+ defer writeMetrics.mu.Unlock()
+
+ writeMetrics.deletesAttempts++
+ if err != nil {
+ writeMetrics.deletesFailures++
+ return
+ }
+ assert.Less(t, qr.RowsAffected, uint64(2))
+ if qr.RowsAffected == 0 {
+ writeMetrics.deletesNoops++
+ return
+ }
+ writeMetrics.deletes++
+ }()
+ return err
+ }
+
+ initTable := func(t *testing.T) {
+ log.Infof("initTable begin")
+ defer log.Infof("initTable complete")
+
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.Nil(t, err)
+ defer conn.Close()
+
+ writeMetrics.Clear()
+ _, err = conn.ExecuteFetch(truncateStatement, 1000, true)
+ require.Nil(t, err)
+
+ for i := 0; i < maxTableRows/2; i++ {
+ generateInsert(t, conn)
+ }
+ for i := 0; i < maxTableRows/4; i++ {
+ generateUpdate(t, conn)
+ }
+ for i := 0; i < maxTableRows/4; i++ {
+ generateDelete(t, conn)
+ }
+ }
+
+ testSelectTableMetrics := func(t *testing.T) {
+ writeMetrics.mu.Lock()
+ defer writeMetrics.mu.Unlock()
+
+ log.Infof("%s", writeMetrics.String())
+
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.Nil(t, err)
+ defer conn.Close()
+
+ rs, err := conn.ExecuteFetch(selectCountRowsStatement, 1000, true)
+ require.Nil(t, err)
+
+ row := rs.Named().Row()
+ require.NotNil(t, row)
+ log.Infof("testSelectTableMetrics, row: %v", row)
+ numRows := row.AsInt64("num_rows", 0)
+ sumUpdates := row.AsInt64("sum_updates", 0)
+
+ assert.NotZero(t, numRows)
+ assert.NotZero(t, sumUpdates)
+ assert.NotZero(t, writeMetrics.inserts)
+ assert.NotZero(t, writeMetrics.deletes)
+ assert.NotZero(t, writeMetrics.updates)
+ assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows)
+ assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1
+ }
+
+ testOnlineDDL := func(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string) (uuid string) {
+ params := &testOnlineDDLStatementParams{
+ ddlStatement: alterStatement,
+ ddlStrategy: ddlStrategy,
+ executeStrategy: executeStrategy,
+ expectHint: expectHint,
+ expectError: expectError,
+ }
+ if executeStrategy != "vtgate" {
+ params.migrationContext = migrationContext
+ }
+ return testOnlineDDLStatement(t, params)
+ }
+ createRevertParams := func(revertUUID string) *testRevertMigrationParams {
+ return &testRevertMigrationParams{
+ revertUUID: revertUUID,
+ executeStrategy: "vtctl",
+ ddlStrategy: string(schema.DDLStrategyOnline),
+ }
+ }
+
+ // init-cleaup
+ t.Run("init: drop table", func(t *testing.T) {
+ // IF EXISTS is not supported in -declarative
+ uuid := testOnlineDDL(t, dropIfExistsStatement, "online", "vtgate", "", "")
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ })
+ t.Run("init: drop view base table", func(t *testing.T) {
+ // IF EXISTS is not supported in -declarative
+ uuid := testOnlineDDL(t, dropViewBaseTableStatement, "online", "vtgate", "", "")
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ })
+
+ // VIEWS
+ t.Run("create base table for view", func(t *testing.T) {
+ uuid := testOnlineDDL(t, createViewBaseTableStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewBaseTableName, true)
+ })
+ // CREATE VIEW 1
+ t.Run("declarative CREATE VIEW where table does not exist", func(t *testing.T) {
+ // The table does not exist
+ uuid := testOnlineDDL(t, createViewStatement1, declarativeStrategy, "vtgate", "success_create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, viewName, true)
+ })
+ // CREATE VIEW 1 again, noop
+ t.Run("declarative CREATE VIEW with no changes where view exists", func(t *testing.T) {
+ // The exists with exact same schema
+ uuid := testOnlineDDL(t, createViewStatement1, declarativeStrategy, "vtgate", "success_create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, false)
+ checkTable(t, viewName, true)
+ })
+ t.Run("revert CREATE VIEW expecting noop", func(t *testing.T) {
+ // Reverting a noop changes nothing
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, viewName, "success_create1")
+ checkTable(t, viewName, true)
+ })
+ // CREATE OR REPLACE VIEW
+ t.Run("CREATE OR REPLACE VIEW expecting failure", func(t *testing.T) {
+ // IF NOT EXISTS is not supported in -declarative
+ uuid := testOnlineDDL(t, createOrReplaceViewStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed)
+ checkMigratedTable(t, viewName, "success_create1")
+ checkTable(t, viewName, true)
+ })
+ t.Run("ALTER VIEW expecting failure", func(t *testing.T) {
+ // IF NOT EXISTS is not supported in -declarative
+ uuid := testOnlineDDL(t, alterViewStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed)
+ checkMigratedTable(t, viewName, "success_create1")
+ checkTable(t, viewName, true)
+ })
+ t.Run("DROP VIEW IF EXISTS expecting failure", func(t *testing.T) {
+ // IF NOT EXISTS is not supported in -declarative
+ uuid := testOnlineDDL(t, dropViewIfExistsStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed)
+ checkMigratedTable(t, viewName, "success_create1")
+ checkTable(t, viewName, true)
+ })
+ t.Run("declarative DROP VIEW", func(t *testing.T) {
+ uuid := testOnlineDDL(t, dropViewStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, viewName, false)
+ })
+ // View dropped. Let's start afresh.
+
+ // CREATE VIEW1
+ t.Run("declarative CREATE VIEW where view does not exist", func(t *testing.T) {
+ // The table does not exist
+ uuid := testOnlineDDL(t, createViewStatement1, declarativeStrategy, "vtgate", "success_create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, viewName, true)
+ })
+ // CREATE VIEW2: Change view
+ t.Run("declarative CREATE VIEW with changes where view exists", func(t *testing.T) {
+ // The table exists with different schema
+ uuid := testOnlineDDL(t, createViewStatement2, declarativeStrategy, "vtgate", "success_create2", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, viewName, true)
+ })
+ t.Run("revert CREATE VIEW expecting previous schema", func(t *testing.T) {
+ // Reverting back to 1st version
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, viewName, "success_create1")
+ checkTable(t, viewName, true)
+ })
+ t.Run("declarative DROP VIEW", func(t *testing.T) {
+ // Table exists
+ uuid := testOnlineDDL(t, dropViewStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, viewName, false)
+ })
+ t.Run("revert DROP VIEW", func(t *testing.T) {
+ // This will recreate the table (well, actually, rename it back into place)
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewName, true)
+ checkMigratedTable(t, viewName, "success_create1")
+ })
+ t.Run("revert revert DROP VIEW", func(t *testing.T) {
+ // This will reapply DROP VIEW
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewName, false)
+ })
+ t.Run("declarative DROP VIEW where view does not exist", func(t *testing.T) {
+ uuid := testOnlineDDL(t, dropViewStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, false)
+ checkTable(t, viewName, false)
+ })
+ t.Run("revert DROP VIEW where view did not exist", func(t *testing.T) {
+ // Table will not be recreated because it didn't exist during the previous DROP VIEW
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewName, false)
+ })
+ // View dropped. Let's start afresh.
+
+ // TABLES
+
+ // CREATE1
+ t.Run("declarative CREATE TABLE where table does not exist", func(t *testing.T) {
+ // The table does not exist
+ uuid := testOnlineDDL(t, createStatement1, declarativeStrategy, "vtgate", "create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, true)
+ initTable(t)
+ testSelectTableMetrics(t)
+ })
+ // CREATE1 again, noop
+ t.Run("declarative CREATE TABLE with no changes where table exists", func(t *testing.T) {
+ // The exists with exact same schema
+ uuid := testOnlineDDL(t, createStatement1, declarativeStrategy, "vtgate", "create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, false)
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("revert CREATE TABLE expecting noop", func(t *testing.T) {
+ // Reverting a noop changes nothing
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, tableName, "create1")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("declarative DROP TABLE", func(t *testing.T) {
+ uuid := testOnlineDDL(t, dropStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, false)
+ })
+ // Table dropped. Let's start afresh.
+
+ // CREATE1
+ t.Run("declarative CREATE TABLE where table does not exist", func(t *testing.T) {
+ // The table does not exist
+ uuid := testOnlineDDL(t, createStatement1, declarativeStrategy, "vtgate", "create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, true)
+ initTable(t)
+ testSelectTableMetrics(t)
+ })
+ // CREATE2: Change schema
+ t.Run("declarative CREATE TABLE with changes where table exists", func(t *testing.T) {
+ // The table exists with different schema
+ uuid := testOnlineDDL(t, createStatement2, declarativeStrategy, "vtgate", "create2", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("revert CREATE TABLE expecting previous schema", func(t *testing.T) {
+ // Reverting back to 1st version
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, tableName, "create1")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("declarative DROP TABLE", func(t *testing.T) {
+ // Table exists
+ uuid := testOnlineDDL(t, dropStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, false)
+ })
+ t.Run("revert DROP TABLE", func(t *testing.T) {
+ // This will recreate the table (well, actually, rename it back into place)
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, true)
+ checkMigratedTable(t, tableName, "create1")
+ testSelectTableMetrics(t)
+ })
+ t.Run("revert revert DROP TABLE", func(t *testing.T) {
+ // This will reapply DROP TABLE
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, false)
+ })
+ t.Run("declarative DROP TABLE where table does not exist", func(t *testing.T) {
+ uuid := testOnlineDDL(t, dropStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, false)
+ checkTable(t, tableName, false)
+ })
+ t.Run("revert DROP TABLE where table did not exist", func(t *testing.T) {
+ // Table will not be recreated because it didn't exist during the previous DROP TABLE
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, tableName, false)
+ })
+ // Table dropped. Let's start afresh.
+
+ // CREATE1
+ t.Run("declarative CREATE TABLE where table does not exist", func(t *testing.T) {
+ // The table does not exist
+ uuid := testOnlineDDL(t, createStatement1, declarativeStrategy, "vtgate", "create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, true)
+ initTable(t)
+ testSelectTableMetrics(t)
+ })
+ // CREATE2
+ t.Run("declarative CREATE TABLE with changes where table exists", func(t *testing.T) {
+ // The table exists but with different schema
+ uuid := testOnlineDDL(t, createStatement2, declarativeStrategy, "vtgate", "create2", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ // CREATE1 again
+ t.Run("declarative CREATE TABLE again with changes where table exists", func(t *testing.T) {
+ // The table exists but with different schema
+ uuid := testOnlineDDL(t, createStatement1, declarativeStrategy, "vtgate", "create1", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true)
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("revert CREATE TABLE expecting previous schema", func(t *testing.T) {
+ // Reverting back to previous version
+ uuid := testRevertMigration(t, createRevertParams(uuids[len(uuids)-1]))
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, tableName, "create2")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("ALTER TABLE expecting failure", func(t *testing.T) {
+ // ALTER is not supported in -declarative
+ uuid := testOnlineDDL(t, alterStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed)
+ checkMigratedTable(t, tableName, "create2")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("CREATE TABLE IF NOT EXISTS expecting failure", func(t *testing.T) {
+ // IF NOT EXISTS is not supported in -declarative
+ uuid := testOnlineDDL(t, createIfNotExistsStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed)
+ checkMigratedTable(t, tableName, "create2")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("DROP TABLE IF EXISTS expecting failure", func(t *testing.T) {
+ // IF EXISTS is not supported in -declarative
+ uuid := testOnlineDDL(t, dropIfExistsStatement, declarativeStrategy, "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed)
+ checkMigratedTable(t, tableName, "create2")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("CREATE TABLE IF NOT EXISTS non-declarative is successful", func(t *testing.T) {
+ // IF NOT EXISTS is supported in non-declarative mode. Just verifying that the statement itself is good,
+ // so that the failure we tested for, above, actually tests the "declarative" logic, rather than some
+ // unrelated error.
+ uuid := testOnlineDDL(t, createIfNotExistsStatement, "online", "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ // the table existed, so we expect no changes in this non-declarative DDL
+ checkMigratedTable(t, tableName, "create2")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("CREATE TABLE with zero date and --allow-zero-in-date is successful", func(t *testing.T) {
+ uuid := testOnlineDDL(t, createStatementZeroDate, "online --allow-zero-in-date", "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, "zerodate_test", "create_with_zero")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("CREATE TABLE with zero date and --allow-zero-in-date is successful", func(t *testing.T) {
+ uuid := testOnlineDDL(t, createStatementZeroDate, "online -declarative --allow-zero-in-date", "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, "zerodate_test", "create_with_zero")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+ t.Run("CREATE TABLE with zero date and --allow-zero-in-date is successful", func(t *testing.T) {
+ uuid := testOnlineDDL(t, createStatementZeroDate2, "online -declarative --allow-zero-in-date", "vtgate", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkMigratedTable(t, "zerodate_test", "create_with_zero2")
+ checkTable(t, tableName, true)
+ testSelectTableMetrics(t)
+ })
+
+ // ### The following tests are not strictly 'declarative' but are best served under this endtoend test
+
+ // Test duplicate context/SQL
+ t.Run("Trivial statement with request context is successful", func(t *testing.T) {
+ uuid := testOnlineDDL(t, trivialAlterStatement, "online", "vtctl", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ // the table existed, so we expect no changes in this non-declarative DDL
+ checkTable(t, tableName, true)
+
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ message := row["message"].ToString()
+ require.NotContains(t, message, "duplicate DDL")
+ }
+ })
+ t.Run("Duplicate trivial statement with request context is successful", func(t *testing.T) {
+ uuid := testOnlineDDL(t, trivialAlterStatement, "online", "vtctl", "", "")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ // the table existed, so we expect no changes in this non-declarative DDL
+ checkTable(t, tableName, true)
+
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ message := row["message"].ToString()
+ // Message suggests that the migration was identified as duplicate
+ require.Contains(t, message, "duplicate DDL")
+ }
+ })
+ // Piggyride this test suite, let's also test --allow-zero-in-date for 'direct' strategy
+ t.Run("drop non_online", func(t *testing.T) {
+ _ = testOnlineDDL(t, dropZeroDateStatement, "direct", "vtctl", "", "")
+ checkTable(t, "zerodate_test", false)
+ })
+ t.Run("CREATE TABLE with zero date fails in 'direct' strategy", func(t *testing.T) {
+ _ = testOnlineDDL(t, createStatementZeroDate, "direct", "vtctl", "", "Invalid default value for")
+ checkTable(t, "zerodate_test", false)
+ })
+ t.Run("CREATE TABLE with zero date and --allow-zero-in-date succeeds in 'direct' strategy", func(t *testing.T) {
+ _ = testOnlineDDL(t, createStatementZeroDate, "direct --allow-zero-in-date", "vtctl", "", "")
+ checkTable(t, "zerodate_test", true)
+ })
+}
+
+func testForeignKeys(t *testing.T) {
+ defer cluster.PanicHandler(t)
+
+ var (
+ createStatements = []string{
+ `
+ CREATE TABLE parent_table (
+ id INT NOT NULL,
+ parent_hint_col INT NOT NULL DEFAULT 0,
+ PRIMARY KEY (id)
+ )
+ `,
+ `
+ CREATE TABLE child_table (
+ id INT NOT NULL auto_increment,
+ parent_id INT,
+ child_hint_col INT NOT NULL DEFAULT 0,
+ PRIMARY KEY (id),
+ KEY parent_id_idx (parent_id),
+ CONSTRAINT child_parent_fk FOREIGN KEY (parent_id) REFERENCES parent_table(id) ON DELETE CASCADE
+ )
+ `,
+ `
+ CREATE TABLE child_nofk_table (
+ id INT NOT NULL auto_increment,
+ parent_id INT,
+ child_hint_col INT NOT NULL DEFAULT 0,
+ PRIMARY KEY (id),
+ KEY parent_id_idx (parent_id)
+ )
+ `,
+ }
+ insertStatements = []string{
+ "insert into parent_table (id) values(43)",
+ "insert into child_table (id, parent_id) values(1,43)",
+ "insert into child_table (id, parent_id) values(2,43)",
+ "insert into child_table (id, parent_id) values(3,43)",
+ "insert into child_table (id, parent_id) values(4,43)",
+ }
+ ddlStrategy = "online --allow-zero-in-date"
+ ddlStrategyAllowFK = ddlStrategy + " --unsafe-allow-foreign-keys"
+ )
+
+ type testCase struct {
+ name string
+ sql string
+ allowForeignKeys bool
+ expectHint string
+ }
+ var testCases = []testCase{
+ {
+ name: "modify parent, not allowed",
+ sql: "alter table parent_table engine=innodb",
+ allowForeignKeys: false,
+ },
+ {
+ name: "modify child, not allowed",
+ sql: "alter table child_table engine=innodb",
+ allowForeignKeys: false,
+ },
+ {
+ name: "add foreign key to child, not allowed",
+ sql: "alter table child_table add CONSTRAINT another_fk FOREIGN KEY (parent_id) REFERENCES parent_table(id) ON DELETE CASCADE",
+ allowForeignKeys: false,
+ },
+ {
+ name: "add foreign key to table which wasn't a child before, not allowed",
+ sql: "alter table child_nofk_table add CONSTRAINT new_fk FOREIGN KEY (parent_id) REFERENCES parent_table(id) ON DELETE CASCADE",
+ allowForeignKeys: false,
+ },
+ {
+ // on vanilla MySQL, this migration ends with the child_table referencing the old, original table, and not to the new table now called parent_table.
+ // This is a fundamental foreign key limitation, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/
+ // However, this tests is still valid in the sense that it lets us modify the parent table in the first place.
+ name: "modify parent, trivial",
+ sql: "alter table parent_table engine=innodb",
+ allowForeignKeys: true,
+ expectHint: "parent_hint_col",
+ },
+ {
+ // on vanilla MySQL, this migration ends with two tables, the original and the new child_table, both referencing parent_table. This has
+ // the unwanted property of then limiting actions on the parent_table based on what rows exist or do not exist on the now stale old
+ // child table.
+ // This is a fundamental foreign key limitation, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/
+ // However, this tests is still valid in the sense that it lets us modify the child table in the first place.
+ // A valid use case: using FOREIGN_KEY_CHECKS=0 at all times.
+ name: "modify child, trivial",
+ sql: "alter table child_table engine=innodb",
+ allowForeignKeys: true,
+ expectHint: "REFERENCES `parent_table`",
+ },
+ {
+ // on vanilla MySQL, this migration ends with two tables, the original and the new child_table, both referencing parent_table. This has
+ // the unwanted property of then limiting actions on the parent_table based on what rows exist or do not exist on the now stale old
+ // child table.
+ // This is a fundamental foreign key limitation, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/
+ // However, this tests is still valid in the sense that it lets us modify the child table in the first place.
+ // A valid use case: using FOREIGN_KEY_CHECKS=0 at all times.
+ name: "add foreign key to child",
+ sql: "alter table child_table add CONSTRAINT another_fk FOREIGN KEY (parent_id) REFERENCES parent_table(id) ON DELETE CASCADE",
+ allowForeignKeys: true,
+ expectHint: "another_fk",
+ },
+ {
+ name: "add foreign key to table which wasn't a child before",
+ sql: "alter table child_nofk_table add CONSTRAINT new_fk FOREIGN KEY (parent_id) REFERENCES parent_table(id) ON DELETE CASCADE",
+ allowForeignKeys: true,
+ expectHint: "new_fk",
+ },
+ }
+
+ createParams := func(ddlStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string, skipWait bool) *testOnlineDDLStatementParams {
+ return &testOnlineDDLStatementParams{
+ ddlStatement: ddlStatement,
+ ddlStrategy: ddlStrategy,
+ executeStrategy: executeStrategy,
+ expectHint: expectHint,
+ expectError: expectError,
+ skipWait: skipWait,
+ }
+ }
+
+ testStatement := func(t *testing.T, sql string, ddlStrategy string, expectHint string, expectError bool) (uuid string) {
+ errorHint := ""
+ if expectError {
+ errorHint = anyErrorIndicator
+ }
+ return testOnlineDDLStatement(t, createParams(sql, ddlStrategy, "vtctl", expectHint, errorHint, false))
+ }
+ for _, testcase := range testCases {
+ t.Run(testcase.name, func(t *testing.T) {
+ t.Run("create tables", func(t *testing.T) {
+ for _, statement := range createStatements {
+ t.Run(statement, func(t *testing.T) {
+ uuid := testStatement(t, statement, ddlStrategyAllowFK, "", false)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ })
+ }
+ })
+ t.Run("populate tables", func(t *testing.T) {
+ for _, statement := range insertStatements {
+ t.Run(statement, func(t *testing.T) {
+ onlineddl.VtgateExecQuery(t, &vtParams, statement, "")
+ })
+ }
+ })
+ var uuid string
+ t.Run("run migration", func(t *testing.T) {
+ if testcase.allowForeignKeys {
+ uuid = testStatement(t, testcase.sql, ddlStrategyAllowFK, testcase.expectHint, false)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ } else {
+ uuid = testStatement(t, testcase.sql, ddlStrategy, "", true)
+ if uuid != "" {
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed)
+ }
+ }
+ })
+ t.Run("cleanup", func(t *testing.T) {
+ var artifacts []string
+ if uuid != "" {
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ row := rs.Named().Row()
+ require.NotNil(t, row)
+
+ artifacts = textutil.SplitDelimitedList(row.AsString("artifacts", ""))
+ }
+
+ artifacts = append(artifacts, "child_table", "child_nofk_table", "parent_table")
+ // brute force drop all tables. In MySQL 8.0 you can do a single `DROP TABLE ... `
+ // which auto-resovled order. But in 5.7 you can't.
+ droppedTables := map[string]bool{}
+ for range artifacts {
+ for _, artifact := range artifacts {
+ if droppedTables[artifact] {
+ continue
+ }
+ statement := fmt.Sprintf("DROP TABLE IF EXISTS %s", artifact)
+ _, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, statement, cluster.VtctlClientParams{DDLStrategy: "direct", SkipPreflight: true})
+ if err == nil {
+ droppedTables[artifact] = true
+ }
+ }
+ }
+ statement := fmt.Sprintf("DROP TABLE IF EXISTS %s", strings.Join(artifacts, ","))
+ t.Run(statement, func(t *testing.T) {
+ testStatement(t, statement, "direct", "", false)
+ })
+ })
+ })
+ }
}
// testOnlineDDLStatement runs an online DDL, ALTER statement
-func testOnlineDDLStatement(t *testing.T, ddlStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string, skipWait bool) (uuid string) {
- strategySetting, err := schema.ParseDDLStrategy(ddlStrategy)
+func testOnlineDDLStatement(t *testing.T, params *testOnlineDDLStatementParams) (uuid string) {
+ strategySetting, err := schema.ParseDDLStrategy(params.ddlStrategy)
require.NoError(t, err)
- stmt, err := sqlparser.Parse(ddlStatement)
- require.NoError(t, err)
- ddlStmt, ok := stmt.(sqlparser.DDLStatement)
- require.True(t, ok)
- tableName := ddlStmt.GetTable().Name.String()
+ tableName := parseTableName(t, params.ddlStatement)
- if executeStrategy == "vtgate" {
- result := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, ddlStatement, expectError)
+ if params.executeStrategy == "vtgate" {
+ require.Empty(t, params.migrationContext, "explicit migration context not supported in vtgate. Test via vtctl")
+ result := onlineddl.VtgateExecDDL(t, &vtParams, params.ddlStrategy, params.ddlStatement, params.expectError)
if result != nil {
row := result.Named().Row()
if row != nil {
@@ -661,39 +2137,48 @@ func testOnlineDDLStatement(t *testing.T, ddlStatement string, ddlStrategy strin
}
}
} else {
- params := &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true}
+ vtctlParams := &cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext, SkipPreflight: true}
if overrideVtctlParams != nil {
- params = overrideVtctlParams
+ vtctlParams = overrideVtctlParams
}
- output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, ddlStatement, *params)
- if expectError == "" {
+ output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, params.ddlStatement, *vtctlParams)
+ switch params.expectError {
+ case anyErrorIndicator:
+ if err != nil {
+ // fine. We got any error.
+ t.Logf("expected any error, got this error: %v", err)
+ return
+ }
+ uuid = output
+ case "":
assert.NoError(t, err)
uuid = output
- } else {
+ default:
assert.Error(t, err)
- assert.Contains(t, output, expectError)
+ assert.Contains(t, output, params.expectError)
}
}
uuid = strings.TrimSpace(uuid)
fmt.Println("# Generated UUID (for debug purposes):")
fmt.Printf("<%s>\n", uuid)
- if !strategySetting.Strategy.IsDirect() && !skipWait {
+ if !strategySetting.Strategy.IsDirect() && !params.skipWait && uuid != "" {
status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
}
- if expectError == "" && expectHint != "" {
- checkMigratedTable(t, tableName, expectHint)
+ if params.expectError == "" && params.expectHint != "" {
+ checkMigratedTable(t, tableName, params.expectHint)
}
return uuid
}
// testRevertMigration reverts a given migration
-func testRevertMigration(t *testing.T, revertUUID string, ddlStrategy, executeStrategy string, expectError string, skipWait bool) (uuid string) {
- revertQuery := fmt.Sprintf("revert vitess_migration '%s'", revertUUID)
- if executeStrategy == "vtgate" {
- result := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, revertQuery, expectError)
+func testRevertMigration(t *testing.T, params *testRevertMigrationParams) (uuid string) {
+ revertQuery := fmt.Sprintf("revert vitess_migration '%s'", params.revertUUID)
+ if params.executeStrategy == "vtgate" {
+ require.Empty(t, params.migrationContext, "explicit migration context not supported in vtgate. Test via vtctl")
+ result := onlineddl.VtgateExecDDL(t, &vtParams, params.ddlStrategy, revertQuery, params.expectError)
if result != nil {
row := result.Named().Row()
if row != nil {
@@ -701,22 +2186,22 @@ func testRevertMigration(t *testing.T, revertUUID string, ddlStrategy, executeSt
}
}
} else {
- output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true})
- if expectError == "" {
+ output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext, SkipPreflight: true})
+ if params.expectError == "" {
assert.NoError(t, err)
uuid = output
} else {
assert.Error(t, err)
- assert.Contains(t, output, expectError)
+ assert.Contains(t, output, params.expectError)
}
}
- if expectError == "" {
+ if params.expectError == "" {
uuid = strings.TrimSpace(uuid)
fmt.Println("# Generated UUID (for debug purposes):")
fmt.Printf("<%s>\n", uuid)
}
- if !skipWait {
+ if !params.skipWait {
time.Sleep(time.Second * 20)
}
return uuid
@@ -758,7 +2243,7 @@ func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName s
require.Nil(t, err)
assert.Equal(t, len(queryResult.Rows), 1)
- assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement
+ assert.GreaterOrEqual(t, len(queryResult.Rows[0]), 2) // table name, create statement, and if it's a view then additional columns
statement = queryResult.Rows[0][1].ToString()
return statement
}
diff --git a/go/test/endtoend/onlineddl/singleton/onlineddl_singleton_test.go b/go/test/endtoend/onlineddl/singleton/onlineddl_singleton_test.go
deleted file mode 100644
index 227c667dfcc..00000000000
--- a/go/test/endtoend/onlineddl/singleton/onlineddl_singleton_test.go
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package singleton
-
-import (
- "flag"
- "fmt"
- "os"
- "path"
- "strings"
- "testing"
- "time"
-
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/vt/schema"
-
- "vitess.io/vitess/go/test/endtoend/cluster"
- "vitess.io/vitess/go/test/endtoend/onlineddl"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-var (
- clusterInstance *cluster.LocalProcessCluster
- shards []cluster.Shard
- vtParams mysql.ConnParams
-
- hostname = "localhost"
- keyspaceName = "ks"
- cell = "zone1"
- schemaChangeDirectory = ""
- tableName = `stress_test`
- onlineSingletonDDLStrategy = "online --singleton"
- onlineSingletonContextDDLStrategy = "online --singleton-context"
- createStatement = `
- CREATE TABLE stress_test (
- id bigint(20) not null,
- rand_val varchar(32) null default '',
- hint_col varchar(64) not null default 'just-created',
- created_timestamp timestamp not null default current_timestamp,
- updates int unsigned not null default 0,
- PRIMARY KEY (id),
- key created_idx(created_timestamp),
- key updates_idx(updates)
- ) ENGINE=InnoDB
- `
- // We will run this query with "gh-ost --max-load=Threads_running=1"
- alterTableThrottlingStatement = `
- ALTER TABLE stress_test DROP COLUMN created_timestamp
- `
- multiAlterTableThrottlingStatement = `
- ALTER TABLE stress_test ENGINE=InnoDB;
- ALTER TABLE stress_test ENGINE=InnoDB;
- ALTER TABLE stress_test ENGINE=InnoDB;
- `
- // A trivial statement which must succeed and does not change the schema
- alterTableTrivialStatement = `
- ALTER TABLE stress_test ENGINE=InnoDB
- `
- dropStatement = `
- DROP TABLE stress_test
- `
- dropNonexistentTableStatement = `
- DROP TABLE IF EXISTS t_non_existent
- `
- multiDropStatements = `DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; DROP TABLE IF EXISTS t3;`
-)
-
-func TestMain(m *testing.M) {
- defer cluster.PanicHandler(nil)
- flag.Parse()
-
- exitcode, err := func() (int, error) {
- clusterInstance = cluster.NewCluster(cell, hostname)
- schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID()))
- defer os.RemoveAll(schemaChangeDirectory)
- defer clusterInstance.Teardown()
-
- if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) {
- _ = os.Mkdir(schemaChangeDirectory, 0700)
- }
-
- clusterInstance.VtctldExtraArgs = []string{
- "--schema_change_dir", schemaChangeDirectory,
- "--schema_change_controller", "local",
- "--schema_change_check_interval", "1"}
-
- clusterInstance.VtTabletExtraArgs = []string{
- "--enable-lag-throttler",
- "--throttle_threshold", "1s",
- "--heartbeat_enable",
- "--heartbeat_interval", "250ms",
- "--heartbeat_on_demand_duration", "5s",
- "--watch_replication_stream",
- }
- clusterInstance.VtGateExtraArgs = []string{}
-
- if err := clusterInstance.StartTopo(); err != nil {
- return 1, err
- }
-
- // Start keyspace
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- }
-
- // No need for replicas in this stress test
- if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil {
- return 1, err
- }
-
- vtgateInstance := clusterInstance.NewVtgateInstance()
- // Start vtgate
- if err := vtgateInstance.Setup(); err != nil {
- return 1, err
- }
- // ensure it is torn down during cluster TearDown
- clusterInstance.VtgateProcess = *vtgateInstance
- vtParams = mysql.ConnParams{
- Host: clusterInstance.Hostname,
- Port: clusterInstance.VtgateMySQLPort,
- }
-
- return m.Run(), nil
- }()
- if err != nil {
- fmt.Printf("%v\n", err)
- os.Exit(1)
- } else {
- os.Exit(exitcode)
- }
-
-}
-
-func TestSchemaChange(t *testing.T) {
- defer cluster.PanicHandler(t)
- shards = clusterInstance.Keyspaces[0].Shards
- require.Equal(t, 1, len(shards))
-
- var uuids []string
- // CREATE
- t.Run("CREATE TABLE", func(t *testing.T) {
- // The table does not exist
- uuid := testOnlineDDLStatement(t, createStatement, onlineSingletonDDLStrategy, "vtgate", "", "", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, true)
- })
- t.Run("revert CREATE TABLE", func(t *testing.T) {
- // The table existed, so it will now be dropped (renamed)
- uuid := testRevertMigration(t, uuids[len(uuids)-1], "vtgate", onlineSingletonDDLStrategy, "", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, false)
- })
- t.Run("revert revert CREATE TABLE", func(t *testing.T) {
- // Table was dropped (renamed) so it will now be restored
- uuid := testRevertMigration(t, uuids[len(uuids)-1], "vtgate", onlineSingletonDDLStrategy, "", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, true)
- })
-
- var throttledUUID string
- t.Run("throttled migration", func(t *testing.T) {
- throttledUUID = testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "", false)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning)
- })
- t.Run("failed singleton migration, vtgate", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "rejected", true)
- assert.Empty(t, uuid)
- })
- t.Run("failed singleton migration, vtctl", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", true)
- assert.Empty(t, uuid)
- })
- t.Run("failed revert migration", func(t *testing.T) {
- uuid := testRevertMigration(t, throttledUUID, "vtgate", onlineSingletonDDLStrategy, "", "rejected", true)
- assert.Empty(t, uuid)
- })
- t.Run("terminate throttled migration", func(t *testing.T) {
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning)
- onlineddl.CheckCancelMigration(t, &vtParams, shards, throttledUUID, true)
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, throttledUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
- fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusCancelled)
- })
- t.Run("successful gh-ost alter, vtctl", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost --singleton", "vtctl", "", "hint_col", "", false)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
- onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
- })
- t.Run("successful gh-ost alter, vtgate", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost --singleton", "vtgate", "", "hint_col", "", false)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
- onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
- })
-
- t.Run("successful online alter, vtgate", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, onlineSingletonDDLStrategy, "vtgate", "", "hint_col", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
- onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
- checkTable(t, tableName, true)
- })
- t.Run("revert ALTER TABLE, vttablet", func(t *testing.T) {
- // The table existed, so it will now be dropped (renamed)
- uuid := testRevertMigration(t, uuids[len(uuids)-1], "vtctl", onlineSingletonDDLStrategy, "", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, true)
- })
-
- var throttledUUIDs []string
- // singleton-context
- t.Run("throttled migrations, singleton-context", func(t *testing.T) {
- uuidList := testOnlineDDLStatement(t, multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "", false)
- throttledUUIDs = strings.Split(uuidList, "\n")
- assert.Equal(t, 3, len(throttledUUIDs))
- for _, uuid := range throttledUUIDs {
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady, schema.OnlineDDLStatusRunning)
- }
- })
- t.Run("failed migrations, singleton-context", func(t *testing.T) {
- _ = testOnlineDDLStatement(t, multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", false)
- })
- t.Run("terminate throttled migrations", func(t *testing.T) {
- for _, uuid := range throttledUUIDs {
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady, schema.OnlineDDLStatusRunning)
- onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true)
- }
- time.Sleep(2 * time.Second)
- for _, uuid := range throttledUUIDs {
- uuid = strings.TrimSpace(uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
- }
- })
-
- t.Run("successful multiple statement, singleton-context, vtctl", func(t *testing.T) {
- uuidList := testOnlineDDLStatement(t, multiDropStatements, onlineSingletonContextDDLStrategy, "vtctl", "", "", "", false)
- uuidSlice := strings.Split(uuidList, "\n")
- assert.Equal(t, 3, len(uuidSlice))
- for _, uuid := range uuidSlice {
- uuid = strings.TrimSpace(uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- }
- })
-
- //DROP
-
- t.Run("online DROP TABLE", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, dropStatement, onlineSingletonDDLStrategy, "vtgate", "", "", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, false)
- })
- t.Run("revert DROP TABLE", func(t *testing.T) {
- // This will recreate the table (well, actually, rename it back into place)
- uuid := testRevertMigration(t, uuids[len(uuids)-1], "vttablet", onlineSingletonDDLStrategy, "", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- checkTable(t, tableName, true)
- })
-
- t.Run("fail concurrent singleton, vtgate", func(t *testing.T) {
- uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess --postpone-completion --singleton", "vtgate", "", "hint_col", "", true)
- uuids = append(uuids, uuid)
- _ = testOnlineDDLStatement(t, dropNonexistentTableStatement, "vitess --singleton", "vtgate", "", "hint_col", "rejected", true)
- onlineddl.CheckCompleteAllMigrations(t, &vtParams, len(shards))
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
- fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- })
- t.Run("fail concurrent singleton-context with revert", func(t *testing.T) {
- revertUUID := testRevertMigration(t, uuids[len(uuids)-1], "vtctl", "vitess --allow-concurrent --postpone-completion --singleton-context", "rev:ctx", "", false)
- onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusRunning)
- // revert is running
- _ = testOnlineDDLStatement(t, dropNonexistentTableStatement, "vitess --allow-concurrent --singleton-context", "vtctl", "migrate:ctx", "", "rejected", true)
- onlineddl.CheckCancelMigration(t, &vtParams, shards, revertUUID, true)
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
- fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, revertUUID, schema.OnlineDDLStatusCancelled)
- })
- t.Run("success concurrent singleton-context with no-context revert", func(t *testing.T) {
- revertUUID := testRevertMigration(t, uuids[len(uuids)-1], "vtctl", "vitess --allow-concurrent --postpone-completion", "rev:ctx", "", false)
- onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusRunning)
- // revert is running but has no --singleton-context. Our next migration should be able to run.
- uuid := testOnlineDDLStatement(t, dropNonexistentTableStatement, "vitess --allow-concurrent --singleton-context", "vtctl", "migrate:ctx", "", "", false)
- uuids = append(uuids, uuid)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
- onlineddl.CheckCancelMigration(t, &vtParams, shards, revertUUID, true)
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, revertUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled)
- fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, revertUUID, schema.OnlineDDLStatusCancelled)
- })
-}
-
-// testOnlineDDLStatement runs an online DDL, ALTER statement
-func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, migrationContext string, expectHint string, expectError string, skipWait bool) (uuid string) {
- strategySetting, err := schema.ParseDDLStrategy(ddlStrategy)
- require.NoError(t, err)
-
- if executeStrategy == "vtgate" {
- assert.Empty(t, migrationContext, "explicit migration context not supported in vtgate. Test via vtctl")
- result := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, expectError)
- if result != nil {
- row := result.Named().Row()
- if row != nil {
- uuid = row.AsString("uuid", "")
- }
- }
- } else {
- output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true, MigrationContext: migrationContext})
- if expectError == "" {
- assert.NoError(t, err)
- uuid = output
- } else {
- assert.Error(t, err)
- assert.Contains(t, output, expectError)
- }
- }
- uuid = strings.TrimSpace(uuid)
- fmt.Println("# Generated UUID (for debug purposes):")
- fmt.Printf("<%s>\n", uuid)
-
- if !strategySetting.Strategy.IsDirect() && !skipWait && uuid != "" {
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
- fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
- }
-
- if expectError == "" && expectHint != "" {
- checkMigratedTable(t, tableName, expectHint)
- }
- return uuid
-}
-
-// testRevertMigration reverts a given migration
-func testRevertMigration(t *testing.T, revertUUID string, executeStrategy string, ddlStrategy string, migrationContext string, expectError string, skipWait bool) (uuid string) {
- revertQuery := fmt.Sprintf("revert vitess_migration '%s'", revertUUID)
- if executeStrategy == "vtgate" {
- assert.Empty(t, migrationContext, "explicit migration context not supported in vtgate. Test via vtctl")
- result := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, revertQuery, expectError)
- if result != nil {
- row := result.Named().Row()
- if row != nil {
- uuid = row.AsString("uuid", "")
- }
- }
- } else {
- output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true, MigrationContext: migrationContext})
- if expectError == "" {
- assert.NoError(t, err)
- uuid = output
- } else {
- assert.Error(t, err)
- assert.Contains(t, output, expectError)
- }
- }
-
- if expectError == "" {
- uuid = strings.TrimSpace(uuid)
- fmt.Println("# Generated UUID (for debug purposes):")
- fmt.Printf("<%s>\n", uuid)
- }
- if !skipWait {
- time.Sleep(time.Second * 20)
- }
- return uuid
-}
-
-// checkTable checks the number of tables in the first two shards.
-func checkTable(t *testing.T, showTableName string, expectExists bool) bool {
- expectCount := 0
- if expectExists {
- expectCount = 1
- }
- for i := range clusterInstance.Keyspaces[0].Shards {
- if !checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) {
- return false
- }
- }
- return true
-}
-
-// checkTablesCount checks the number of tables in the given tablet
-func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) bool {
- query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName)
- queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true)
- require.Nil(t, err)
- return assert.Equal(t, expectCount, len(queryResult.Rows))
-}
-
-// checkMigratedTables checks the CREATE STATEMENT of a table after migration
-func checkMigratedTable(t *testing.T, tableName, expectHint string) {
- for i := range clusterInstance.Keyspaces[0].Shards {
- createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName)
- assert.Contains(t, createStatement, expectHint)
- }
-}
-
-// getCreateTableStatement returns the CREATE TABLE statement for a given table
-func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) {
- queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true)
- require.Nil(t, err)
-
- assert.Equal(t, len(queryResult.Rows), 1)
- assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement
- statement = queryResult.Rows[0][1].ToString()
- return statement
-}
diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
index 2d7cd937cd5..10902baf1d4 100644
--- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
+++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
@@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"io"
- "net/http"
"os"
"path"
"strings"
@@ -31,6 +30,7 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/onlineddl"
+ "vitess.io/vitess/go/test/endtoend/throttler"
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base"
@@ -47,8 +47,8 @@ var (
onlineDDLThrottlerAppName = "online-ddl"
vstreamerThrottlerAppName = "vstreamer"
- normalMigrationWait = 20 * time.Second
- extendedMigrationWait = 20 * time.Second
+ normalMigrationWait = 45 * time.Second
+ extendedMigrationWait = 60 * time.Second
hostname = "localhost"
keyspaceName = "ks"
@@ -172,8 +172,7 @@ func TestMain(m *testing.M) {
}
clusterInstance.VtTabletExtraArgs = []string{
- "--enable-lag-throttler",
- "--throttle_threshold", "1s",
+ "--throttler-config-via-topo",
"--heartbeat_enable",
"--heartbeat_interval", "250ms",
"--heartbeat_on_demand_duration", "5s",
@@ -214,31 +213,32 @@ func TestMain(m *testing.M) {
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
- } else {
+ } else { // nolint:revive
os.Exit(exitcode)
}
}
// direct per-tablet throttler API instruction
-func throttleResponse(tablet *cluster.Vttablet, path string) (resp *http.Response, respBody string, err error) {
+func throttleResponse(tablet *cluster.Vttablet, path string) (respBody string, err error) {
apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.VttabletProcess.TabletHostname, tablet.HTTPPort, path)
- resp, err = httpClient.Get(apiURL)
+ resp, err := httpClient.Get(apiURL)
if err != nil {
- return resp, respBody, err
+ return "", err
}
+ defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
respBody = string(b)
- return resp, respBody, err
+ return respBody, err
}
// direct per-tablet throttler API instruction
-func throttleApp(tablet *cluster.Vttablet, app string) (*http.Response, string, error) {
+func throttleApp(tablet *cluster.Vttablet, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app))
}
// direct per-tablet throttler API instruction
-func unthrottleApp(tablet *cluster.Vttablet, app string) (*http.Response, string, error) {
+func unthrottleApp(tablet *cluster.Vttablet, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app))
}
@@ -253,6 +253,24 @@ func TestSchemaChange(t *testing.T) {
providedUUID := ""
providedMigrationContext := ""
+
+ // We execute the throttler commands via vtgate, which in turn
+ // executes them via vttablet. So let's wait until vtgate's view
+ // is updated.
+ err := clusterInstance.WaitForTabletsToHealthyInVtgate()
+ require.NoError(t, err)
+
+ _, err = throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, "", false)
+ require.NoError(t, err)
+
+ for _, ks := range clusterInstance.Keyspaces {
+ for _, shard := range ks.Shards {
+ for _, tablet := range shard.Vttablets {
+ throttler.WaitForThrottlerStatusEnabled(t, tablet, true, nil, extendedMigrationWait)
+ }
+ }
+ }
+
testWithInitialSchema(t)
t.Run("alter non_online", func(t *testing.T) {
_ = testOnlineDDLStatement(t, alterTableNormalStatement, string(schema.DDLStrategyDirect), providedUUID, providedMigrationContext, "vtctl", "non_online", "", false)
@@ -323,6 +341,7 @@ func TestSchemaChange(t *testing.T) {
insertRows(t, 2)
uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess -postpone-completion", providedUUID, providedMigrationContext, "vtgate", "test_val", "", false)
// Should be still running!
+ _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)
// Issue a complete and wait for successful completion
onlineddl.CheckCompleteMigration(t, &vtParams, shards, uuid, true)
@@ -398,7 +417,7 @@ func TestSchemaChange(t *testing.T) {
// vstreamer source; but it's OK to be on the safe side and throttle on all tablets. Doesn't
// change the essence of this test.
for _, tablet := range shard.Vttablets {
- _, body, err := throttleApp(tablet, vstreamerThrottlerAppName)
+ body, err := throttleApp(tablet, vstreamerThrottlerAppName)
defer unthrottleApp(tablet, vstreamerThrottlerAppName)
assert.NoError(t, err)
@@ -487,7 +506,9 @@ func TestSchemaChange(t *testing.T) {
// reparent shard -80 to replica
// and then reparent it back to original state
// (two pretty much identical tests, the point is to end up with original state)
- for currentPrimaryTabletIndex, reparentTabletIndex := range []int{1, 0} {
+ for _, currentPrimaryTabletIndex := range []int{0, 1} {
+ currentPrimaryTablet := shards[0].Vttablets[currentPrimaryTabletIndex]
+ reparentTablet := shards[0].Vttablets[1-currentPrimaryTabletIndex]
t.Run(fmt.Sprintf("PlannedReparentShard via throttling %d/2", (currentPrimaryTabletIndex+1)), func(t *testing.T) {
insertRows(t, 2)
@@ -498,12 +519,12 @@ func TestSchemaChange(t *testing.T) {
case 0:
// this is the shard where we run PRS
// Use per-tablet throttling API
- _, body, err = throttleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
- defer unthrottleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
+ body, err = throttleApp(currentPrimaryTablet, onlineDDLThrottlerAppName)
+ defer unthrottleApp(currentPrimaryTablet, onlineDDLThrottlerAppName)
case 1:
// no PRS on this shard
// Use per-tablet throttling API
- _, body, err = throttleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err = throttleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
defer unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
}
assert.NoError(t, err)
@@ -511,12 +532,19 @@ func TestSchemaChange(t *testing.T) {
}
uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true)
- t.Run("wait for migration and vreplication to run", func(t *testing.T) {
+ t.Run("wait for migration to run", func(t *testing.T) {
_ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)
- time.Sleep(5 * time.Second) // wait for _vt.vreplication to be created
- vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards, uuid, normalMigrationWait, "Copying")
+ })
+ t.Run("wait for vreplication to run on shard -80", func(t *testing.T) {
+ vreplStatus := onlineddl.WaitForVReplicationStatus(t, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running")
require.Contains(t, []string{"Copying", "Running"}, vreplStatus)
+ })
+ t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) {
+ vreplStatus := onlineddl.WaitForVReplicationStatus(t, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running")
+ require.Contains(t, []string{"Copying", "Running"}, vreplStatus)
+ })
+ t.Run("check status again", func(t *testing.T) {
// again see that we're still 'running'
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)
testRows(t)
@@ -533,7 +561,7 @@ func TestSchemaChange(t *testing.T) {
switch shard {
case "-80":
- require.Equal(t, shards[0].Vttablets[currentPrimaryTabletIndex].Alias, tablet)
+ require.Equal(t, currentPrimaryTablet.Alias, tablet)
case "80-":
require.Equal(t, shards[1].Vttablets[0].Alias, tablet)
default:
@@ -543,11 +571,12 @@ func TestSchemaChange(t *testing.T) {
})
t.Run("PRS shard -80", func(t *testing.T) {
// migration has started and is throttled. We now run PRS
- err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", shards[0].Vttablets[reparentTabletIndex].Alias)
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias)
require.NoError(t, err, "failed PRS: %v", err)
+ rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "")
+ onlineddl.PrintQueryResult(os.Stdout, rs)
})
-
- t.Run("unthrottle and expect completion", func(t *testing.T) {
+ t.Run("unthrottle", func(t *testing.T) {
for i := range shards {
var body string
var err error
@@ -555,16 +584,17 @@ func TestSchemaChange(t *testing.T) {
case 0:
// this is the shard where we run PRS
// Use per-tablet throttling API
- _, body, err = unthrottleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
+ body, err = unthrottleApp(currentPrimaryTablet, onlineDDLThrottlerAppName)
case 1:
// no PRS on this shard
// Use per-tablet throttling API
- _, body, err = unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err = unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
}
assert.NoError(t, err)
assert.Contains(t, body, onlineDDLThrottlerAppName)
}
-
+ })
+ t.Run("expect completion", func(t *testing.T) {
_ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
})
@@ -582,7 +612,7 @@ func TestSchemaChange(t *testing.T) {
switch shard {
case "-80":
// PRS for this tablet, we promoted tablet[1]
- require.Equal(t, shards[0].Vttablets[reparentTabletIndex].Alias, tablet)
+ require.Equal(t, reparentTablet.Alias, tablet)
case "80-":
// No PRS for this tablet
require.Equal(t, shards[1].Vttablets[0].Alias, tablet)
@@ -597,6 +627,105 @@ func TestSchemaChange(t *testing.T) {
})
})
}
+
+ // reparent shard -80 to replica
+ // and then reparent it back to original state
+ // (two pretty much identical tests, the point is to end up with original state)
+ for _, currentPrimaryTabletIndex := range []int{0, 1} {
+ currentPrimaryTablet := shards[0].Vttablets[currentPrimaryTabletIndex]
+ reparentTablet := shards[0].Vttablets[1-currentPrimaryTabletIndex]
+
+ t.Run(fmt.Sprintf("PlannedReparentShard via postponed %d/2", (currentPrimaryTabletIndex+1)), func(t *testing.T) {
+
+ insertRows(t, 2)
+
+ uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess --postpone-completion", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true)
+
+ t.Run("wait for migration to run", func(t *testing.T) {
+ _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusRunning)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)
+ })
+ t.Run("wait for vreplication to run on shard -80", func(t *testing.T) {
+ vreplStatus := onlineddl.WaitForVReplicationStatus(t, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running")
+ require.Contains(t, []string{"Copying", "Running"}, vreplStatus)
+ })
+ t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) {
+ vreplStatus := onlineddl.WaitForVReplicationStatus(t, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running")
+ require.Contains(t, []string{"Copying", "Running"}, vreplStatus)
+ })
+ t.Run("check status again", func(t *testing.T) {
+ // again see that we're still 'running'
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)
+ testRows(t)
+ })
+
+ t.Run("Check tablet", func(t *testing.T) {
+ // onlineddl.Executor marks this migration with its tablet alias
+ // reminder that onlineddl.Executor runs on the primary tablet.
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ shard := row["shard"].ToString()
+ tablet := row["tablet"].ToString()
+
+ switch shard {
+ case "-80":
+ require.Equal(t, currentPrimaryTablet.Alias, tablet)
+ case "80-":
+ require.Equal(t, shards[1].Vttablets[0].Alias, tablet)
+ default:
+ require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard))
+ }
+ }
+ })
+ t.Run("PRS shard -80", func(t *testing.T) {
+ // migration has started and completion is postponed. We now PRS
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias)
+ require.NoError(t, err, "failed PRS: %v", err)
+ rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "")
+ onlineddl.PrintQueryResult(os.Stdout, rs)
+ })
+ t.Run("complete and expect completion", func(t *testing.T) {
+ query := fmt.Sprintf("select * from _vt.vreplication where workflow ='%s'", uuid)
+ rs, err := reparentTablet.VttabletProcess.QueryTablet(query, "", true)
+ assert.NoError(t, err)
+ onlineddl.PrintQueryResult(os.Stdout, rs)
+
+ onlineddl.CheckCompleteAllMigrations(t, &vtParams, len(shards))
+
+ _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ })
+
+ t.Run("Check tablet post PRS", func(t *testing.T) {
+ // onlineddl.Executor will find that a vrepl migration started in a different tablet.
+ // it will own the tablet and will update 'tablet' column in _vt.schema_migrations with its own
+ // (promoted primary) tablet alias.
+ rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+ require.NotNil(t, rs)
+ for _, row := range rs.Named().Rows {
+ shard := row["shard"].ToString()
+ tablet := row["tablet"].ToString()
+
+ switch shard {
+ case "-80":
+ // PRS for this tablet
+ require.Equal(t, reparentTablet.Alias, tablet)
+ case "80-":
+ // No PRS for this tablet
+ require.Equal(t, shards[1].Vttablets[0].Alias, tablet)
+ default:
+ require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard))
+ }
+ }
+
+ onlineddl.CheckRetryPartialMigration(t, &vtParams, uuid, 1)
+ // Now it should complete on the failed shard
+ _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete)
+ })
+ })
+ }
+
t.Run("Online DROP, vtctl", func(t *testing.T) {
uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "online", providedUUID, providedMigrationContext, "vtctl", "", "", false)
t.Run("test ready to complete", func(t *testing.T) {
@@ -627,14 +756,14 @@ func TestSchemaChange(t *testing.T) {
})
t.Run("Online CREATE, vtctl, extra flags", func(t *testing.T) {
// the flags are meaningless to this migration. The test just validates that they don't get in the way.
- uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "vitess --fast-over-revertible --allow-zero-in-date", providedUUID, providedMigrationContext, "vtctl", "online_ddl_create_col", "", false)
+ uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "vitess --prefer-instant-ddl --allow-zero-in-date", providedUUID, providedMigrationContext, "vtctl", "online_ddl_create_col", "", false)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
})
t.Run("Online DROP TABLE IF EXISTS, vtgate, extra flags", func(t *testing.T) {
// the flags are meaningless to this migration. The test just validates that they don't get in the way.
- uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "vitess --fast-over-revertible --allow-zero-in-date", providedUUID, providedMigrationContext, "vtgate", "", "", false)
+ uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "vitess --prefer-instant-ddl --allow-zero-in-date", providedUUID, providedMigrationContext, "vtgate", "", "", false)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false)
onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false)
@@ -684,7 +813,7 @@ func TestSchemaChange(t *testing.T) {
// shard 0 will run normally, shard 1 will be throttled
defer unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
t.Run("throttle shard 1", func(t *testing.T) {
- _, body, err := throttleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err := throttleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, onlineDDLThrottlerAppName)
})
@@ -708,7 +837,7 @@ func TestSchemaChange(t *testing.T) {
onlineddl.CheckCancelAllMigrations(t, &vtParams, 1)
})
t.Run("unthrottle shard 1", func(t *testing.T) {
- _, body, err := unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err := unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, onlineDDLThrottlerAppName)
})
@@ -756,6 +885,12 @@ func TestSchemaChange(t *testing.T) {
}
})
})
+ t.Run("summary: validate sequential migration IDs", func(t *testing.T) {
+ onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards)
+ })
+ t.Run("summary: validate completed_timestamp", func(t *testing.T) {
+ onlineddl.ValidateCompletedTimestamp(t, &vtParams)
+ })
}
func insertRow(t *testing.T) {
diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
index 0531af319b4..7201fa70652 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
@@ -140,6 +140,7 @@ const (
maxConcurrency = 20
singleConnectionSleepInterval = 2 * time.Millisecond
countIterations = 5
+ migrationWaitTimeout = 60 * time.Second
)
func resetOpOrder() {
@@ -309,6 +310,10 @@ func TestSchemaChange(t *testing.T) {
})
})
}
+
+ t.Run("summary: validate sequential migration IDs", func(t *testing.T) {
+ onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards)
+ })
}
func testWithInitialSchema(t *testing.T) {
@@ -344,7 +349,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str
assert.NoError(t, err)
if !strategySetting.Strategy.IsDirect() {
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 30*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
}
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/different-pk-int-to-text/order_by b/go/test/endtoend/onlineddl/vrepl_suite/testdata/different-pk-int-to-text/order_by
new file mode 100644
index 00000000000..074d1eeb404
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/different-pk-int-to-text/order_by
@@ -0,0 +1 @@
+id
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/expect_failure
index 53b8e8c8c32..d1b1e7e0525 100644
--- a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/expect_failure
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/expect_failure
@@ -1 +1 @@
-foreign key constraints are not supported in online DDL
+FOREIGN KEY constraints are not supported
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/int-to-enum/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/int-to-enum/alter
new file mode 100644
index 00000000000..71c96c5d04e
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/int-to-enum/alter
@@ -0,0 +1 @@
+change i i enum('0', '1')
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/int-to-enum/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/int-to-enum/create.sql
new file mode 100644
index 00000000000..0dff0c9c2e0
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/int-to-enum/create.sql
@@ -0,0 +1,26 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id int auto_increment,
+ i int not null,
+ primary key(id)
+) auto_increment=1;
+
+insert into onlineddl_test values (null, 0);
+insert into onlineddl_test values (null, 1);
+
+drop event if exists onlineddl_test;
+delimiter ;;
+create event onlineddl_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into onlineddl_test values (null, 0);
+ insert into onlineddl_test values (null, 1);
+ insert into onlineddl_test values (null, 1);
+ set @last_insert_id := last_insert_id();
+ update onlineddl_test set i='0' where id = @last_insert_id;
+end ;;
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/after_columns
new file mode 100644
index 00000000000..7d84f38dcc1
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/after_columns
@@ -0,0 +1 @@
+id, i000, i001, i002, i003, i004, i005, i006, i007, i008, i009, i010, i011, i012, i013, i014, i015, i016, i017, i018, i019, i020, i021, i022, i023, i024, i025, i026, i027, i028, i029, i030, i031, i032, i033, i034, i035, i036, i037, i038, i039, i040, i041, i042, i043, i044, i045, i046, i047, i048, i049, i050, i051, i052, i053, i054, i055, i056, i057, i058, i059, i060, i061, i062, i063, i064, i065, i066, i067, i068, i069, i070, i071, i072, i073, i074, i075, i076, i077, i078, i079, i080, i081, i082, i083, i084, i085, i086, i087, i088, i089, i090, i091, i092, i093, i094, i095, i096, i097, i098, i099, i100, i101, i102, i103, i104, i105, i106, i107, i108, i109, i110, i111, i112, i113, i114, i115, i116, i117, i118, i119, i120, i121, i122, i123, i124, i125, i126, i127, i128, i129, i130, i131, i132, i133, i134, i135, i136, i137, i138, i139, i140, i141, i142, i143, i144, i145, i146, i147, i148, i149, i150, i151, i152, i153, i154, i155, i156, i157, i158, i159, i160, i161, i162, i163, i164, i165, i166, i167, i168, i169, i170, i171, i172, i173, i174, i175, i176, i177, i178, i179, i180, i181, i182, i183, i184, i185, i186, i187, i188, i189, i190, i191, i192, i193, i194, i195, i196, i197, i198, i199, i200, i201, i202, i203, i204, i205, i206, i207, i208, i209, i210, i211, i212, i213, i214, i215, i216, i217, i218, i219, i220, i221, i222, i223, i224, i225, i226, i227, i228, i229, i230, i231, i232, i233, i234, i235, i236, i237, i238, i239, i240, i241, i242, i243, i244, i245, i246, i247, i248, i249, i250, i251, i252, i253, i254, i255, i256, i257
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/alter
new file mode 100644
index 00000000000..b7581200408
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/alter
@@ -0,0 +1 @@
+add column i258 int not null default 0
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/before_columns
new file mode 100644
index 00000000000..7d84f38dcc1
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/before_columns
@@ -0,0 +1 @@
+id, i000, i001, i002, i003, i004, i005, i006, i007, i008, i009, i010, i011, i012, i013, i014, i015, i016, i017, i018, i019, i020, i021, i022, i023, i024, i025, i026, i027, i028, i029, i030, i031, i032, i033, i034, i035, i036, i037, i038, i039, i040, i041, i042, i043, i044, i045, i046, i047, i048, i049, i050, i051, i052, i053, i054, i055, i056, i057, i058, i059, i060, i061, i062, i063, i064, i065, i066, i067, i068, i069, i070, i071, i072, i073, i074, i075, i076, i077, i078, i079, i080, i081, i082, i083, i084, i085, i086, i087, i088, i089, i090, i091, i092, i093, i094, i095, i096, i097, i098, i099, i100, i101, i102, i103, i104, i105, i106, i107, i108, i109, i110, i111, i112, i113, i114, i115, i116, i117, i118, i119, i120, i121, i122, i123, i124, i125, i126, i127, i128, i129, i130, i131, i132, i133, i134, i135, i136, i137, i138, i139, i140, i141, i142, i143, i144, i145, i146, i147, i148, i149, i150, i151, i152, i153, i154, i155, i156, i157, i158, i159, i160, i161, i162, i163, i164, i165, i166, i167, i168, i169, i170, i171, i172, i173, i174, i175, i176, i177, i178, i179, i180, i181, i182, i183, i184, i185, i186, i187, i188, i189, i190, i191, i192, i193, i194, i195, i196, i197, i198, i199, i200, i201, i202, i203, i204, i205, i206, i207, i208, i209, i210, i211, i212, i213, i214, i215, i216, i217, i218, i219, i220, i221, i222, i223, i224, i225, i226, i227, i228, i229, i230, i231, i232, i233, i234, i235, i236, i237, i238, i239, i240, i241, i242, i243, i244, i245, i246, i247, i248, i249, i250, i251, i252, i253, i254, i255, i256, i257
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/create.sql
new file mode 100644
index 00000000000..33700907112
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns-add-one/create.sql
@@ -0,0 +1,283 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id int auto_increment,
+ i000 int not null default 0,
+ i001 int not null default 0,
+ i002 int not null default 0,
+ i003 int not null default 0,
+ i004 int not null default 0,
+ i005 int not null default 0,
+ i006 int not null default 0,
+ i007 int not null default 0,
+ i008 int not null default 0,
+ i009 int not null default 0,
+ i010 int not null default 0,
+ i011 int not null default 0,
+ i012 int not null default 0,
+ i013 int not null default 0,
+ i014 int not null default 0,
+ i015 int not null default 0,
+ i016 int not null default 0,
+ i017 int not null default 0,
+ i018 int not null default 0,
+ i019 int not null default 0,
+ i020 int not null default 0,
+ i021 int not null default 0,
+ i022 int not null default 0,
+ i023 int not null default 0,
+ i024 int not null default 0,
+ i025 int not null default 0,
+ i026 int not null default 0,
+ i027 int not null default 0,
+ i028 int not null default 0,
+ i029 int not null default 0,
+ i030 int not null default 0,
+ i031 int not null default 0,
+ i032 int not null default 0,
+ i033 int not null default 0,
+ i034 int not null default 0,
+ i035 int not null default 0,
+ i036 int not null default 0,
+ i037 int not null default 0,
+ i038 int not null default 0,
+ i039 int not null default 0,
+ i040 int not null default 0,
+ i041 int not null default 0,
+ i042 int not null default 0,
+ i043 int not null default 0,
+ i044 int not null default 0,
+ i045 int not null default 0,
+ i046 int not null default 0,
+ i047 int not null default 0,
+ i048 int not null default 0,
+ i049 int not null default 0,
+ i050 int not null default 0,
+ i051 int not null default 0,
+ i052 int not null default 0,
+ i053 int not null default 0,
+ i054 int not null default 0,
+ i055 int not null default 0,
+ i056 int not null default 0,
+ i057 int not null default 0,
+ i058 int not null default 0,
+ i059 int not null default 0,
+ i060 int not null default 0,
+ i061 int not null default 0,
+ i062 int not null default 0,
+ i063 int not null default 0,
+ i064 int not null default 0,
+ i065 int not null default 0,
+ i066 int not null default 0,
+ i067 int not null default 0,
+ i068 int not null default 0,
+ i069 int not null default 0,
+ i070 int not null default 0,
+ i071 int not null default 0,
+ i072 int not null default 0,
+ i073 int not null default 0,
+ i074 int not null default 0,
+ i075 int not null default 0,
+ i076 int not null default 0,
+ i077 int not null default 0,
+ i078 int not null default 0,
+ i079 int not null default 0,
+ i080 int not null default 0,
+ i081 int not null default 0,
+ i082 int not null default 0,
+ i083 int not null default 0,
+ i084 int not null default 0,
+ i085 int not null default 0,
+ i086 int not null default 0,
+ i087 int not null default 0,
+ i088 int not null default 0,
+ i089 int not null default 0,
+ i090 int not null default 0,
+ i091 int not null default 0,
+ i092 int not null default 0,
+ i093 int not null default 0,
+ i094 int not null default 0,
+ i095 int not null default 0,
+ i096 int not null default 0,
+ i097 int not null default 0,
+ i098 int not null default 0,
+ i099 int not null default 0,
+ i100 int not null default 0,
+ i101 int not null default 0,
+ i102 int not null default 0,
+ i103 int not null default 0,
+ i104 int not null default 0,
+ i105 int not null default 0,
+ i106 int not null default 0,
+ i107 int not null default 0,
+ i108 int not null default 0,
+ i109 int not null default 0,
+ i110 int not null default 0,
+ i111 int not null default 0,
+ i112 int not null default 0,
+ i113 int not null default 0,
+ i114 int not null default 0,
+ i115 int not null default 0,
+ i116 int not null default 0,
+ i117 int not null default 0,
+ i118 int not null default 0,
+ i119 int not null default 0,
+ i120 int not null default 0,
+ i121 int not null default 0,
+ i122 int not null default 0,
+ i123 int not null default 0,
+ i124 int not null default 0,
+ i125 int not null default 0,
+ i126 int not null default 0,
+ i127 int not null default 0,
+ i128 int not null default 0,
+ i129 int not null default 0,
+ i130 int not null default 0,
+ i131 int not null default 0,
+ i132 int not null default 0,
+ i133 int not null default 0,
+ i134 int not null default 0,
+ i135 int not null default 0,
+ i136 int not null default 0,
+ i137 int not null default 0,
+ i138 int not null default 0,
+ i139 int not null default 0,
+ i140 int not null default 0,
+ i141 int not null default 0,
+ i142 int not null default 0,
+ i143 int not null default 0,
+ i144 int not null default 0,
+ i145 int not null default 0,
+ i146 int not null default 0,
+ i147 int not null default 0,
+ i148 int not null default 0,
+ i149 int not null default 0,
+ i150 int not null default 0,
+ i151 int not null default 0,
+ i152 int not null default 0,
+ i153 int not null default 0,
+ i154 int not null default 0,
+ i155 int not null default 0,
+ i156 int not null default 0,
+ i157 int not null default 0,
+ i158 int not null default 0,
+ i159 int not null default 0,
+ i160 int not null default 0,
+ i161 int not null default 0,
+ i162 int not null default 0,
+ i163 int not null default 0,
+ i164 int not null default 0,
+ i165 int not null default 0,
+ i166 int not null default 0,
+ i167 int not null default 0,
+ i168 int not null default 0,
+ i169 int not null default 0,
+ i170 int not null default 0,
+ i171 int not null default 0,
+ i172 int not null default 0,
+ i173 int not null default 0,
+ i174 int not null default 0,
+ i175 int not null default 0,
+ i176 int not null default 0,
+ i177 int not null default 0,
+ i178 int not null default 0,
+ i179 int not null default 0,
+ i180 int not null default 0,
+ i181 int not null default 0,
+ i182 int not null default 0,
+ i183 int not null default 0,
+ i184 int not null default 0,
+ i185 int not null default 0,
+ i186 int not null default 0,
+ i187 int not null default 0,
+ i188 int not null default 0,
+ i189 int not null default 0,
+ i190 int not null default 0,
+ i191 int not null default 0,
+ i192 int not null default 0,
+ i193 int not null default 0,
+ i194 int not null default 0,
+ i195 int not null default 0,
+ i196 int not null default 0,
+ i197 int not null default 0,
+ i198 int not null default 0,
+ i199 int not null default 0,
+ i200 int not null default 0,
+ i201 int not null default 0,
+ i202 int not null default 0,
+ i203 int not null default 0,
+ i204 int not null default 0,
+ i205 int not null default 0,
+ i206 int not null default 0,
+ i207 int not null default 0,
+ i208 int not null default 0,
+ i209 int not null default 0,
+ i210 int not null default 0,
+ i211 int not null default 0,
+ i212 int not null default 0,
+ i213 int not null default 0,
+ i214 int not null default 0,
+ i215 int not null default 0,
+ i216 int not null default 0,
+ i217 int not null default 0,
+ i218 int not null default 0,
+ i219 int not null default 0,
+ i220 int not null default 0,
+ i221 int not null default 0,
+ i222 int not null default 0,
+ i223 int not null default 0,
+ i224 int not null default 0,
+ i225 int not null default 0,
+ i226 int not null default 0,
+ i227 int not null default 0,
+ i228 int not null default 0,
+ i229 int not null default 0,
+ i230 int not null default 0,
+ i231 int not null default 0,
+ i232 int not null default 0,
+ i233 int not null default 0,
+ i234 int not null default 0,
+ i235 int not null default 0,
+ i236 int not null default 0,
+ i237 int not null default 0,
+ i238 int not null default 0,
+ i239 int not null default 0,
+ i240 int not null default 0,
+ i241 int not null default 0,
+ i242 int not null default 0,
+ i243 int not null default 0,
+ i244 int not null default 0,
+ i245 int not null default 0,
+ i246 int not null default 0,
+ i247 int not null default 0,
+ i248 int not null default 0,
+ i249 int not null default 0,
+ i250 int not null default 0,
+ i251 int not null default 0,
+ i252 int not null default 0,
+ i253 int not null default 0,
+ i254 int not null default 0,
+ i255 int not null default 0,
+ i256 int not null default 0,
+ i257 int not null default 0,
+ primary key(id)
+) auto_increment=1;
+
+insert into onlineddl_test (id) values (null);
+update onlineddl_test set i003=id order by id desc limit 1;
+update onlineddl_test set i257=id order by id desc limit 1;
+
+drop event if exists onlineddl_test;
+delimiter ;;
+create event onlineddl_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into onlineddl_test (id) values (null);
+ update onlineddl_test set i003=id order by id desc limit 1;
+ update onlineddl_test set i257=id order by id desc limit 1;
+end ;;
+
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns/create.sql
new file mode 100644
index 00000000000..33700907112
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/many-columns/create.sql
@@ -0,0 +1,283 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id int auto_increment,
+ i000 int not null default 0,
+ i001 int not null default 0,
+ i002 int not null default 0,
+ i003 int not null default 0,
+ i004 int not null default 0,
+ i005 int not null default 0,
+ i006 int not null default 0,
+ i007 int not null default 0,
+ i008 int not null default 0,
+ i009 int not null default 0,
+ i010 int not null default 0,
+ i011 int not null default 0,
+ i012 int not null default 0,
+ i013 int not null default 0,
+ i014 int not null default 0,
+ i015 int not null default 0,
+ i016 int not null default 0,
+ i017 int not null default 0,
+ i018 int not null default 0,
+ i019 int not null default 0,
+ i020 int not null default 0,
+ i021 int not null default 0,
+ i022 int not null default 0,
+ i023 int not null default 0,
+ i024 int not null default 0,
+ i025 int not null default 0,
+ i026 int not null default 0,
+ i027 int not null default 0,
+ i028 int not null default 0,
+ i029 int not null default 0,
+ i030 int not null default 0,
+ i031 int not null default 0,
+ i032 int not null default 0,
+ i033 int not null default 0,
+ i034 int not null default 0,
+ i035 int not null default 0,
+ i036 int not null default 0,
+ i037 int not null default 0,
+ i038 int not null default 0,
+ i039 int not null default 0,
+ i040 int not null default 0,
+ i041 int not null default 0,
+ i042 int not null default 0,
+ i043 int not null default 0,
+ i044 int not null default 0,
+ i045 int not null default 0,
+ i046 int not null default 0,
+ i047 int not null default 0,
+ i048 int not null default 0,
+ i049 int not null default 0,
+ i050 int not null default 0,
+ i051 int not null default 0,
+ i052 int not null default 0,
+ i053 int not null default 0,
+ i054 int not null default 0,
+ i055 int not null default 0,
+ i056 int not null default 0,
+ i057 int not null default 0,
+ i058 int not null default 0,
+ i059 int not null default 0,
+ i060 int not null default 0,
+ i061 int not null default 0,
+ i062 int not null default 0,
+ i063 int not null default 0,
+ i064 int not null default 0,
+ i065 int not null default 0,
+ i066 int not null default 0,
+ i067 int not null default 0,
+ i068 int not null default 0,
+ i069 int not null default 0,
+ i070 int not null default 0,
+ i071 int not null default 0,
+ i072 int not null default 0,
+ i073 int not null default 0,
+ i074 int not null default 0,
+ i075 int not null default 0,
+ i076 int not null default 0,
+ i077 int not null default 0,
+ i078 int not null default 0,
+ i079 int not null default 0,
+ i080 int not null default 0,
+ i081 int not null default 0,
+ i082 int not null default 0,
+ i083 int not null default 0,
+ i084 int not null default 0,
+ i085 int not null default 0,
+ i086 int not null default 0,
+ i087 int not null default 0,
+ i088 int not null default 0,
+ i089 int not null default 0,
+ i090 int not null default 0,
+ i091 int not null default 0,
+ i092 int not null default 0,
+ i093 int not null default 0,
+ i094 int not null default 0,
+ i095 int not null default 0,
+ i096 int not null default 0,
+ i097 int not null default 0,
+ i098 int not null default 0,
+ i099 int not null default 0,
+ i100 int not null default 0,
+ i101 int not null default 0,
+ i102 int not null default 0,
+ i103 int not null default 0,
+ i104 int not null default 0,
+ i105 int not null default 0,
+ i106 int not null default 0,
+ i107 int not null default 0,
+ i108 int not null default 0,
+ i109 int not null default 0,
+ i110 int not null default 0,
+ i111 int not null default 0,
+ i112 int not null default 0,
+ i113 int not null default 0,
+ i114 int not null default 0,
+ i115 int not null default 0,
+ i116 int not null default 0,
+ i117 int not null default 0,
+ i118 int not null default 0,
+ i119 int not null default 0,
+ i120 int not null default 0,
+ i121 int not null default 0,
+ i122 int not null default 0,
+ i123 int not null default 0,
+ i124 int not null default 0,
+ i125 int not null default 0,
+ i126 int not null default 0,
+ i127 int not null default 0,
+ i128 int not null default 0,
+ i129 int not null default 0,
+ i130 int not null default 0,
+ i131 int not null default 0,
+ i132 int not null default 0,
+ i133 int not null default 0,
+ i134 int not null default 0,
+ i135 int not null default 0,
+ i136 int not null default 0,
+ i137 int not null default 0,
+ i138 int not null default 0,
+ i139 int not null default 0,
+ i140 int not null default 0,
+ i141 int not null default 0,
+ i142 int not null default 0,
+ i143 int not null default 0,
+ i144 int not null default 0,
+ i145 int not null default 0,
+ i146 int not null default 0,
+ i147 int not null default 0,
+ i148 int not null default 0,
+ i149 int not null default 0,
+ i150 int not null default 0,
+ i151 int not null default 0,
+ i152 int not null default 0,
+ i153 int not null default 0,
+ i154 int not null default 0,
+ i155 int not null default 0,
+ i156 int not null default 0,
+ i157 int not null default 0,
+ i158 int not null default 0,
+ i159 int not null default 0,
+ i160 int not null default 0,
+ i161 int not null default 0,
+ i162 int not null default 0,
+ i163 int not null default 0,
+ i164 int not null default 0,
+ i165 int not null default 0,
+ i166 int not null default 0,
+ i167 int not null default 0,
+ i168 int not null default 0,
+ i169 int not null default 0,
+ i170 int not null default 0,
+ i171 int not null default 0,
+ i172 int not null default 0,
+ i173 int not null default 0,
+ i174 int not null default 0,
+ i175 int not null default 0,
+ i176 int not null default 0,
+ i177 int not null default 0,
+ i178 int not null default 0,
+ i179 int not null default 0,
+ i180 int not null default 0,
+ i181 int not null default 0,
+ i182 int not null default 0,
+ i183 int not null default 0,
+ i184 int not null default 0,
+ i185 int not null default 0,
+ i186 int not null default 0,
+ i187 int not null default 0,
+ i188 int not null default 0,
+ i189 int not null default 0,
+ i190 int not null default 0,
+ i191 int not null default 0,
+ i192 int not null default 0,
+ i193 int not null default 0,
+ i194 int not null default 0,
+ i195 int not null default 0,
+ i196 int not null default 0,
+ i197 int not null default 0,
+ i198 int not null default 0,
+ i199 int not null default 0,
+ i200 int not null default 0,
+ i201 int not null default 0,
+ i202 int not null default 0,
+ i203 int not null default 0,
+ i204 int not null default 0,
+ i205 int not null default 0,
+ i206 int not null default 0,
+ i207 int not null default 0,
+ i208 int not null default 0,
+ i209 int not null default 0,
+ i210 int not null default 0,
+ i211 int not null default 0,
+ i212 int not null default 0,
+ i213 int not null default 0,
+ i214 int not null default 0,
+ i215 int not null default 0,
+ i216 int not null default 0,
+ i217 int not null default 0,
+ i218 int not null default 0,
+ i219 int not null default 0,
+ i220 int not null default 0,
+ i221 int not null default 0,
+ i222 int not null default 0,
+ i223 int not null default 0,
+ i224 int not null default 0,
+ i225 int not null default 0,
+ i226 int not null default 0,
+ i227 int not null default 0,
+ i228 int not null default 0,
+ i229 int not null default 0,
+ i230 int not null default 0,
+ i231 int not null default 0,
+ i232 int not null default 0,
+ i233 int not null default 0,
+ i234 int not null default 0,
+ i235 int not null default 0,
+ i236 int not null default 0,
+ i237 int not null default 0,
+ i238 int not null default 0,
+ i239 int not null default 0,
+ i240 int not null default 0,
+ i241 int not null default 0,
+ i242 int not null default 0,
+ i243 int not null default 0,
+ i244 int not null default 0,
+ i245 int not null default 0,
+ i246 int not null default 0,
+ i247 int not null default 0,
+ i248 int not null default 0,
+ i249 int not null default 0,
+ i250 int not null default 0,
+ i251 int not null default 0,
+ i252 int not null default 0,
+ i253 int not null default 0,
+ i254 int not null default 0,
+ i255 int not null default 0,
+ i256 int not null default 0,
+ i257 int not null default 0,
+ primary key(id)
+) auto_increment=1;
+
+insert into onlineddl_test (id) values (null);
+update onlineddl_test set i003=id order by id desc limit 1;
+update onlineddl_test set i257=id order by id desc limit 1;
+
+drop event if exists onlineddl_test;
+delimiter ;;
+create event onlineddl_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into onlineddl_test (id) values (null);
+ update onlineddl_test set i003=id order by id desc limit 1;
+ update onlineddl_test set i257=id order by id desc limit 1;
+end ;;
+
diff --git a/go/test/endtoend/onlineddl/vtctlutil.go b/go/test/endtoend/onlineddl/vtctlutil.go
index b98d1134283..d2da7327e3a 100644
--- a/go/test/endtoend/onlineddl/vtctlutil.go
+++ b/go/test/endtoend/onlineddl/vtctlutil.go
@@ -18,12 +18,15 @@ package onlineddl
import (
"testing"
+ "time"
"vitess.io/vitess/go/test/endtoend/cluster"
"github.com/stretchr/testify/assert"
)
+var throttlerConfigTimeout = 60 * time.Second
+
// CheckCancelAllMigrations cancels all pending migrations. There is no validation for affected migrations.
func CheckCancelAllMigrationsViaVtctl(t *testing.T, vtctlclient *cluster.VtctlClientProcess, keyspace string) {
cancelQuery := "alter vitess_migration cancel all"
diff --git a/go/test/endtoend/onlineddl/vtgate_util.go b/go/test/endtoend/onlineddl/vtgate_util.go
index 86ea963465a..5052065082b 100644
--- a/go/test/endtoend/onlineddl/vtgate_util.go
+++ b/go/test/endtoend/onlineddl/vtgate_util.go
@@ -19,22 +19,34 @@ package onlineddl
import (
"context"
"fmt"
+ "io"
"math"
+ "net/http"
"os"
"testing"
"time"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/test/endtoend/cluster"
+ "github.com/buger/jsonparser"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ testsStartupTime time.Time
+)
+
+func init() {
+ testsStartupTime = time.Now()
+}
+
// VtgateExecQuery runs a query on VTGate using given query params
func VtgateExecQuery(t *testing.T, vtParams *mysql.ConnParams, query string, expectError string) *sqltypes.Result {
t.Helper()
@@ -344,3 +356,111 @@ func WaitForThrottledTimestamp(t *testing.T, vtParams *mysql.ConnParams, uuid st
t.Error("timeout waiting for last_throttled_timestamp to have nonempty value")
return
}
+
+// WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as enabled.
+func WaitForThrottlerStatusEnabled(t *testing.T, tablet *cluster.Vttablet, timeout time.Duration) {
+ jsonPath := "IsEnabled"
+ url := fmt.Sprintf("http://localhost:%d/throttler/status", tablet.HTTPPort)
+
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ body := getHTTPBody(url)
+ val, err := jsonparser.GetBoolean([]byte(body), jsonPath)
+ require.NoError(t, err)
+ if val {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ t.Error("timeout waiting for tablet's throttler status to be enabled")
+ return
+ case <-ticker.C:
+ }
+ }
+}
+
+func getHTTPBody(url string) string {
+ resp, err := http.Get(url)
+ if err != nil {
+ log.Infof("http Get returns %+v", err)
+ return ""
+ }
+ if resp.StatusCode != 200 {
+ log.Infof("http Get returns status %d", resp.StatusCode)
+ return ""
+ }
+ respByte, _ := io.ReadAll(resp.Body)
+ defer resp.Body.Close()
+ body := string(respByte)
+ return body
+}
+
+// ValidateSequentialMigrationIDs validates that schem_migrations.id column, which is an AUTO_INCREMENT, does
+// not have gaps
+func ValidateSequentialMigrationIDs(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard) {
+ r := VtgateExecQuery(t, vtParams, "show vitess_migrations", "")
+ shardMin := map[string]uint64{}
+ shardMax := map[string]uint64{}
+ shardCount := map[string]uint64{}
+
+ for _, row := range r.Named().Rows {
+ id := row.AsUint64("id", 0)
+ require.NotZero(t, id)
+
+ shard := row.AsString("shard", "")
+ require.NotEmpty(t, shard)
+
+ if _, ok := shardMin[shard]; !ok {
+ shardMin[shard] = id
+ shardMax[shard] = id
+ }
+ if id < shardMin[shard] {
+ shardMin[shard] = id
+ }
+ if id > shardMax[shard] {
+ shardMax[shard] = id
+ }
+ shardCount[shard]++
+ }
+ require.NotEmpty(t, shards)
+ assert.Equal(t, len(shards), len(shardMin))
+ assert.Equal(t, len(shards), len(shardMax))
+ assert.Equal(t, len(shards), len(shardCount))
+ for shard, count := range shardCount {
+ assert.NotZero(t, count)
+ assert.Equalf(t, count, shardMax[shard]-shardMin[shard]+1, "mismatch: shared=%v, count=%v, min=%v, max=%v", shard, count, shardMin[shard], shardMax[shard])
+ }
+}
+
+// ValidateCompletedTimestamp ensures that any migration in `cancelled`, `completed`, `failed` statuses
+// has a non-nil and valid `completed_timestamp` value.
+func ValidateCompletedTimestamp(t *testing.T, vtParams *mysql.ConnParams) {
+ require.False(t, testsStartupTime.IsZero())
+ r := VtgateExecQuery(t, vtParams, "show vitess_migrations", "")
+
+ completedTimestampNumValidations := 0
+ for _, row := range r.Named().Rows {
+ migrationStatus := row.AsString("migration_status", "")
+ require.NotEmpty(t, migrationStatus)
+ switch migrationStatus {
+ case string(schema.OnlineDDLStatusComplete),
+ string(schema.OnlineDDLStatusFailed),
+ string(schema.OnlineDDLStatusCancelled):
+ {
+ assert.False(t, row["completed_timestamp"].IsNull())
+ // Also make sure the timestamp is "real", and that it is recent.
+ timestamp := row.AsString("completed_timestamp", "")
+ completedTime, err := time.Parse(sqltypes.TimestampFormat, timestamp)
+ assert.NoError(t, err)
+ assert.Greater(t, completedTime.Unix(), testsStartupTime.Unix())
+ completedTimestampNumValidations++
+ }
+ }
+ }
+ assert.NotZero(t, completedTimestampNumValidations)
+}
diff --git a/go/test/endtoend/onlineddl/vttablet_util.go b/go/test/endtoend/onlineddl/vttablet_util.go
index 4d4e88b8189..b4669490f63 100644
--- a/go/test/endtoend/onlineddl/vttablet_util.go
+++ b/go/test/endtoend/onlineddl/vttablet_util.go
@@ -20,7 +20,6 @@ import (
"testing"
"time"
- "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/sqlparser"
@@ -31,9 +30,9 @@ import (
)
// WaitForVReplicationStatus waits for a vreplication stream to be in one of given states, or timeout
-func WaitForVReplicationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, timeout time.Duration, expectStatuses ...string) (status string) {
+func WaitForVReplicationStatus(t *testing.T, tablet *cluster.Vttablet, uuid string, timeout time.Duration, expectStatuses ...string) (status string) {
- query, err := sqlparser.ParseAndBind("select workflow, state from _vt.vreplication where workflow=%a",
+ query, err := sqlparser.ParseAndBind("select state from _vt.vreplication where workflow=%a",
sqltypes.StringBindVariable(uuid),
)
require.NoError(t, err)
@@ -45,22 +44,16 @@ func WaitForVReplicationStatus(t *testing.T, vtParams *mysql.ConnParams, shards
startTime := time.Now()
lastKnownStatus := ""
for time.Since(startTime) < timeout {
- countMatchedShards := 0
+ r, err := tablet.VttabletProcess.QueryTablet(query, "", true)
+ require.NoError(t, err)
- for _, shard := range shards {
- r, err := shard.Vttablets[0].VttabletProcess.QueryTablet(query, "", false)
- require.NoError(t, err)
-
- for _, row := range r.Named().Rows {
- lastKnownStatus = row["state"].ToString()
- if row["workflow"].ToString() == uuid && statusesMap[lastKnownStatus] {
- countMatchedShards++
- }
+ if row := r.Named().Row(); row != nil {
+ lastKnownStatus, err = row.ToString("state")
+ assert.NoError(t, err)
+ if statusesMap[lastKnownStatus] {
+ return lastKnownStatus
}
}
- if countMatchedShards == len(shards) {
- return lastKnownStatus
- }
time.Sleep(1 * time.Second)
}
return lastKnownStatus
diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
index 6ae2e72b1a9..5a7ae3e1399 100644
--- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go
+++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
@@ -23,6 +23,7 @@ import (
"testing"
"time"
+ "github.com/buger/jsonparser"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -40,12 +41,15 @@ var (
var (
clusterInstance *cluster.LocalProcessCluster
- primary *cluster.Vttablet
- replica *cluster.Vttablet
- shard0Primary *cluster.Vttablet
- shard0Replica *cluster.Vttablet
- shard1Primary *cluster.Vttablet
- shard1Replica *cluster.Vttablet
+ primary *cluster.Vttablet
+ replica1 *cluster.Vttablet
+ replica2 *cluster.Vttablet
+ shard0Primary *cluster.Vttablet
+ shard0Replica1 *cluster.Vttablet
+ shard0Replica2 *cluster.Vttablet
+ shard1Primary *cluster.Vttablet
+ shard1Replica1 *cluster.Vttablet
+ shard1Replica2 *cluster.Vttablet
cell = "zone1"
hostname = "localhost"
@@ -86,6 +90,9 @@ var (
"--lock_tables_timeout", "5s",
"--watch_replication_stream",
"--serving_state_grace_period", "1s"}
+
+ defaultTimeout = 30 * time.Second
+ defaultTick = 1 * time.Second
)
// Test pitr (Point in time recovery).
@@ -127,10 +134,10 @@ func TestPITRRecovery(t *testing.T) {
insertRow(t, 1, "prd-1", false)
insertRow(t, 2, "prd-2", false)
- cluster.VerifyRowsInTabletForTable(t, replica, keyspaceName, 2, "product")
+ cluster.VerifyRowsInTabletForTable(t, replica1, keyspaceName, 2, "product")
// backup the replica
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", replica.Alias)
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
require.NoError(t, err)
// check that the backup shows up in the listing
@@ -166,14 +173,14 @@ func TestPITRRecovery(t *testing.T) {
}
// wait till all the shards have required data
- cluster.VerifyRowsInTabletForTable(t, shard0Replica, keyspaceName, 6, "product")
- cluster.VerifyRowsInTabletForTable(t, shard1Replica, keyspaceName, 4, "product")
+ cluster.VerifyRowsInTabletForTable(t, shard0Replica1, keyspaceName, 6, "product")
+ cluster.VerifyRowsInTabletForTable(t, shard1Replica1, keyspaceName, 4, "product")
// take the backup (to simulate the regular backup)
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard0Replica.Alias)
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard0Replica1.Alias)
require.NoError(t, err)
// take the backup (to simulate the regular backup)
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard1Replica.Alias)
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard1Replica1.Alias)
require.NoError(t, err)
backups, err := clusterInstance.ListBackups(keyspaceName + "/-80")
@@ -287,35 +294,29 @@ func performResharding(t *testing.T) {
err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
require.NoError(t, err)
- err = clusterInstance.VtctlProcess.ExecuteCommand("InitShardPrimary", "--", "--force", "ks/-80", shard0Primary.Alias)
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--source_shards=0", "--target_shards=-80,80-", "Create", "ks.reshardWorkflow")
require.NoError(t, err)
- err = clusterInstance.VtctlProcess.ExecuteCommand("InitShardPrimary", "--", "--force", "ks/80-", shard1Primary.Alias)
- require.NoError(t, err)
+ waitTimeout := 30 * time.Second
+ shard0Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, waitTimeout)
+ shard1Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, waitTimeout)
- // we need to create the schema, and the worker will do data copying
- for _, keyspaceShard := range []string{"ks/-80", "ks/80-"} {
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("CopySchemaShard", "ks/0", keyspaceShard)
- require.NoError(t, err)
- }
+ waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow")
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--v1", "ks.reshardWorkflow", "0", "--", "-80,80-")
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=rdonly", "SwitchTraffic", "ks.reshardWorkflow")
require.NoError(t, err)
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchReads", "--", "--tablet_types=rdonly", "ks.reshardWorkflow")
- require.NoError(t, err)
-
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchReads", "--", "--tablet_types=replica", "ks.reshardWorkflow")
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=replica", "SwitchTraffic", "ks.reshardWorkflow")
require.NoError(t, err)
// then serve primary from the split shards
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchWrites", "ks.reshardWorkflow")
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=primary", "SwitchTraffic", "ks.reshardWorkflow")
require.NoError(t, err)
// remove the original tablets in the original shard
- removeTablets(t, []*cluster.Vttablet{primary, replica})
+ removeTablets(t, []*cluster.Vttablet{primary, replica1, replica2})
- for _, tablet := range []*cluster.Vttablet{replica} {
+ for _, tablet := range []*cluster.Vttablet{replica1, replica2} {
err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias)
require.NoError(t, err)
}
@@ -394,15 +395,18 @@ func initializeCluster(t *testing.T) {
// Defining all the tablets
primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- replica = clusterInstance.NewVttabletInstance("replica", 0, "")
+ replica1 = clusterInstance.NewVttabletInstance("replica", 0, "")
+ replica2 = clusterInstance.NewVttabletInstance("replica", 0, "")
shard0Primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard0Replica = clusterInstance.NewVttabletInstance("replica", 0, "")
+ shard0Replica1 = clusterInstance.NewVttabletInstance("replica", 0, "")
+ shard0Replica2 = clusterInstance.NewVttabletInstance("replica", 0, "")
shard1Primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard1Replica = clusterInstance.NewVttabletInstance("replica", 0, "")
+ shard1Replica1 = clusterInstance.NewVttabletInstance("replica", 0, "")
+ shard1Replica2 = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard.Vttablets = []*cluster.Vttablet{primary, replica}
- shard0.Vttablets = []*cluster.Vttablet{shard0Primary, shard0Replica}
- shard1.Vttablets = []*cluster.Vttablet{shard1Primary, shard1Replica}
+ shard.Vttablets = []*cluster.Vttablet{primary, replica1, replica2}
+ shard0.Vttablets = []*cluster.Vttablet{shard0Primary, shard0Replica1, shard0Replica2}
+ shard1.Vttablets = []*cluster.Vttablet{shard1Primary, shard1Replica1, shard1Replica2}
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, commonTabletArg...)
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup")
@@ -436,19 +440,30 @@ func initializeCluster(t *testing.T) {
"FLUSH PRIVILEGES;",
}
- for _, tablet := range []*cluster.Vttablet{primary, replica, shard0Primary, shard0Replica, shard1Primary, shard1Replica} {
- for _, query := range queryCmds {
- _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false)
+ for _, shard := range clusterInstance.Keyspaces[0].Shards {
+ for _, tablet := range shard.Vttablets {
+ for _, query := range queryCmds {
+ _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false)
+ require.NoError(t, err)
+ }
+
+ err = tablet.VttabletProcess.Setup()
require.NoError(t, err)
}
-
- err = tablet.VttabletProcess.Setup()
- require.NoError(t, err)
}
err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID)
require.NoError(t, err)
+ err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard0.Name, cell, shard0Primary.TabletUID)
+ require.NoError(t, err)
+
+ err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID)
+ require.NoError(t, err)
+
+ err = clusterInstance.StartVTOrc(keyspaceName)
+ require.NoError(t, err)
+
// Start vtgate
err = clusterInstance.StartVtgate()
require.NoError(t, err)
@@ -511,7 +526,6 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
clusterInstance.Hostname,
clusterInstance.TmpDirectory,
clusterInstance.VtTabletExtraArgs,
- clusterInstance.EnableSemiSync,
clusterInstance.DefaultCharset)
tablet.Alias = tablet.VttabletProcess.TabletPath
tablet.VttabletProcess.SupportsBackup = true
@@ -544,3 +558,27 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
tablet.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 20*time.Second)
}
+
+// waitForNoWorkflowLag waits for the VReplication workflow's MaxVReplicationTransactionLag
+// value to be 0.
+func waitForNoWorkflowLag(t *testing.T, vc *cluster.LocalProcessCluster, ksWorkflow string) {
+ lag := int64(0)
+ timer := time.NewTimer(defaultTimeout)
+ defer timer.Stop()
+ for {
+ output, err := vc.VtctlclientProcess.ExecuteCommandWithOutput("Workflow", "--", ksWorkflow, "show")
+ require.NoError(t, err)
+ lag, err = jsonparser.GetInt([]byte(output), "MaxVReplicationTransactionLag")
+ require.NoError(t, err)
+ if lag == 0 {
+ return
+ }
+ select {
+ case <-timer.C:
+ require.FailNow(t, fmt.Sprintf("workflow %q did not eliminate VReplication lag before the timeout of %s; last seen MaxVReplicationTransactionLag: %d",
+ ksWorkflow, defaultTimeout, lag))
+ default:
+ time.Sleep(defaultTick)
+ }
+ }
+}
diff --git a/go/test/endtoend/recovery/pitrtls/shardedpitr_tls_test.go b/go/test/endtoend/recovery/pitrtls/shardedpitr_tls_test.go
deleted file mode 100644
index ab0020d3734..00000000000
--- a/go/test/endtoend/recovery/pitrtls/shardedpitr_tls_test.go
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package pitrtls
-
-import (
- "context"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "os"
- "os/exec"
- "path"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/test/endtoend/cluster"
- "vitess.io/vitess/go/vt/log"
-)
-
-var (
- createTable = `create table product (id bigint(20) primary key, name char(10), created bigint(20));`
- insertTable = `insert into product (id, name, created) values(%d, '%s', unix_timestamp());`
- getCountID = `select count(*) from product`
-)
-
-var (
- clusterInstance *cluster.LocalProcessCluster
-
- primary *cluster.Vttablet
- replica *cluster.Vttablet
- shard0Primary *cluster.Vttablet
- shard0Replica *cluster.Vttablet
- shard1Primary *cluster.Vttablet
- shard1Replica *cluster.Vttablet
-
- cell = "zone1"
- hostname = "localhost"
- keyspaceName = "ks"
- restoreKS1Name = "restoreks1"
- restoreKS2Name = "restoreks2"
- restoreKS3Name = "restoreks3"
- shardName = "0"
- shard0Name = "-80"
- shard1Name = "80-"
- dbName = "vt_ks"
- mysqlUserName = "vt_dba"
- mysqlPassword = "password"
- vSchema = `{
- "sharded": true,
- "vindexes": {
- "hash_index": {
- "type": "hash"
- }
- },
- "tables": {
- "product": {
- "column_vindexes": [
- {
- "column": "id",
- "name": "hash_index"
- }
- ]
- }
- }
- }`
- commonTabletArg = []string{
- "--vreplication_healthcheck_topology_refresh", "1s",
- "--vreplication_healthcheck_retry_delay", "1s",
- "--vreplication_retry_delay", "1s",
- "--degraded_threshold", "5s",
- "--lock_tables_timeout", "5s",
- "--watch_replication_stream",
- "--serving_state_grace_period", "1s"}
-)
-
-func removeTablets(t *testing.T, tablets []*cluster.Vttablet) {
- var mysqlProcs []*exec.Cmd
- for _, tablet := range tablets {
- proc, _ := tablet.MysqlctlProcess.StopProcess()
- mysqlProcs = append(mysqlProcs, proc)
- }
- for _, proc := range mysqlProcs {
- err := proc.Wait()
- require.NoError(t, err)
- }
- for _, tablet := range tablets {
- tablet.VttabletProcess.TearDown()
- }
-}
-
-func initializeCluster(t *testing.T) {
- clusterInstance = cluster.NewCluster(cell, hostname)
-
- // Start topo server
- err := clusterInstance.StartTopo()
- require.NoError(t, err)
-
- // Start keyspace
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- }
- clusterInstance.Keyspaces = append(clusterInstance.Keyspaces, *keyspace)
-
- shard := &cluster.Shard{
- Name: shardName,
- }
- shard0 := &cluster.Shard{
- Name: shard0Name,
- }
- shard1 := &cluster.Shard{
- Name: shard1Name,
- }
-
- // Defining all the tablets
- primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- replica = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard0Primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard0Replica = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard1Primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard1Replica = clusterInstance.NewVttabletInstance("replica", 0, "")
-
- shard.Vttablets = []*cluster.Vttablet{primary, replica}
- shard0.Vttablets = []*cluster.Vttablet{shard0Primary, shard0Replica}
- shard1.Vttablets = []*cluster.Vttablet{shard1Primary, shard1Replica}
-
- clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, commonTabletArg...)
- clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup")
-
- err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard, *shard0, *shard1})
- require.NoError(t, err)
- vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory)
- out, err := vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync")
- require.NoError(t, err, out)
- // Start MySql
- var mysqlCtlProcessList []*exec.Cmd
- for _, shard := range clusterInstance.Keyspaces[0].Shards {
- for _, tablet := range shard.Vttablets {
- tablet.MysqlctlProcess.SecureTransport = true
- proc, err := tablet.MysqlctlProcess.StartProcess()
- require.NoError(t, err)
- mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
- }
- }
-
- // Wait for mysql processes to start
- for _, proc := range mysqlCtlProcessList {
- err = proc.Wait()
- require.NoError(t, err)
- }
-
- queryCmds := []string{
- fmt.Sprintf("CREATE USER '%s'@'%%' IDENTIFIED BY '%s';", mysqlUserName, mysqlPassword),
- fmt.Sprintf("GRANT ALL ON *.* TO '%s'@'%%';", mysqlUserName),
- fmt.Sprintf("GRANT GRANT OPTION ON *.* TO '%s'@'%%';", mysqlUserName),
- fmt.Sprintf("create database %s;", "vt_ks"),
- "FLUSH PRIVILEGES;",
- }
-
- for _, tablet := range []*cluster.Vttablet{primary, replica, shard0Primary, shard0Replica, shard1Primary, shard1Replica} {
- for _, query := range queryCmds {
- _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false)
- require.NoError(t, err)
- }
-
- err = tablet.VttabletProcess.Setup()
- require.NoError(t, err)
- }
-
- err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID)
- require.NoError(t, err)
-
- // Start vtgate
- err = clusterInstance.StartVtgate()
- require.NoError(t, err)
-}
-
-func insertRow(t *testing.T, id int, productName string, isSlow bool) {
- ctx := context.Background()
- vtParams := mysql.ConnParams{
- Host: clusterInstance.Hostname,
- Port: clusterInstance.VtgateMySQLPort,
- }
- conn, err := mysql.Connect(ctx, &vtParams)
- require.NoError(t, err)
- defer conn.Close()
-
- insertSmt := fmt.Sprintf(insertTable, id, productName)
- _, err = conn.ExecuteFetch(insertSmt, 1000, true)
- require.NoError(t, err)
-
- if isSlow {
- time.Sleep(1 * time.Second)
- }
-}
-
-func createRestoreKeyspace(t *testing.T, timeToRecover, restoreKeyspaceName string) {
- output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--",
- "--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName,
- "--snapshot_time", timeToRecover, restoreKeyspaceName)
- log.Info(output)
- require.NoError(t, err)
-}
-
-// Test pitr (Point in time recovery).
-// -------------------------------------------
-// The following test will:
-// - create a shard with primary and replica
-// - run InitShardPrimary
-// - insert some data using vtgate (e.g. here we have inserted rows 1,2)
-// - verify the replication
-// - take backup of replica
-// - insert some data using vtgate (e.g. we inserted rows 3 4 5 6), while inserting row-4, note down the time (restoreTime1)
-// - perform a resharding to create 2 shards (-80, 80-), and delete the old shard
-// - insert some data using vtgate (e.g. we will insert 7 8 9 10) and verify we get required number of rows in -80, 80- shard
-// - take backup of both shards
-// - insert some more data using vtgate (e.g. we will insert 11 12 13 14 15), while inserting row-13, note down the time (restoreTime2)
-// - note down the current time (restoreTime3)
-
-// - Till now we did all the presetup for assertions
-
-// - asserting that restoring to restoreTime1 (going from 2 shards to 1 shard) is working, i.e. we should get 4 rows.
-// - asserting that while restoring if we give small timeout value, it will restore upto to the last available backup (asserting only -80 shard)
-// - asserting that restoring to restoreTime2 (going from 2 shards to 2 shards with past time) is working, it will assert for both shards
-// - asserting that restoring to restoreTime3 is working, we should get complete data after restoring, as we have in existing shards.
-func TestTLSPITRRecovery(t *testing.T) {
- defer cluster.PanicHandler(nil)
- initializeCluster(t)
- defer clusterInstance.Teardown()
-
- // Creating the table
- _, err := primary.VttabletProcess.QueryTablet(createTable, keyspaceName, true)
- require.NoError(t, err)
-
- insertRow(t, 1, "prd-1", false)
- insertRow(t, 2, "prd-2", false)
-
- cluster.VerifyRowsInTabletForTable(t, replica, keyspaceName, 2, "product")
-
- // backup the replica
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", replica.Alias)
- require.NoError(t, err)
-
- // check that the backup shows up in the listing
- output, err := clusterInstance.ListBackups("ks/0")
- require.NoError(t, err)
- assert.Equal(t, 1, len(output))
-
- // now insert some more data to simulate the changes after regular backup
- // every insert has some time lag/difference to simulate the time gap between rows
- // and when we recover to certain time, this time gap will be able to identify the exact eligible row
- var restoreTime1 string
- for counter := 3; counter <= 6; counter++ {
- if counter == 4 { // we want to recovery till this, so noting the time
- tm := time.Now().Add(1 * time.Second).UTC()
- restoreTime1 = tm.Format(time.RFC3339)
- }
- insertRow(t, counter, fmt.Sprintf("prd-%d", counter), true)
- }
-
- // creating restore keyspace with snapshot time as restoreTime1
- // Need to test this before resharding and we tear down the
- // original mysql replica, which we use as a binlog source
- createRestoreKeyspace(t, restoreTime1, restoreKS1Name)
-
- // Launching a recovery tablet which recovers data from the primary till the restoreTime1
- tlsTestTabletRecovery(t, replica, "2m", restoreKS1Name, "0", "INT64(4)")
-
- // starting resharding process
- tlsPerformResharding(t)
-
- for counter := 7; counter <= 10; counter++ {
- insertRow(t, counter, fmt.Sprintf("prd-%d", counter), false)
- }
-
- // wait till all the shards have required data
- cluster.VerifyRowsInTabletForTable(t, shard0Replica, keyspaceName, 6, "product")
- cluster.VerifyRowsInTabletForTable(t, shard1Replica, keyspaceName, 4, "product")
-
- // take the backup (to simulate the regular backup)
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard0Replica.Alias)
- require.NoError(t, err)
- // take the backup (to simulate the regular backup)
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard1Replica.Alias)
- require.NoError(t, err)
-
- backups, err := clusterInstance.ListBackups(keyspaceName + "/-80")
- require.NoError(t, err)
- require.Equal(t, len(backups), 1)
-
- backups, err = clusterInstance.ListBackups(keyspaceName + "/80-")
- require.NoError(t, err)
- require.Equal(t, len(backups), 1)
-
- // now insert some more data to simulate the changes after regular backup
- // every insert has some time lag/difference to simulate the time gap between rows
- // and when we recover to certain time, this time gap will be able to identify the exact eligible row
- var restoreTime2 string
- for counter := 11; counter <= 15; counter++ {
- if counter == 13 { // we want to recovery till this, so noting the time
- tm := time.Now().Add(1 * time.Second).UTC()
- restoreTime2 = tm.Format(time.RFC3339)
- }
- insertRow(t, counter, fmt.Sprintf("prd-%d", counter), true)
- }
- restoreTime3 := time.Now().UTC().Format(time.RFC3339)
-
- // create restoreKeyspace with snapshot time as restoreTime2
- createRestoreKeyspace(t, restoreTime2, restoreKS2Name)
-
- // test the recovery with smaller binlog_lookup_timeout for shard0
- // since we have small lookup timeout, it will just get whatever available in the backup
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 1 | prd-1 | 1597219030 |
- // | 2 | prd-2 | 1597219030 |
- // | 3 | prd-3 | 1597219043 |
- // | 5 | prd-5 | 1597219045 |
- // | 9 | prd-9 | 1597219130 |
- // | 10 | prd-10 | 1597219130 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard0Replica, "1ms", restoreKS2Name, "-80", "INT64(6)")
-
- // test the recovery with valid binlog_lookup_timeout for shard0 and getting the data till the restoreTime2
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 1 | prd-1 | 1597219030 |
- // | 2 | prd-2 | 1597219030 |
- // | 3 | prd-3 | 1597219043 |
- // | 5 | prd-5 | 1597219045 |
- // | 9 | prd-9 | 1597219130 |
- // | 10 | prd-10 | 1597219130 |
- // | 13 | prd-13 | 1597219141 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard0Replica, "2m", restoreKS2Name, "-80", "INT64(7)")
-
- // test the recovery with valid binlog_lookup_timeout for shard1 and getting the data till the restoreTime2
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 4 | prd-4 | 1597219044 |
- // | 6 | prd-6 | 1597219046 |
- // | 7 | prd-7 | 1597219130 |
- // | 8 | prd-8 | 1597219130 |
- // | 11 | prd-11 | 1597219139 |
- // | 12 | prd-12 | 1597219140 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard1Replica, "2m", restoreKS2Name, "80-", "INT64(6)")
-
- // test the recovery with timetorecover > (timestamp of last binlog event in binlog server)
- createRestoreKeyspace(t, restoreTime3, restoreKS3Name)
-
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 1 | prd-1 | 1597219030 |
- // | 2 | prd-2 | 1597219030 |
- // | 3 | prd-3 | 1597219043 |
- // | 5 | prd-5 | 1597219045 |
- // | 9 | prd-9 | 1597219130 |
- // | 10 | prd-10 | 1597219130 |
- // | 13 | prd-13 | 1597219141 |
- // | 15 | prd-15 | 1597219142 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard0Replica, "2m", restoreKS3Name, "-80", "INT64(8)")
-
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 4 | prd-4 | 1597219044 |
- // | 6 | prd-6 | 1597219046 |
- // | 7 | prd-7 | 1597219130 |
- // | 8 | prd-8 | 1597219130 |
- // | 11 | prd-11 | 1597219139 |
- // | 12 | prd-12 | 1597219140 |
- // | 14 | prd-14 | 1597219142 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard1Replica, "2m", restoreKS3Name, "80-", "INT64(7)")
-}
-
-func tlsPerformResharding(t *testing.T) {
- err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
- require.NoError(t, err)
-
- err = clusterInstance.VtctlProcess.ExecuteCommand("InitShardPrimary", "--", "--force", "ks/-80", shard0Primary.Alias)
- require.NoError(t, err)
-
- err = clusterInstance.VtctlProcess.ExecuteCommand("InitShardPrimary", "--", "--force", "ks/80-", shard1Primary.Alias)
- require.NoError(t, err)
-
- // we need to create the schema, and the worker will do data copying
- for _, keyspaceShard := range []string{"ks/-80", "ks/80-"} {
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("CopySchemaShard", "ks/0", keyspaceShard)
- require.NoError(t, err)
- }
-
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "ks.reshardWorkflow", "0", "--", "-80,80-")
- require.NoError(t, err)
-
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchReads", "--", "--tablet_type=rdonly", "ks.reshardWorkflow")
- require.NoError(t, err)
-
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchReads", "--", "--tablet_type=replica", "ks.reshardWorkflow")
- require.NoError(t, err)
-
- // then serve primary from the split shards
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchWrites", "ks.reshardWorkflow")
- require.NoError(t, err)
-
- // remove the original tablets in the original shard
- removeTablets(t, []*cluster.Vttablet{primary, replica})
-
- for _, tablet := range []*cluster.Vttablet{replica} {
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias)
- require.NoError(t, err)
- }
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", primary.Alias)
- require.NoError(t, err)
-
- // rebuild the serving graph, all mentions of the old shards should be gone
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks")
- require.NoError(t, err)
-
- // delete the original shard
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteShard", "ks/0")
- require.NoError(t, err)
-
- // Restart vtgate process
- err = clusterInstance.VtgateProcess.TearDown()
- require.NoError(t, err)
-
- err = clusterInstance.VtgateProcess.Setup()
- require.NoError(t, err)
-
- clusterInstance.WaitForTabletsToHealthyInVtgate()
-}
-
-func tlsTestTabletRecovery(t *testing.T, tabletForBinlogs *cluster.Vttablet, lookupTimeout, restoreKeyspaceName, shardName, expectedRows string) {
- recoveryTablet := clusterInstance.NewVttabletInstance("replica", 0, cell)
- tlsLaunchRecoveryTablet(t, recoveryTablet, tabletForBinlogs, lookupTimeout, restoreKeyspaceName, shardName)
-
- sqlRes, err := recoveryTablet.VttabletProcess.QueryTablet(getCountID, keyspaceName, true)
- require.NoError(t, err)
- assert.Equal(t, expectedRows, sqlRes.Rows[0][0].String())
-
- defer recoveryTablet.MysqlctlProcess.Stop()
- defer recoveryTablet.VttabletProcess.TearDown()
-}
-
-func tlsLaunchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, tabletForBinlogs *cluster.Vttablet, lookupTimeout, restoreKeyspaceName, shardName string) {
- tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory)
- tablet.MysqlctlProcess.SecureTransport = true
- err := tablet.MysqlctlProcess.Start()
- require.NoError(t, err)
-
- tablet.VttabletProcess = cluster.VttabletProcessInstance(
- tablet.HTTPPort,
- tablet.GrpcPort,
- tablet.TabletUID,
- clusterInstance.Cell,
- shardName,
- keyspaceName,
- clusterInstance.VtctldProcess.Port,
- tablet.Type,
- clusterInstance.TopoProcess.Port,
- clusterInstance.Hostname,
- clusterInstance.TmpDirectory,
- clusterInstance.VtTabletExtraArgs,
- clusterInstance.EnableSemiSync,
- clusterInstance.DefaultCharset)
- tablet.Alias = tablet.VttabletProcess.TabletPath
- tablet.VttabletProcess.SupportsBackup = true
- tablet.VttabletProcess.Keyspace = restoreKeyspaceName
-
- certDir := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/ssl_%010d", tablet.MysqlctlProcess.TabletUID))
- tablet.VttabletProcess.ExtraArgs = []string{
- "--disable_active_reparents",
- "--enable_replication_reporter=false",
- "--init_db_name_override", dbName,
- "--init_tablet_type", "replica",
- "--init_keyspace", restoreKeyspaceName,
- "--init_shard", shardName,
- "--binlog_host", clusterInstance.Hostname,
- "--binlog_port", fmt.Sprintf("%d", tabletForBinlogs.MySQLPort),
- "--binlog_user", mysqlUserName,
- "--binlog_password", mysqlPassword,
- "--binlog_ssl_ca", certDir + "/ca-cert.pem",
- "--binlog_ssl_server_name", getCNFromCertPEM(certDir + "/server-001-cert.pem"),
- "--pitr_gtid_lookup_timeout", lookupTimeout,
- "--vreplication_healthcheck_topology_refresh", "1s",
- "--vreplication_healthcheck_retry_delay", "1s",
- "--vreplication_tablet_type", "replica",
- "--vreplication_retry_delay", "1s",
- "--degraded_threshold", "5s",
- "--lock_tables_timeout", "5s",
- "--watch_replication_stream",
- "--serving_state_grace_period", "1s",
- }
- tablet.VttabletProcess.ServingStatus = ""
-
- err = tablet.VttabletProcess.Setup()
- require.NoError(t, err)
-
- tablet.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 20*time.Second)
-}
-
-func getCNFromCertPEM(filename string) string {
- pemBytes, _ := os.ReadFile(filename)
- block, _ := pem.Decode(pemBytes)
- cert, _ := x509.ParseCertificate(block.Bytes)
- rdn := cert.Subject.ToRDNSequence()[0][0]
- t := rdn.Type
-
- // 2.5.4.3 is ASN OID for "CN"
- if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 && t[3] == 3 {
- return fmt.Sprintf("%s", rdn.Value)
- }
- // As good a fallback as any
- return "localhost"
-}
diff --git a/go/test/endtoend/recovery/recovery_util.go b/go/test/endtoend/recovery/recovery_util.go
index acc1d8ce616..cffae6a5005 100644
--- a/go/test/endtoend/recovery/recovery_util.go
+++ b/go/test/endtoend/recovery/recovery_util.go
@@ -51,18 +51,20 @@ func VerifyQueriesUsingVtgate(t *testing.T, session *vtgateconn.VTGateSession, q
}
// RestoreTablet performs a PITR restore.
-func RestoreTablet(t *testing.T, localCluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet, restoreKSName string, shardName string, keyspaceName string, commonTabletArg []string) {
+func RestoreTablet(t *testing.T, localCluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet, restoreKSName string, shardName string, keyspaceName string, commonTabletArg []string, restoreTime time.Time) {
tablet.ValidateTabletRestart(t)
replicaTabletArgs := commonTabletArg
_, err := localCluster.VtctlProcess.ExecuteCommandWithOutput("GetKeyspace", restoreKSName)
+ if restoreTime.IsZero() {
+ restoreTime = time.Now().UTC()
+ }
+
if err != nil {
- tm := time.Now().UTC()
- tm.Format(time.RFC3339)
_, err := localCluster.VtctlProcess.ExecuteCommandWithOutput("CreateKeyspace", "--",
"--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName,
- "--snapshot_time", tm.Format(time.RFC3339), restoreKSName)
+ "--snapshot_time", restoreTime.Format(time.RFC3339), restoreKSName)
require.Nil(t, err)
}
diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
index 68c66a7bbc0..6190530ca31 100644
--- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go
+++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
@@ -24,6 +24,7 @@ import (
"os/exec"
"path"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -160,6 +161,9 @@ SET GLOBAL old_alter_table = ON;
if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil {
return 1, err
}
+ if err := localCluster.StartVTOrc(keyspaceName); err != nil {
+ return 1, err
+ }
return m.Run(), nil
}()
@@ -173,24 +177,29 @@ SET GLOBAL old_alter_table = ON;
}
// TestRecoveryImpl does following
-// - create a shard with primary and replica1 only
-// - run InitShardPrimary
-// - insert some data
-// - take a backup
-// - insert more data on the primary
-// - take another backup
-// - create a recovery keyspace after first backup
-// - bring up tablet_replica2 in the new keyspace
-// - check that new tablet does not have data created after backup1
-// - create second recovery keyspace after second backup
-// - bring up tablet_replica3 in second keyspace
-// - check that new tablet has data created after backup1 but not data created after backup2
-// - check that vtgate queries work correctly
+// 1. create a shard with primary and replica1 only
+// - run InitShardPrimary
+// - insert some data
+//
+// 2. take a backup
+// 3.create a recovery keyspace after first backup
+// - bring up tablet_replica2 in the new keyspace
+// - check that new tablet has data from backup1
+//
+// 4. insert more data on the primary
+//
+// 5. take another backup
+// 6. create a recovery keyspace after second backup
+// - bring up tablet_replica3 in the new keyspace
+// - check that new tablet has data from backup2
+//
+// 7. check that vtgate queries work correctly
func TestRecoveryImpl(t *testing.T) {
defer cluster.PanicHandler(t)
defer tabletsTeardown()
verifyInitialReplication(t)
+ // take first backup of value = test1
err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
@@ -198,10 +207,6 @@ func TestRecoveryImpl(t *testing.T) {
require.Equal(t, len(backups), 1)
assert.Contains(t, backups[0], replica1.Alias)
- _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
- assert.NoError(t, err)
- cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2)
-
err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
assert.NoError(t, err)
@@ -209,66 +214,80 @@ func TestRecoveryImpl(t *testing.T) {
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
- recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg)
+ // restore with latest backup
+ restoreTime := time.Now().UTC()
+ recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg, restoreTime)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell)
assert.NoError(t, err)
assert.Contains(t, output, keyspaceName)
assert.Contains(t, output, recoveryKS1)
- err = localCluster.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, keyspaceName)
- assert.NoError(t, err)
-
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 1)
- cluster.VerifyLocalMetadata(t, replica2, recoveryKS1, shardName, cell)
+ // verify that restored replica has value = test1
+ qr, err := replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
+ assert.NoError(t, err)
+ assert.Equal(t, "test1", qr.Rows[0][0].ToString())
+
+ // insert new row on primary
+ _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
+ assert.NoError(t, err)
+ cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2)
// update the original row in primary
_, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx1' where id = 1", keyspaceName, true)
assert.NoError(t, err)
- //verify that primary has new value
- qr, err := primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
+ // verify that primary has new value
+ qr, err = primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx1", qr.Rows[0][0].ToString())
- //verify that restored replica has old value
- qr, err = replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
- assert.NoError(t, err)
- assert.Equal(t, "test1", qr.Rows[0][0].ToString())
+ // check that replica1, used for the backup, has the new value
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
- err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
- assert.NoError(t, err)
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
- _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true)
- assert.NoError(t, err)
- cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3)
+ for {
+ qr, err = replica1.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
+ assert.NoError(t, err)
+ if qr.Rows[0][0].ToString() == "msgx1" {
+ break
+ }
- recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg)
+ select {
+ case <-ctx.Done():
+ t.Error("timeout waiting for new value to be replicated on replica 1")
+ break
+ case <-ticker.C:
+ }
+ }
- output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
+ // take second backup of value = msgx1
+ err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
- assert.Contains(t, output, "vt_insert_test")
- cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 2)
+ // restore to first backup
+ recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg, restoreTime)
- // update the original row in primary
- _, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx2' where id = 1", keyspaceName, true)
+ output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
assert.NoError(t, err)
+ assert.Contains(t, output, "vt_insert_test")
- //verify that primary has new value
- qr, err = primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
- assert.NoError(t, err)
- assert.Equal(t, "msgx2", qr.Rows[0][0].ToString())
+ // only one row from first backup
+ cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 1)
- //verify that restored replica has old value
+ //verify that restored replica has value = test1
qr, err = replica3.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
- assert.Equal(t, "msgx1", qr.Rows[0][0].ToString())
+ assert.Equal(t, "test1", qr.Rows[0][0].ToString())
vtgateInstance := localCluster.NewVtgateInstance()
vtgateInstance.TabletTypesToWait = "REPLICA"
@@ -276,14 +295,10 @@ func TestRecoveryImpl(t *testing.T) {
localCluster.VtgateGrpcPort = vtgateInstance.GrpcPort
assert.NoError(t, err)
defer vtgateInstance.TearDown()
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shardName), 1)
- assert.NoError(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1)
- assert.NoError(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1)
- assert.NoError(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1)
- assert.NoError(t, err)
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shardName), 1, 30*time.Second))
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1, 30*time.Second))
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1, 30*time.Second))
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1, 30*time.Second))
// Build vtgate grpc connection
grpcAddress := fmt.Sprintf("%s:%d", localCluster.Hostname, localCluster.VtgateGrpcPort)
@@ -292,27 +307,27 @@ func TestRecoveryImpl(t *testing.T) {
defer vtgateConn.Close()
session := vtgateConn.Session("@replica", nil)
- //check that vtgate doesn't route queries to new tablet
- recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(3)")
- recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx2")`)
+ // check that vtgate doesn't route queries to new tablet
+ recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
+ recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx1")`)
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS1), "INT64(1)")
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS1), `VARCHAR("test1")`)
- recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(2)")
- recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("msgx1")`)
+ recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(1)")
+ recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("test1")`)
// check that new keyspace is accessible with 'use ks'
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS1+"@replica")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS2+"@replica")
- recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
+ recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
// check that new tablet is accessible with use `ks:shard`
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS1+":0@replica`")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS2+":0@replica`")
- recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
+ recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
}
// verifyInitialReplication will create schema in primary, insert some data to primary and verify the same data in replica.
diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
index 3564ba3badb..8f6638ecb7e 100644
--- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go
+++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
@@ -387,8 +387,6 @@ func TestERSForInitialization(t *testing.T) {
shard.Vttablets = tablets
clusterInstance.VtTabletExtraArgs = []string{
"--lock_tables_timeout", "5s",
- "--enable_semi_sync",
- "--init_populate_metadata",
"--track_schema_versions=true",
}
diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
index b450fb44420..db7784f6459 100644
--- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
+++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
@@ -17,155 +17,95 @@ limitations under the License.
package newfeaturetest
import (
- "strconv"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "google.golang.org/protobuf/encoding/protojson"
-
- "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/reparent/utils"
- replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
)
-// TestCrossCellDurability tests 2 things -
-// 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected
-// 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any external interference
-func TestCrossCellDurability(t *testing.T) {
+// TestRecoverWithMultipleVttabletFailures tests that ERS succeeds with the default values
+// even when there are multiple vttablet failures. In this test we use the semi_sync policy
+// to allow multiple failures to happen and still be recoverable.
+// The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the
+// default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag.
+func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
defer cluster.PanicHandler(t)
- clusterInstance := utils.SetupReparentCluster(t, "cross_cell")
+ clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
-
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
- // When tablets[0] is the primary, the only tablet in a different cell is tablets[3].
- // So the other two should have semi-sync turned off
- utils.CheckSemiSyncSetupCorrectly(t, tablets[0], "ON")
- utils.CheckSemiSyncSetupCorrectly(t, tablets[3], "ON")
- utils.CheckSemiSyncSetupCorrectly(t, tablets[1], "OFF")
- utils.CheckSemiSyncSetupCorrectly(t, tablets[2], "OFF")
+ // make tablets[1] a rdonly tablet.
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
+ require.NoError(t, err)
+
+ // Confirm that replication is still working as intended
+ utils.ConfirmReplication(t, tablets[0], tablets[1:])
+
+ // Make the rdonly and primary tablets and databases unavailable.
+ utils.StopTablet(t, tablets[1], true)
+ utils.StopTablet(t, tablets[0], true)
- // Run forced reparent operation, this should proceed unimpeded.
- out, err := utils.Prs(t, clusterInstance, tablets[3])
+ // We expect this to succeed since we only have 1 primary eligible tablet which is down
+ out, err := utils.Ers(clusterInstance, nil, "", "")
require.NoError(t, err, out)
- utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2]})
-
- // All the tablets will have semi-sync setup since tablets[3] is in Cell2 and all
- // others are in Cell1, so all of them are eligible to send semi-sync ACKs
- for _, tablet := range tablets {
- utils.CheckSemiSyncSetupCorrectly(t, tablet, "ON")
- }
-
- for i, supportsBackup := range []bool{false, true} {
- // Bring up a new replica tablet
- // In this new tablet, we do not disable active reparents, otherwise replication will not be started.
- newReplica := utils.StartNewVTTablet(t, clusterInstance, 300+i, supportsBackup)
- // Add the tablet to the list of tablets in this shard
- clusterInstance.Keyspaces[0].Shards[0].Vttablets = append(clusterInstance.Keyspaces[0].Shards[0].Vttablets, newReplica)
- // Check that we can replicate to it and semi-sync is setup correctly on it
- utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2], newReplica})
- utils.CheckSemiSyncSetupCorrectly(t, newReplica, "ON")
- }
+ newPrimary := utils.GetNewPrimary(t, clusterInstance)
+ utils.ConfirmReplication(t, newPrimary, []*cluster.Vttablet{tablets[2], tablets[3]})
}
-// TestFullStatus tests that the RPC FullStatus works as intended.
-func TestFullStatus(t *testing.T) {
+// TetsSingeReplicaERS tests that ERS works even when there is only 1 tablet left
+// as long the durability policy allows this failure. Moreover, this also tests that the
+// replica is one such that it was a primary itself before. This way its executed gtid set
+// will have atleast 2 tablets in it. We want to make sure this tablet is not marked as errant
+// and ERS succeeds.
+func TestSingleReplicaERS(t *testing.T) {
+ // Set up a cluster with none durability policy
defer cluster.PanicHandler(t)
- clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
+ clusterInstance := utils.SetupReparentCluster(t, "none")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+ // Confirm that the replication is setup correctly in the beginning.
+ // tablets[0] is the primary tablet in the beginning.
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
- // Check that full status gives the correct result for a primary tablet
- primaryStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablets[0].Alias)
- require.NoError(t, err)
- primaryStatus := &replicationdatapb.FullStatus{}
- err = protojson.Unmarshal([]byte(primaryStatusString), primaryStatus)
- require.NoError(t, err)
- assert.NotEmpty(t, primaryStatus.ServerUuid)
- assert.NotEmpty(t, primaryStatus.ServerId)
- // For a primary tablet there is no replication status
- assert.Nil(t, primaryStatus.ReplicationStatus)
- assert.Contains(t, primaryStatus.PrimaryStatus.String(), "vt-0000000101-bin")
- assert.Equal(t, primaryStatus.GtidPurged, "MySQL56/")
- assert.False(t, primaryStatus.ReadOnly)
- assert.True(t, primaryStatus.SemiSyncPrimaryEnabled)
- assert.True(t, primaryStatus.SemiSyncReplicaEnabled)
- assert.True(t, primaryStatus.SemiSyncPrimaryStatus)
- assert.False(t, primaryStatus.SemiSyncReplicaStatus)
- assert.EqualValues(t, 3, primaryStatus.SemiSyncPrimaryClients)
- assert.EqualValues(t, 1000000000000000000, primaryStatus.SemiSyncPrimaryTimeout)
- assert.EqualValues(t, 1, primaryStatus.SemiSyncWaitForReplicaCount)
- assert.Equal(t, "ROW", primaryStatus.BinlogFormat)
- assert.Equal(t, "FULL", primaryStatus.BinlogRowImage)
- assert.Equal(t, "ON", primaryStatus.GtidMode)
- assert.True(t, primaryStatus.LogReplicaUpdates)
- assert.True(t, primaryStatus.LogBinEnabled)
- assert.Regexp(t, `[58]\.[07].*`, primaryStatus.Version)
- assert.NotEmpty(t, primaryStatus.VersionComment)
-
- // Check that full status gives the correct result for a replica tablet
- replicaStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablets[1].Alias)
- require.NoError(t, err)
- replicaStatus := &replicationdatapb.FullStatus{}
- err = protojson.Unmarshal([]byte(replicaStatusString), replicaStatus)
- require.NoError(t, err)
- assert.NotEmpty(t, replicaStatus.ServerUuid)
- assert.NotEmpty(t, replicaStatus.ServerId)
- assert.Contains(t, replicaStatus.ReplicationStatus.Position, "MySQL56/"+replicaStatus.ReplicationStatus.SourceUuid)
- assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState)
- assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState)
- assert.Equal(t, fileNameFromPosition(replicaStatus.ReplicationStatus.FilePosition), fileNameFromPosition(primaryStatus.PrimaryStatus.FilePosition))
- assert.LessOrEqual(t, rowNumberFromPosition(replicaStatus.ReplicationStatus.FilePosition), rowNumberFromPosition(primaryStatus.PrimaryStatus.FilePosition))
- assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition, primaryStatus.PrimaryStatus.FilePosition)
- assert.Contains(t, replicaStatus.ReplicationStatus.RelayLogFilePosition, "vt-0000000102-relay")
- assert.Equal(t, replicaStatus.ReplicationStatus.Position, primaryStatus.PrimaryStatus.Position)
- assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogPosition, primaryStatus.PrimaryStatus.Position)
- assert.Empty(t, replicaStatus.ReplicationStatus.LastIoError)
- assert.Empty(t, replicaStatus.ReplicationStatus.LastSqlError)
- assert.Equal(t, replicaStatus.ReplicationStatus.SourceUuid, primaryStatus.ServerUuid)
- assert.LessOrEqual(t, int(replicaStatus.ReplicationStatus.ReplicationLagSeconds), 1)
- assert.False(t, replicaStatus.ReplicationStatus.ReplicationLagUnknown)
- assert.EqualValues(t, 0, replicaStatus.ReplicationStatus.SqlDelay)
- assert.False(t, replicaStatus.ReplicationStatus.SslAllowed)
- assert.False(t, replicaStatus.ReplicationStatus.HasReplicationFilters)
- assert.False(t, replicaStatus.ReplicationStatus.UsingGtid)
- assert.True(t, replicaStatus.ReplicationStatus.AutoPosition)
- assert.Equal(t, replicaStatus.ReplicationStatus.SourceHost, utils.Hostname)
- assert.EqualValues(t, replicaStatus.ReplicationStatus.SourcePort, tablets[0].MySQLPort)
- assert.Equal(t, replicaStatus.ReplicationStatus.SourceUser, "vt_repl")
- assert.Contains(t, replicaStatus.PrimaryStatus.String(), "vt-0000000102-bin")
- assert.Equal(t, replicaStatus.GtidPurged, "MySQL56/")
- assert.True(t, replicaStatus.ReadOnly)
- assert.False(t, replicaStatus.SemiSyncPrimaryEnabled)
- assert.True(t, replicaStatus.SemiSyncReplicaEnabled)
- assert.False(t, replicaStatus.SemiSyncPrimaryStatus)
- assert.True(t, replicaStatus.SemiSyncReplicaStatus)
- assert.EqualValues(t, 0, replicaStatus.SemiSyncPrimaryClients)
- assert.EqualValues(t, 1000000000000000000, replicaStatus.SemiSyncPrimaryTimeout)
- assert.EqualValues(t, 1, replicaStatus.SemiSyncWaitForReplicaCount)
- assert.Equal(t, "ROW", replicaStatus.BinlogFormat)
- assert.Equal(t, "FULL", replicaStatus.BinlogRowImage)
- assert.Equal(t, "ON", replicaStatus.GtidMode)
- assert.True(t, replicaStatus.LogReplicaUpdates)
- assert.True(t, replicaStatus.LogBinEnabled)
- assert.Regexp(t, `[58]\.[07].*`, replicaStatus.Version)
- assert.NotEmpty(t, replicaStatus.VersionComment)
-}
+ // Delete and stop two tablets. We only want to have 2 tablets for this test.
+ utils.DeleteTablet(t, clusterInstance, tablets[2])
+ utils.DeleteTablet(t, clusterInstance, tablets[3])
+ utils.StopTablet(t, tablets[2], true)
+ utils.StopTablet(t, tablets[3], true)
+
+ // Reparent to the other replica
+ output, err := utils.Prs(t, clusterInstance, tablets[1])
+ require.NoError(t, err, "error in PlannedReparentShard output - %s", output)
+
+ // Check the replication is set up correctly before we failover
+ utils.ConfirmReplication(t, tablets[1], []*cluster.Vttablet{tablets[0]})
-// fileNameFromPosition gets the file name from the position
-func fileNameFromPosition(pos string) string {
- return pos[0 : len(pos)-4]
+ // Make the current primary vttablet unavailable.
+ utils.StopTablet(t, tablets[1], true)
+
+ // Run an ERS with only one replica reachable. Also, this replica is such that it was a primary before.
+ output, err = utils.Ers(clusterInstance, tablets[0], "", "")
+ require.NoError(t, err, "error in Emergency Reparent Shard output - %s", output)
+
+ // Check the tablet is indeed promoted
+ utils.CheckPrimaryTablet(t, clusterInstance, tablets[0])
+ // Also check the writes succeed after failover
+ utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{})
}
-// rowNumberFromPosition gets the row number from the position
-func rowNumberFromPosition(pos string) int {
- rowNumStr := pos[len(pos)-4:]
- rowNum, _ := strconv.Atoi(rowNumStr)
- return rowNum
+// TestTabletRestart tests that a running tablet can be restarted and everything is still fine
+func TestTabletRestart(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
+ defer utils.TeardownCluster(clusterInstance)
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ utils.StopTablet(t, tablets[1], false)
+ tablets[1].VttabletProcess.ServingStatus = "SERVING"
+ err := tablets[1].VttabletProcess.Setup()
+ require.NoError(t, err)
}
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
index 66db2908380..ba8e17eb4d2 100644
--- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go
+++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
@@ -19,15 +19,20 @@ package plannedreparent
import (
"context"
"fmt"
+ "strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/reparent/utils"
"vitess.io/vitess/go/vt/log"
+ replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
)
func TestPrimaryToSpareStateChangeImpossible(t *testing.T) {
@@ -358,21 +363,175 @@ func TestChangeTypeSemiSync(t *testing.T) {
utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "ON")
}
-func TestReparentDoesntHangIfPrimaryFails(t *testing.T) {
+// TestCrossCellDurability tests 2 things -
+// 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected
+// 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention
+func TestCrossCellDurability(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ clusterInstance := utils.SetupReparentCluster(t, "cross_cell")
+ defer utils.TeardownCluster(clusterInstance)
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
+
+ // When tablets[0] is the primary, the only tablet in a different cell is tablets[3].
+ // So the other two should have semi-sync turned off
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[0], "ON")
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[3], "ON")
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[1], "OFF")
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[2], "OFF")
+
+ // Run forced reparent operation, this should proceed unimpeded.
+ out, err := utils.Prs(t, clusterInstance, tablets[3])
+ require.NoError(t, err, out)
+
+ utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2]})
+
+ // All the tablets will have semi-sync setup since tablets[3] is in Cell2 and all
+ // others are in Cell1, so all of them are eligible to send semi-sync ACKs
+ for _, tablet := range tablets {
+ utils.CheckSemiSyncSetupCorrectly(t, tablet, "ON")
+ }
+
+ for i, supportsBackup := range []bool{false, true} {
+ // Bring up a new replica tablet
+ // In this new tablet, we do not disable active reparents, otherwise replication will not be started.
+ newReplica := utils.StartNewVTTablet(t, clusterInstance, 300+i, supportsBackup)
+ // Add the tablet to the list of tablets in this shard
+ clusterInstance.Keyspaces[0].Shards[0].Vttablets = append(clusterInstance.Keyspaces[0].Shards[0].Vttablets, newReplica)
+ // Check that we can replicate to it and semi-sync is setup correctly on it
+ utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2], newReplica})
+ utils.CheckSemiSyncSetupCorrectly(t, newReplica, "ON")
+ }
+}
+
+// TestFullStatus tests that the RPC FullStatus works as intended.
+func TestFullStatus(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+ utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
- // Change the schema of the _vt.reparent_journal table, so that
- // inserts into it will fail. That will make the primary fail.
- _, err := tablets[0].VttabletProcess.QueryTabletWithDB(
- "ALTER TABLE reparent_journal DROP COLUMN replication_position", "_vt")
+ // Check that full status gives the correct result for a primary tablet
+ primaryTablet := tablets[0]
+ primaryStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", primaryTablet.Alias)
+ require.NoError(t, err)
+ primaryStatus := &replicationdatapb.FullStatus{}
+ opt := protojson.UnmarshalOptions{DiscardUnknown: true}
+ err = opt.Unmarshal([]byte(primaryStatusString), primaryStatus)
+ require.NoError(t, err)
+ assert.NotEmpty(t, primaryStatus.ServerUuid)
+ assert.NotEmpty(t, primaryStatus.ServerId)
+ // For a primary tablet there is no replication status
+ assert.Nil(t, primaryStatus.ReplicationStatus)
+ assert.Contains(t, primaryStatus.PrimaryStatus.String(), "vt-0000000101-bin")
+ assert.Equal(t, primaryStatus.GtidPurged, "MySQL56/")
+ assert.False(t, primaryStatus.ReadOnly)
+ assert.True(t, primaryStatus.SemiSyncPrimaryEnabled)
+ assert.True(t, primaryStatus.SemiSyncReplicaEnabled)
+ assert.True(t, primaryStatus.SemiSyncPrimaryStatus)
+ assert.False(t, primaryStatus.SemiSyncReplicaStatus)
+ assert.EqualValues(t, 3, primaryStatus.SemiSyncPrimaryClients)
+ assert.EqualValues(t, 1000000000000000000, primaryStatus.SemiSyncPrimaryTimeout)
+ assert.EqualValues(t, 1, primaryStatus.SemiSyncWaitForReplicaCount)
+ assert.Equal(t, "ROW", primaryStatus.BinlogFormat)
+ assert.Equal(t, "FULL", primaryStatus.BinlogRowImage)
+ assert.Equal(t, "ON", primaryStatus.GtidMode)
+ assert.True(t, primaryStatus.LogReplicaUpdates)
+ assert.True(t, primaryStatus.LogBinEnabled)
+ assert.Regexp(t, `[58]\.[07].*`, primaryStatus.Version)
+ assert.NotEmpty(t, primaryStatus.VersionComment)
+
+ replicaTablet := tablets[1]
+
+ waitForFilePosition(t, clusterInstance, primaryTablet, replicaTablet, 5*time.Second)
+
+ // Check that full status gives the correct result for a replica tablet
+ replicaStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", replicaTablet.Alias)
+ require.NoError(t, err)
+ replicaStatus := &replicationdatapb.FullStatus{}
+ opt = protojson.UnmarshalOptions{DiscardUnknown: true}
+ err = opt.Unmarshal([]byte(replicaStatusString), replicaStatus)
require.NoError(t, err)
+ assert.NotEmpty(t, replicaStatus.ServerUuid)
+ assert.NotEmpty(t, replicaStatus.ServerId)
+ assert.Contains(t, replicaStatus.ReplicationStatus.Position, "MySQL56/"+replicaStatus.ReplicationStatus.SourceUuid)
+ assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState)
+ assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState)
+ assert.Equal(t, fileNameFromPosition(replicaStatus.ReplicationStatus.FilePosition), fileNameFromPosition(primaryStatus.PrimaryStatus.FilePosition))
+ assert.LessOrEqual(t, rowNumberFromPosition(replicaStatus.ReplicationStatus.FilePosition), rowNumberFromPosition(primaryStatus.PrimaryStatus.FilePosition))
+ assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition, primaryStatus.PrimaryStatus.FilePosition)
+ assert.Contains(t, replicaStatus.ReplicationStatus.RelayLogFilePosition, "vt-0000000102-relay")
+ assert.Equal(t, replicaStatus.ReplicationStatus.Position, primaryStatus.PrimaryStatus.Position)
+ assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogPosition, primaryStatus.PrimaryStatus.Position)
+ assert.Empty(t, replicaStatus.ReplicationStatus.LastIoError)
+ assert.Empty(t, replicaStatus.ReplicationStatus.LastSqlError)
+ assert.Equal(t, replicaStatus.ReplicationStatus.SourceUuid, primaryStatus.ServerUuid)
+ assert.LessOrEqual(t, int(replicaStatus.ReplicationStatus.ReplicationLagSeconds), 1)
+ assert.False(t, replicaStatus.ReplicationStatus.ReplicationLagUnknown)
+ assert.EqualValues(t, 0, replicaStatus.ReplicationStatus.SqlDelay)
+ assert.False(t, replicaStatus.ReplicationStatus.SslAllowed)
+ assert.False(t, replicaStatus.ReplicationStatus.HasReplicationFilters)
+ assert.False(t, replicaStatus.ReplicationStatus.UsingGtid)
+ assert.True(t, replicaStatus.ReplicationStatus.AutoPosition)
+ assert.Equal(t, replicaStatus.ReplicationStatus.SourceHost, utils.Hostname)
+ assert.EqualValues(t, replicaStatus.ReplicationStatus.SourcePort, tablets[0].MySQLPort)
+ assert.Equal(t, replicaStatus.ReplicationStatus.SourceUser, "vt_repl")
+ assert.Contains(t, replicaStatus.PrimaryStatus.String(), "vt-0000000102-bin")
+ assert.Equal(t, replicaStatus.GtidPurged, "MySQL56/")
+ assert.True(t, replicaStatus.ReadOnly)
+ assert.False(t, replicaStatus.SemiSyncPrimaryEnabled)
+ assert.True(t, replicaStatus.SemiSyncReplicaEnabled)
+ assert.False(t, replicaStatus.SemiSyncPrimaryStatus)
+ assert.True(t, replicaStatus.SemiSyncReplicaStatus)
+ assert.EqualValues(t, 0, replicaStatus.SemiSyncPrimaryClients)
+ assert.EqualValues(t, 1000000000000000000, replicaStatus.SemiSyncPrimaryTimeout)
+ assert.EqualValues(t, 1, replicaStatus.SemiSyncWaitForReplicaCount)
+ assert.Equal(t, "ROW", replicaStatus.BinlogFormat)
+ assert.Equal(t, "FULL", replicaStatus.BinlogRowImage)
+ assert.Equal(t, "ON", replicaStatus.GtidMode)
+ assert.True(t, replicaStatus.LogReplicaUpdates)
+ assert.True(t, replicaStatus.LogBinEnabled)
+ assert.Regexp(t, `[58]\.[07].*`, replicaStatus.Version)
+ assert.NotEmpty(t, replicaStatus.VersionComment)
+}
- // Perform a planned reparent operation, the primary will fail the
- // insert. The replicas should then abort right away.
- out, err := utils.Prs(t, clusterInstance, tablets[1])
- require.Error(t, err)
- assert.Contains(t, out, "primary failed to PopulateReparentJournal")
+func getFullStatus(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) *replicationdatapb.FullStatus {
+ statusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablet.Alias)
+ require.NoError(t, err)
+ status := &replicationdatapb.FullStatus{}
+ opt := protojson.UnmarshalOptions{DiscardUnknown: true}
+ err = opt.Unmarshal([]byte(statusString), status)
+ require.NoError(t, err)
+ return status
+}
+
+// waitForFilePosition waits for timeout to see if FilePositions align b/w primary and replica, to fix flakiness in tests due to race conditions where replica is still catching up
+func waitForFilePosition(t *testing.T, clusterInstance *cluster.LocalProcessCluster, primary *cluster.Vttablet, replica *cluster.Vttablet, timeout time.Duration) {
+ start := time.Now()
+ for {
+ primaryStatus := getFullStatus(t, clusterInstance, primary)
+ replicaStatus := getFullStatus(t, clusterInstance, replica)
+ if primaryStatus.PrimaryStatus.FilePosition == replicaStatus.ReplicationStatus.FilePosition {
+ return
+ }
+ if d := time.Since(start); d > timeout {
+ require.FailNowf(t, "waitForFilePosition timed out, primary %s, replica %s",
+ primaryStatus.PrimaryStatus.FilePosition, replicaStatus.ReplicationStatus.FilePosition)
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+}
+
+// fileNameFromPosition gets the file name from the position
+func fileNameFromPosition(pos string) string {
+ return pos[0 : len(pos)-4]
+}
+
+// rowNumberFromPosition gets the row number from the position
+func rowNumberFromPosition(pos string) int {
+ rowNumStr := pos[len(pos)-4:]
+ rowNum, _ := strconv.Atoi(rowNumStr)
+ return rowNum
}
diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go
new file mode 100644
index 00000000000..88276012781
--- /dev/null
+++ b/go/test/endtoend/reparent/prscomplex/main_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package misc
+
+import (
+ "context"
+ _ "embed"
+ "flag"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ rutils "vitess.io/vitess/go/test/endtoend/reparent/utils"
+ "vitess.io/vitess/go/test/endtoend/utils"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ keyspaceName = "ks"
+ cell = "test"
+
+ //go:embed schema.sql
+ schemaSQL string
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, "localhost")
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: schemaSQL,
+ }
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
+ "--queryserver-config-query-timeout=9000",
+ "--queryserver-config-pool-size=3",
+ "--queryserver-config-stream-pool-size=3",
+ "--queryserver-config-transaction-cap=2",
+ "--queryserver-config-transaction-timeout=20",
+ "--shutdown_grace_period=3",
+ "--queryserver-config-schema-change-signal=false")
+ err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false)
+ if err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
+ "--planner-version=gen4",
+ "--mysql_default_workload=olap",
+ "--schema_change_signal=false")
+ err = clusterInstance.StartVtgate()
+ if err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+/*
+TestAcquireSameConnID tests that a query started on a connection gets reconnected with a new connection.
+Another query acquires the old connection ID and does not override the query list maintained by the vttablet process.
+PRS should not fail as the query list is maintained appropriately.
+*/
+func TestAcquireSameConnID(t *testing.T) {
+ defer func() {
+ err := recover()
+ if err != nil {
+ require.Equal(t, "Fail in goroutine after TestAcquireSameConnID has completed", err)
+ }
+ }()
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // start a reserved connection
+ utils.Exec(t, conn, "set sql_mode=''")
+ _ = utils.Exec(t, conn, "select connection_id()")
+
+ // restart the mysql to trigger reconnect on next query.
+ primTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
+ err = primTablet.MysqlctlProcess.Stop()
+ require.NoError(t, err)
+ err = primTablet.MysqlctlProcess.StartProvideInit(false)
+ require.NoError(t, err)
+
+ go func() {
+ // this will trigger reconnect with a new connection id, which will be lower than the origin connection id.
+ _, _ = utils.ExecAllowError(t, conn, "select connection_id(), sleep(4000)")
+ }()
+ time.Sleep(5 * time.Second)
+
+ totalErrCount := 0
+ // run through 100 times to acquire new connection, this might override the original connection id.
+ var conn2 *mysql.Conn
+ for i := 0; i < 100; i++ {
+ conn2, err = mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+
+ utils.Exec(t, conn2, "set sql_mode=''")
+ // ReserveExecute
+ _, err = utils.ExecAllowError(t, conn2, "select connection_id()")
+ if err != nil {
+ totalErrCount++
+ }
+ // Execute
+ _, err = utils.ExecAllowError(t, conn2, "select connection_id()")
+ if err != nil {
+ totalErrCount++
+ }
+ }
+
+ // We run the above loop 100 times so we execute 200 queries, of which only some should fail due to MySQL restart.
+ assert.Less(t, totalErrCount, 10, "MySQL restart can cause some errors, but not too many.")
+
+ // prs should happen without any error.
+ text, err := rutils.Prs(t, clusterInstance, clusterInstance.Keyspaces[0].Shards[0].Replica())
+ require.NoError(t, err, text)
+}
diff --git a/go/test/endtoend/reparent/prscomplex/schema.sql b/go/test/endtoend/reparent/prscomplex/schema.sql
new file mode 100644
index 00000000000..3e78cab09d6
--- /dev/null
+++ b/go/test/endtoend/reparent/prscomplex/schema.sql
@@ -0,0 +1,5 @@
+create table t1(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go
new file mode 100644
index 00000000000..a9f4312caea
--- /dev/null
+++ b/go/test/endtoend/reparent/prssettingspool/main_test.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package misc
+
+import (
+ "context"
+ _ "embed"
+ "flag"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ rutils "vitess.io/vitess/go/test/endtoend/reparent/utils"
+ "vitess.io/vitess/go/test/endtoend/utils"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ keyspaceName = "ks"
+ cell = "test"
+
+ //go:embed schema.sql
+ schemaSQL string
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, "localhost")
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: schemaSQL,
+ }
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
+ "--queryserver-enable-settings-pool")
+ err = clusterInstance.StartUnshardedKeyspace(*keyspace, 2, false)
+ if err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
+ "--planner-version", "gen4")
+ err = clusterInstance.StartVtgate()
+ if err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+func TestSettingsPoolWithTXAndPRS(t *testing.T) {
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // set a system settings that will trigger reserved connection usage.
+ utils.Exec(t, conn, "set default_week_format = 5")
+
+ // have transaction on the session
+ utils.Exec(t, conn, "begin")
+ utils.Exec(t, conn, "select id1, id2 from t1")
+ utils.Exec(t, conn, "commit")
+
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ // prs should happen without any error.
+ text, err := rutils.Prs(t, clusterInstance, tablets[1])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute)
+
+ defer func() {
+ // reset state
+ text, err = rutils.Prs(t, clusterInstance, tablets[0])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute)
+ }()
+
+ // no error should occur and it should go to the right tablet.
+ utils.Exec(t, conn, "select id1, id2 from t1")
+}
+
+func TestSettingsPoolWithoutTXAndPRS(t *testing.T) {
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // set a system settings that will trigger reserved connection usage.
+ utils.Exec(t, conn, "set default_week_format = 5")
+
+ // execute non-tx query
+ utils.Exec(t, conn, "select id1, id2 from t1")
+
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ // prs should happen without any error.
+ text, err := rutils.Prs(t, clusterInstance, tablets[1])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute)
+ defer func() {
+ // reset state
+ text, err = rutils.Prs(t, clusterInstance, tablets[0])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute)
+ }()
+
+ // no error should occur and it should go to the right tablet.
+ utils.Exec(t, conn, "select id1, id2 from t1")
+
+}
diff --git a/go/test/endtoend/reparent/prssettingspool/schema.sql b/go/test/endtoend/reparent/prssettingspool/schema.sql
new file mode 100644
index 00000000000..3e78cab09d6
--- /dev/null
+++ b/go/test/endtoend/reparent/prssettingspool/schema.sql
@@ -0,0 +1,5 @@
+create table t1(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go
index a3359d172d5..8bab1fe4c0c 100644
--- a/go/test/endtoend/reparent/utils/utils.go
+++ b/go/test/endtoend/reparent/utils/utils.go
@@ -31,6 +31,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
@@ -60,7 +63,7 @@ var (
replicationWaitTimeout = time.Duration(15 * time.Second)
)
-//region cluster setup/teardown
+// region cluster setup/teardown
// SetupReparentCluster is used to setup the reparent cluster
func SetupReparentCluster(t *testing.T, durability string) *cluster.LocalProcessCluster {
@@ -72,9 +75,25 @@ func SetupRangeBasedCluster(ctx context.Context, t *testing.T) *cluster.LocalPro
return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, "semi_sync")
}
-// TeardownCluster is used to teardown the reparent cluster
+// TeardownCluster is used to teardown the reparent cluster. When
+// run in a CI environment -- which is considered true when the
+// "CI" env variable is set to "true" -- the teardown also removes
+// the VTDATAROOT directory that was used for the test/cluster.
func TeardownCluster(clusterInstance *cluster.LocalProcessCluster) {
+ usedRoot := clusterInstance.CurrentVTDATAROOT
clusterInstance.Teardown()
+ // This is always set to "true" on GitHub Actions runners:
+ // https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables
+ ci, ok := os.LookupEnv("CI")
+ if !ok || strings.ToLower(ci) != "true" {
+ // Leave the directory in place to support local debugging.
+ return
+ }
+ // We're running in the CI, so free up disk space for any
+ // subsequent tests.
+ if err := os.RemoveAll(usedRoot); err != nil {
+ log.Errorf("Failed to remove previously used VTDATAROOT (%s): %v", usedRoot, err)
+ }
}
func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []string, numTablets []int, durability string) *cluster.LocalProcessCluster {
@@ -82,10 +101,6 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s
clusterInstance := cluster.NewCluster(cells[0], Hostname)
keyspace := &cluster.Keyspace{Name: KeyspaceName}
- if durability == "semi_sync" {
- clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--enable_semi_sync")
- }
-
// Start topo server
err := clusterInstance.StartTopo()
require.NoError(t, err, "Error starting topo")
@@ -115,7 +130,6 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
"--lock_tables_timeout", "5s",
- "--init_populate_metadata",
"--track_schema_versions=true",
// disabling online-ddl for reparent tests. This is done to reduce flakiness.
// All the tests in this package reparent frequently between different tablets
@@ -129,13 +143,13 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s
// the replication manager to silently fix the replication in case ERS or PRS mess up. All the
// tests in this test suite should work irrespective of this flag. Each run of ERS, PRS should be
// setting up the replication correctly.
- "--disable_active_reparents")
+ "--disable-replication-manager")
// Initialize Cluster
err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard})
require.NoError(t, err, "Cannot launch cluster")
- //Start MySql
+ // Start MySql
var mysqlCtlProcessList []*exec.Cmd
for _, shard := range clusterInstance.Keyspaces[0].Shards {
for _, tablet := range shard.Vttablets {
@@ -214,11 +228,9 @@ func StartNewVTTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster
clusterInstance.TmpDirectory,
[]string{
"--lock_tables_timeout", "5s",
- "--init_populate_metadata",
"--track_schema_versions=true",
"--queryserver_enable_online_ddl=false",
},
- clusterInstance.EnableSemiSync,
clusterInstance.DefaultCharset)
tablet.VttabletProcess.SupportsBackup = supportsBackup
@@ -238,7 +250,7 @@ func StartNewVTTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster
return tablet
}
-//endregion
+// endregion
// region database queries
func getMysqlConnParam(tablet *cluster.Vttablet) mysql.ConnParams {
@@ -266,7 +278,7 @@ func execute(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
return qr
}
-//endregion
+// endregion
// region ers, prs
@@ -700,3 +712,24 @@ func CheckReplicationStatus(ctx context.Context, t *testing.T, tablet *cluster.V
require.Equal(t, "No", res.Rows[0][11].ToString())
}
}
+
+func WaitForTabletToBeServing(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, timeout time.Duration) {
+ vTablet, err := clusterInstance.VtctlclientGetTablet(tablet)
+ require.NoError(t, err)
+
+ tConn, err := tabletconn.GetDialer()(vTablet, false)
+ require.NoError(t, err)
+
+ newCtx, cancel := context.WithTimeout(context.Background(), timeout)
+ err = tConn.StreamHealth(newCtx, func(shr *querypb.StreamHealthResponse) error {
+ if shr.Serving {
+ cancel()
+ }
+ return nil
+ })
+
+ // the error should only be because we cancelled the context when the tablet became serving again.
+ if err != nil && !strings.Contains(err.Error(), "context canceled") {
+ t.Fatal(err.Error())
+ }
+}
diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go
index 945b4179846..d5f5e5b2255 100644
--- a/go/test/endtoend/sharded/sharded_keyspace_test.go
+++ b/go/test/endtoend/sharded/sharded_keyspace_test.go
@@ -112,6 +112,9 @@ func TestShardedKeyspace(t *testing.T) {
err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, cell, shard2Primary.TabletUID)
require.Nil(t, err)
+ err = clusterInstance.StartVTOrc(keyspaceName)
+ require.NoError(t, err)
+
// apply the schema on the first shard through vtctl, so all tablets
// are the same.
//apply the schema on the second shard.
@@ -233,7 +236,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
clusterInstance.Hostname,
clusterInstance.TmpDirectory,
clusterInstance.VtTabletExtraArgs,
- clusterInstance.EnableSemiSync,
clusterInstance.DefaultCharset)
tablet.Alias = tablet.VttabletProcess.TabletPath
diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
index 7383c0b7818..843c6800622 100644
--- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
+++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
@@ -288,7 +288,7 @@ func (bt *BufferingTest) Test(t *testing.T) {
// Healthcheck interval on tablet is set to 1s, so sleep for 2s
time.Sleep(2 * time.Second)
conn, err := mysql.Connect(context.Background(), &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
// Insert two rows for the later threads (critical read, update).
@@ -350,11 +350,14 @@ func (bt *BufferingTest) Test(t *testing.T) {
//At least one thread should have been buffered.
//This may fail if a failover is too fast. Add retries then.
resp, err := http.Get(clusterInstance.VtgateProcess.VerifyURL)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
require.Equal(t, 200, resp.StatusCode)
var metadata VTGateBufferingStats
- respByte, _ := io.ReadAll(resp.Body)
+ respByte, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
err = json.Unmarshal(respByte, &metadata)
require.NoError(t, err)
diff --git a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go
index f3e4d6460c1..ace652fc1d2 100644
--- a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go
+++ b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go
@@ -30,10 +30,8 @@ import (
)
const (
- demoteQuery = "SET GLOBAL read_only = ON;FLUSH TABLES WITH READ LOCK;UNLOCK TABLES;"
- disableSemiSyncSourceQuery = "SET GLOBAL rpl_semi_sync_master_enabled = 0"
- enableSemiSyncSourceQuery = "SET GLOBAL rpl_semi_sync_master_enabled = 1"
- promoteQuery = "STOP SLAVE;RESET SLAVE ALL;SET GLOBAL read_only = OFF;"
+ demoteQuery = "SET GLOBAL read_only = ON;FLUSH TABLES WITH READ LOCK;UNLOCK TABLES;"
+ promoteQuery = "STOP SLAVE;RESET SLAVE ALL;SET GLOBAL read_only = OFF;"
hostname = "localhost"
)
@@ -51,9 +49,6 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro
oldPrimary := primary
newPrimary := replica
primary.VttabletProcess.QueryTablet(demoteQuery, keyspaceUnshardedName, true)
- if primary.VttabletProcess.EnableSemiSync {
- primary.VttabletProcess.QueryTablet(disableSemiSyncSourceQuery, keyspaceUnshardedName, true)
- }
// Wait for replica to catch up to primary.
cluster.WaitForReplicationPos(t, primary, replica, "localhost", 60.0)
@@ -69,10 +64,6 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro
// Promote replica to new primary.
replica.VttabletProcess.QueryTablet(promoteQuery, keyspaceUnshardedName, true)
- if replica.VttabletProcess.EnableSemiSync {
- replica.VttabletProcess.QueryTablet(enableSemiSyncSourceQuery, keyspaceUnshardedName, true)
- }
-
// Configure old primary to replicate from new primary.
_, gtID := cluster.GetPrimaryPosition(t, *newPrimary, hostname)
diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go
index 239fb0f9bb9..a3876b259f3 100644
--- a/go/test/endtoend/tabletgateway/vtgate_test.go
+++ b/go/test/endtoend/tabletgateway/vtgate_test.go
@@ -45,7 +45,7 @@ func TestVtgateHealthCheck(t *testing.T) {
verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
qr := utils.Exec(t, conn, "show vitess_tablets")
@@ -59,7 +59,7 @@ func TestVtgateReplicationStatusCheck(t *testing.T) {
verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
// Only returns rows for REPLICA and RDONLY tablets -- so should be 2 of them
@@ -72,10 +72,12 @@ func TestVtgateReplicationStatusCheck(t *testing.T) {
func verifyVtgateVariables(t *testing.T, url string) {
resp, err := http.Get(url)
require.NoError(t, err)
+ defer resp.Body.Close()
require.Equal(t, 200, resp.StatusCode, "Vtgate api url response not found")
resultMap := make(map[string]any)
- respByte, _ := io.ReadAll(resp.Body)
+ respByte, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
err = json.Unmarshal(respByte, &resultMap)
require.NoError(t, err)
assert.Contains(t, resultMap, "VtgateVSchemaCounts", "Vschema count should be present in variables")
@@ -203,7 +205,7 @@ func TestReplicaTransactions(t *testing.T) {
// been restarted and the session lost
replicaTablet.VttabletProcess.ServingStatus = "SERVING"
err = replicaTablet.VttabletProcess.Setup()
- require.Nil(t, err)
+ require.NoError(t, err)
serving := replicaTablet.VttabletProcess.WaitForStatus("SERVING", 60*time.Second)
assert.Equal(t, serving, true, "Tablet did not become ready within a reasonable time")
utils.AssertContainsError(t, readConn, fetchAllCustomers, "not found")
diff --git a/go/test/endtoend/tabletmanager/main_test.go b/go/test/endtoend/tabletmanager/main_test.go
index 7dcfa4ea1a5..39f4830b33d 100644
--- a/go/test/endtoend/tabletmanager/main_test.go
+++ b/go/test/endtoend/tabletmanager/main_test.go
@@ -105,8 +105,6 @@ func TestMain(m *testing.M) {
"--health_check_interval", tabletHealthcheckRefreshInterval.String(),
"--unhealthy_threshold", tabletUnhealthyThreshold.String(),
}
- // We do not need semiSync for this test case.
- clusterInstance.EnableSemiSync = false
// Start keyspace
keyspace := &cluster.Keyspace{
diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go
index 3fcb414e25c..3db692694b5 100644
--- a/go/test/endtoend/tabletmanager/primary/tablet_test.go
+++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go
@@ -89,8 +89,6 @@ func TestMain(m *testing.M) {
"--watch_replication_stream",
"--enable_replication_reporter",
}
- // We do not need semiSync for this test case.
- clusterInstance.EnableSemiSync = false
// Start keyspace
keyspace := &cluster.Keyspace{
@@ -124,16 +122,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) {
// Make replica tablet as primary
err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
// Run health check on both, make sure they are both healthy.
// Also make sure the types are correct.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, primaryTablet.HTTPPort, false)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, replicaTablet.HTTPPort, false)
checkTabletType(t, primaryTablet.Alias, "REPLICA")
@@ -141,16 +139,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) {
// Come back to the original tablet.
err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
// Run health check on both, make sure they are both healthy.
// Also make sure the types are correct.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, primaryTablet.HTTPPort, false)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, replicaTablet.HTTPPort, false)
checkTabletType(t, primaryTablet.Alias, "PRIMARY")
@@ -165,14 +163,14 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) {
// Make replica as primary
err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
err = replicaTablet.VttabletProcess.WaitForTabletStatus("SERVING")
- require.Nil(t, err)
+ require.NoError(t, err)
// Capture the current TER.
shrs, err := clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1)
- require.Nil(t, err)
+ require.NoError(t, err)
streamHealthRes1 := shrs[0]
actualType := streamHealthRes1.GetTarget().GetTabletType()
@@ -188,15 +186,15 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) {
// kill the newly promoted primary tablet
err = replicaTablet.VttabletProcess.TearDown()
- require.Nil(t, err)
+ require.NoError(t, err)
// Start Vttablet
err = clusterInstance.StartVttablet(&replicaTablet, "SERVING", false, cell, keyspaceName, hostname, shardName)
- require.Nil(t, err)
+ require.NoError(t, err)
// Make sure that the TER did not change
shrs, err = clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1)
- require.Nil(t, err)
+ require.NoError(t, err)
streamHealthRes2 := shrs[0]
@@ -215,16 +213,17 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) {
// Reset primary
err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
err = primaryTablet.VttabletProcess.WaitForTabletStatus("SERVING")
- require.Nil(t, err)
+ require.NoError(t, err)
}
func checkHealth(t *testing.T, port int, shouldError bool) {
url := fmt.Sprintf("http://localhost:%d/healthz", port)
resp, err := http.Get(url)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
if shouldError {
assert.True(t, resp.StatusCode > 400)
} else {
@@ -234,11 +233,11 @@ func checkHealth(t *testing.T, port int, shouldError bool) {
func checkTabletType(t *testing.T, tabletAlias string, typeWant string) {
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias)
- require.Nil(t, err)
+ require.NoError(t, err)
var tablet topodatapb.Tablet
err = json2.Unmarshal([]byte(result), &tablet)
- require.Nil(t, err)
+ require.NoError(t, err)
actualType := tablet.GetType()
got := fmt.Sprintf("%d", actualType)
diff --git a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go
index 3284ab65d49..86b02244762 100644
--- a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go
+++ b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go
@@ -24,6 +24,8 @@ import (
"testing"
"time"
+ "vitess.io/vitess/go/vt/sidecardb"
+
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/endtoend/cluster"
@@ -84,9 +86,6 @@ func TestMain(m *testing.M) {
return 1
}
- // We do not need semiSync for this test case.
- clusterInstance.EnableSemiSync = false
-
// Start keyspace
keyspace := &cluster.Keyspace{
Name: keyspaceName,
@@ -159,8 +158,7 @@ func waitForSourcePort(ctx context.Context, t *testing.T, tablet cluster.Vttable
for time.Now().Before(timeout) {
// Check that initially replication is setup correctly on the replica tablet
replicaStatus, err := tmcGetReplicationStatus(ctx, tablet.GrpcPort)
- require.NoError(t, err)
- if replicaStatus.SourcePort == expectedPort {
+ if err == nil && replicaStatus.SourcePort == expectedPort {
return nil
}
time.Sleep(300 * time.Millisecond)
@@ -168,15 +166,28 @@ func waitForSourcePort(ctx context.Context, t *testing.T, tablet cluster.Vttable
return fmt.Errorf("time out before source port became %v for %v", expectedPort, tablet.Alias)
}
+func getSidecarDBDDLQueryCount(tablet *cluster.VttabletProcess) (int64, error) {
+ vars := tablet.GetVars()
+ key := sidecardb.StatsKeyQueryCount
+ val, ok := vars[key]
+ if !ok {
+ return 0, fmt.Errorf("%s not found in debug/vars", key)
+ }
+ return int64(val.(float64)), nil
+}
func TestReplicationRepairAfterPrimaryTabletChange(t *testing.T) {
ctx := context.Background()
// Check that initially replication is setup correctly on the replica tablet
err := waitForSourcePort(ctx, t, replicaTablet, int32(primaryTablet.MySQLPort))
require.NoError(t, err)
+ sidecarDDLCount, err := getSidecarDBDDLQueryCount(primaryTablet.VttabletProcess)
+ require.NoError(t, err)
+ // sidecar db should create all _vt tables when vttablet started
+ require.Greater(t, sidecarDDLCount, int64(0))
+
// Stop the primary tablet
stopTablet(t, primaryTablet)
-
// Change the MySQL port of the primary tablet
newMysqlPort := clusterInstance.GetAndReservePort()
primaryTablet.MySQLPort = newMysqlPort
@@ -188,4 +199,9 @@ func TestReplicationRepairAfterPrimaryTabletChange(t *testing.T) {
// Let replication manager repair replication
err = waitForSourcePort(ctx, t, replicaTablet, int32(newMysqlPort))
require.NoError(t, err)
+
+ sidecarDDLCount, err = getSidecarDBDDLQueryCount(primaryTablet.VttabletProcess)
+ require.NoError(t, err)
+ // sidecardb should find the desired _vt schema and not apply any new creates or upgrades when the tablet comes up again
+ require.Equal(t, sidecarDDLCount, int64(0))
}
diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go
index bf8e5310eca..a29999de675 100644
--- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go
+++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go
@@ -103,8 +103,6 @@ func TestMain(m *testing.M) {
"--gc_purge_check_interval", gcPurgeCheckInterval.String(),
"--table_gc_lifecycle", "hold,purge,evac,drop",
}
- // We do not need semiSync for this test case.
- clusterInstance.EnableSemiSync = false
// Start keyspace
keyspace := &cluster.Keyspace{
diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go
index d3d23e0075f..19359406607 100644
--- a/go/test/endtoend/tabletmanager/tablet_health_test.go
+++ b/go/test/endtoend/tabletmanager/tablet_health_test.go
@@ -202,6 +202,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) {
url := fmt.Sprintf("http://localhost:%d/healthz", port)
resp, err := http.Get(url)
require.NoError(t, err)
+ defer resp.Body.Close()
if shouldError {
assert.True(t, resp.StatusCode > 400)
} else {
diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go
index 0296f397f5a..3c597e97981 100644
--- a/go/test/endtoend/tabletmanager/tablet_test.go
+++ b/go/test/endtoend/tabletmanager/tablet_test.go
@@ -52,67 +52,15 @@ func TestEnsureDB(t *testing.T) {
status := tablet.VttabletProcess.GetStatusDetails()
assert.Contains(t, status, "read-only")
- // Switch to read-write and verify that that we go serving.
- _ = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias)
+ // Switch to read-write and verify that we go serving.
+ // Note: for TabletExternallyReparented, we expect SetReadWrite to be called by the user
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias)
+ require.NoError(t, err)
err = tablet.VttabletProcess.WaitForTabletStatus("SERVING")
require.NoError(t, err)
killTablets(t, tablet)
}
-// TestLocalMetadata tests the contents of local_metadata table after vttablet startup
-func TestLocalMetadata(t *testing.T) {
- defer cluster.PanicHandler(t)
- // by default tablets are started with --restore_from_backup
- // so metadata should exist
- cluster.VerifyLocalMetadata(t, &replicaTablet, keyspaceName, shardName, cell)
-
- // Create new tablet
- rTablet := clusterInstance.NewVttabletInstance("replica", 0, "")
-
- clusterInstance.VtTabletExtraArgs = []string{
- "--lock_tables_timeout", "5s",
- "--init_populate_metadata",
- }
- rTablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(rTablet.TabletUID, rTablet.MySQLPort, clusterInstance.TmpDirectory)
- err := rTablet.MysqlctlProcess.Start()
- require.NoError(t, err)
-
- log.Info(fmt.Sprintf("Started vttablet %v", rTablet))
- // SupportsBackup=False prevents vttablet from trying to restore
- // Start vttablet process
- clusterInstance.VtGatePlannerVersion = 0
- err = clusterInstance.StartVttablet(rTablet, "SERVING", false, cell, keyspaceName, hostname, shardName)
- require.NoError(t, err)
-
- cluster.VerifyLocalMetadata(t, rTablet, keyspaceName, shardName, cell)
-
- // Create another new tablet
- rTablet2 := clusterInstance.NewVttabletInstance("replica", 0, "")
-
- // start with --init_populate_metadata false (default)
- clusterInstance.VtTabletExtraArgs = []string{
- "--lock_tables_timeout", "5s",
- }
- rTablet2.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(rTablet2.TabletUID, rTablet2.MySQLPort, clusterInstance.TmpDirectory)
- err = rTablet2.MysqlctlProcess.Start()
- require.NoError(t, err)
-
- log.Info(fmt.Sprintf("Started vttablet %v", rTablet2))
- // SupportsBackup=False prevents vttablet from trying to restore
- // Start vttablet process
- err = clusterInstance.StartVttablet(rTablet2, "SERVING", false, cell, keyspaceName, hostname, shardName)
- require.NoError(t, err)
-
- // check that tablet did _not_ get populated
- qr, err := rTablet2.VttabletProcess.QueryTablet("select * from _vt.local_metadata", keyspaceName, false)
- require.NoError(t, err)
- require.Nil(t, qr.Rows)
-
- // Reset the VtTabletExtraArgs and kill tablets
- clusterInstance.VtTabletExtraArgs = []string{}
- killTablets(t, rTablet, rTablet2)
-}
-
// TestResetReplicationParameters tests that the RPC ResetReplicationParameters works as intended.
func TestResetReplicationParameters(t *testing.T) {
defer cluster.PanicHandler(t)
diff --git a/go/test/endtoend/tabletmanager/throttler/throttler_test.go b/go/test/endtoend/tabletmanager/throttler/throttler_test.go
index 38f886c034e..28d0c287c24 100644
--- a/go/test/endtoend/tabletmanager/throttler/throttler_test.go
+++ b/go/test/endtoend/tabletmanager/throttler/throttler_test.go
@@ -16,6 +16,7 @@ limitations under the License.
package throttler
import (
+ "context"
"flag"
"fmt"
"io"
@@ -29,6 +30,7 @@ import (
"vitess.io/vitess/go/test/endtoend/cluster"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
var (
@@ -73,11 +75,9 @@ var (
)
const (
- throttlerInitWait = 30 * time.Second
- accumulateLagWait = 2 * time.Second
- throttlerRefreshIntervalWait = 20 * time.Second
- replicationCatchUpWait = 10 * time.Second
- onDemandHeartbeatDuration = 5 * time.Second
+ throttlerThreshold = 1 * time.Second // standard, tight threshold
+ onDemandHeartbeatDuration = 5 * time.Second
+ applyConfigWait = 15 * time.Second // time after which we're sure the throttler has refreshed config and tablets
)
func TestMain(m *testing.M) {
@@ -100,13 +100,12 @@ func TestMain(m *testing.M) {
"--watch_replication_stream",
"--enable_replication_reporter",
"--enable-lag-throttler",
- "--throttle_threshold", "1s",
+ "--throttle_threshold", throttlerThreshold.String(),
"--heartbeat_enable",
"--heartbeat_interval", "250ms",
"--heartbeat_on_demand_duration", onDemandHeartbeatDuration.String(),
+ "--disable_active_reparents",
}
- // We do not need semiSync for this test case.
- clusterInstance.EnableSemiSync = false
// Start keyspace
keyspace := &cluster.Keyspace{
@@ -147,148 +146,174 @@ func throttledApps(tablet *cluster.Vttablet) (resp *http.Response, respBody stri
return resp, respBody, err
}
-func throttleCheck(tablet *cluster.Vttablet) (*http.Response, error) {
- return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkAPIPath))
+func throttleCheck(tablet *cluster.Vttablet, skipRequestHeartbeats bool) (*http.Response, error) {
+ return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?s=%t", tablet.HTTPPort, checkAPIPath, skipRequestHeartbeats))
}
func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) {
return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath))
}
-func TestThrottlerBeforeMetricsCollected(t *testing.T) {
- defer cluster.PanicHandler(t)
-
- // Immediately after startup, we expect this response:
- // {"StatusCode":404,"Value":0,"Threshold":0,"Message":"No such metric"}
- {
- resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusNotFound, resp.StatusCode)
- }
-}
-
func warmUpHeartbeat(t *testing.T) (respStatus int) {
// because we run with -heartbeat_on_demand_duration=5s, the heartbeat is "cold" right now.
// Let's warm it up.
- resp, err := throttleCheck(primaryTablet)
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
time.Sleep(time.Second)
- assert.NoError(t, err)
return resp.StatusCode
}
+// waitForThrottleCheckStatus waits for the tablet to return the provided HTTP code in a throttle check
+func waitForThrottleCheckStatus(t *testing.T, tablet *cluster.Vttablet, wantCode int) {
+ _ = warmUpHeartbeat(t)
+ ctx, cancel := context.WithTimeout(context.Background(), onDemandHeartbeatDuration+applyConfigWait)
+ defer cancel()
+
+ for {
+ resp, err := throttleCheck(tablet, true)
+ require.NoError(t, err)
+
+ if wantCode == resp.StatusCode {
+ // Wait for any cached check values to be cleared and the new
+ // status value to be in effect everywhere before returning.
+ resp.Body.Close()
+ return
+ }
+ select {
+ case <-ctx.Done():
+ b, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ resp.Body.Close()
+
+ assert.Equal(t, wantCode, resp.StatusCode, "body: %v", string(b))
+ return
+ default:
+ resp.Body.Close()
+ time.Sleep(time.Second)
+ }
+ }
+}
+
func TestThrottlerAfterMetricsCollected(t *testing.T) {
defer cluster.PanicHandler(t)
- time.Sleep(throttlerInitWait)
- // By this time metrics will have been collected. We expect no lag, and something like:
- // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""}
- //
- respStatus := warmUpHeartbeat(t)
- assert.NotEqual(t, http.StatusOK, respStatus)
- time.Sleep(time.Second)
- {
- resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ // We run with on-demand heartbeats. Immediately as the tablet manager opens, it sends a one-time
+ // request for heartbeats, which means the throttler is able to collect initial "good" data.
+ // After a few seconds, the heartbeat lease terminates. We wait for that.
+ // {"StatusCode":429,"Value":4.864921,"Threshold":1,"Message":"Threshold exceeded"}
+ t.Run("expect push back once initial heartbeat lease terminates", func(t *testing.T) {
+ time.Sleep(onDemandHeartbeatDuration)
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests)
+ })
+ t.Run("requesting heartbeats", func(t *testing.T) {
+ respStatus := warmUpHeartbeat(t)
+ assert.NotEqual(t, http.StatusOK, respStatus)
+ })
+ t.Run("expect OK once heartbeats lease renewed", func(t *testing.T) {
+ time.Sleep(1 * time.Second)
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- {
+ })
+ t.Run("expect OK once heartbeats lease renewed, still", func(t *testing.T) {
+ time.Sleep(1 * time.Second)
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ })
+ t.Run("validate throttled-apps", func(t *testing.T) {
resp, body, err := throttledApps(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Contains(t, body, "always-throttled-app")
- }
- {
+ })
+ t.Run("validate check-self", func(t *testing.T) {
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- {
+ })
+ t.Run("validate check-self, again", func(t *testing.T) {
resp, err := throttleCheckSelf(replicaTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
+ })
}
func TestLag(t *testing.T) {
defer cluster.PanicHandler(t)
+ // Stop VTOrc because we want to stop replication to increase lag.
+ // We don't want VTOrc to fix this.
+ clusterInstance.DisableVTOrcRecoveries(t)
+ defer clusterInstance.EnableVTOrcRecoveries(t)
- {
+ t.Run("stopping replication", func(t *testing.T) {
err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias)
assert.NoError(t, err)
-
- time.Sleep(accumulateLagWait)
- // Lag will have accumulated
- // {"StatusCode":429,"Value":4.864921,"Threshold":1,"Message":"Threshold exceeded"}
- {
- resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
- }
- {
- resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
- // self (on primary) is unaffected by replication lag
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- {
- resp, err := throttleCheckSelf(replicaTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
- }
- }
- {
+ })
+ t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) {
+ time.Sleep(2 * throttlerThreshold)
+
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
+ })
+ t.Run("primary self-check should still be fine", func(t *testing.T) {
+ resp, err := throttleCheckSelf(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ // self (on primary) is unaffected by replication lag
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ })
+ t.Run("replica self-check should show error", func(t *testing.T) {
+ resp, err := throttleCheckSelf(replicaTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
+ })
+ t.Run("starting replication", func(t *testing.T) {
err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias)
assert.NoError(t, err)
-
- time.Sleep(replicationCatchUpWait)
- // Restore
- // by now heartbeat lease has expired. Let's warm it up
- respStatus := warmUpHeartbeat(t)
- assert.NotEqual(t, http.StatusOK, respStatus)
- time.Sleep(time.Second)
- {
- resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- {
- resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- {
- resp, err := throttleCheckSelf(replicaTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- }
+ })
+ t.Run("expecting replication to catch up and throttler check to return OK", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("primary self-check should be fine", func(t *testing.T) {
+ resp, err := throttleCheckSelf(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ // self (on primary) is unaffected by replication lag
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ })
+ t.Run("replica self-check should be fine", func(t *testing.T) {
+ resp, err := throttleCheckSelf(replicaTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ })
}
func TestNoReplicas(t *testing.T) {
defer cluster.PanicHandler(t)
- {
+ t.Run("changing replica to RDONLY", func(t *testing.T) {
err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY")
assert.NoError(t, err)
- time.Sleep(throttlerRefreshIntervalWait)
// This makes no REPLICA servers available. We expect something like:
// {"StatusCode":200,"Value":0,"Threshold":1,"Message":""}
- respStatus := warmUpHeartbeat(t)
- assert.Equal(t, http.StatusOK, respStatus)
- resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("restoring to REPLICA", func(t *testing.T) {
+
err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA")
assert.NoError(t, err)
- time.Sleep(throttlerRefreshIntervalWait)
- // Restore valid replica
- respStatus := warmUpHeartbeat(t)
- assert.NotEqual(t, http.StatusOK, respStatus)
- resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
}
diff --git a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go
index 39dfd93293d..04f520defe9 100644
--- a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go
+++ b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go
@@ -21,6 +21,7 @@ import (
"fmt"
"net/http"
"os"
+ "sync"
"testing"
"time"
@@ -76,8 +77,8 @@ var (
)
const (
- testThreshold = 5
- throttlerInitWait = 10 * time.Second
+ testThreshold = 5
+ applyConfigWait = 15 * time.Second // time after which we're sure the throttler has refreshed config and tablets
)
func TestMain(m *testing.M) {
@@ -106,8 +107,6 @@ func TestMain(m *testing.M) {
"--heartbeat_enable",
"--heartbeat_interval", "250ms",
}
- // We do not need semiSync for this test case.
- clusterInstance.EnableSemiSync = false
// Start keyspace
keyspace := &cluster.Keyspace{
@@ -148,7 +147,8 @@ func TestMain(m *testing.M) {
}
func throttleCheck(tablet *cluster.Vttablet) (*http.Response, error) {
- return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkAPIPath))
+ resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkAPIPath))
+ return resp, err
}
func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) {
@@ -158,65 +158,65 @@ func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) {
func TestThrottlerThresholdOK(t *testing.T) {
defer cluster.PanicHandler(t)
- {
+ t.Run("immediately", func(t *testing.T) {
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
-}
-
-func TestThrottlerAfterMetricsCollected(t *testing.T) {
- defer cluster.PanicHandler(t)
-
- time.Sleep(throttlerInitWait)
- // By this time metrics will have been collected. We expect no lag, and something like:
- // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""}
- {
+ })
+ t.Run("after long wait", func(t *testing.T) {
+ time.Sleep(applyConfigWait)
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
- {
- resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
- }
+ })
}
func TestThreadsRunning(t *testing.T) {
defer cluster.PanicHandler(t)
- sleepSeconds := 6
+ sleepDuration := 10 * time.Second
+ var wg sync.WaitGroup
for i := 0; i < testThreshold; i++ {
- // each query must be distinct, so they don't get consolidated
- go vtgateExec(t, fmt.Sprintf("select sleep(%d)", sleepSeconds+i), "")
+ // generate different Sleep() calls, all at minimum sleepDuration
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ vtgateExec(t, fmt.Sprintf("select sleep(%d)", int(sleepDuration.Seconds())+i), "")
+ }(i)
}
t.Run("exceeds threshold", func(t *testing.T) {
- time.Sleep(3 * time.Second)
- // by this time we will have +1 threads_running, and we should hit the threshold
+ time.Sleep(sleepDuration / 2)
+ // by this time we will have testThreshold+1 threads_running, and we should hit the threshold
// {"StatusCode":429,"Value":2,"Threshold":2,"Message":"Threshold exceeded"}
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
}
})
+ t.Run("wait for queries to terminate", func(t *testing.T) {
+ wg.Wait()
+ })
t.Run("restored below threshold", func(t *testing.T) {
- time.Sleep(time.Duration(sleepSeconds) * time.Second * 2) // * 2 since we have two planner executing the select sleep(6) query
- // Restore
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
})
@@ -227,7 +227,7 @@ func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
qr, err := conn.ExecuteFetch(query, 1000, true)
diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go
new file mode 100644
index 00000000000..fe87262a21f
--- /dev/null
+++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go
@@ -0,0 +1,531 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package throttler
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/throttler"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ customQuery = "show global status like 'threads_running'"
+ customThreshold = 5 * time.Second
+ unreasonablyLowThreshold = 1 * time.Millisecond
+ extremelyHighThreshold = 1 * time.Hour
+ onDemandHeartbeatDuration = 5 * time.Second
+ throttlerEnabledTimeout = 60 * time.Second
+ useDefaultQuery = ""
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ primaryTablet *cluster.Vttablet
+ replicaTablet *cluster.Vttablet
+ vtParams mysql.ConnParams
+ hostname = "localhost"
+ keyspaceName = "ks"
+ cell = "zone1"
+ sqlSchema = `
+ create table t1(
+ id bigint,
+ value varchar(16),
+ primary key(id)
+ ) Engine=InnoDB;
+`
+
+ vSchema = `
+ {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash"
+ }
+ },
+ "tables": {
+ "t1": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ }
+ }
+ }`
+
+ httpClient = base.SetupHTTPClient(time.Second)
+ throttledAppsAPIPath = "throttler/throttled-apps"
+ checkAPIPath = "throttler/check"
+ checkSelfAPIPath = "throttler/check-self"
+ getResponseBody = func(resp *http.Response) string {
+ body, _ := io.ReadAll(resp.Body)
+ return string(body)
+ }
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, hostname)
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Set extra tablet args for lock timeout
+ clusterInstance.VtTabletExtraArgs = []string{
+ "--lock_tables_timeout", "5s",
+ "--watch_replication_stream",
+ "--enable_replication_reporter",
+ "--throttler-config-via-topo",
+ "--heartbeat_enable",
+ "--heartbeat_interval", "250ms",
+ "--heartbeat_on_demand_duration", onDemandHeartbeatDuration.String(),
+ "--disable_active_reparents",
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: sqlSchema,
+ VSchema: vSchema,
+ }
+
+ if err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil {
+ return 1
+ }
+
+ // Collect table paths and ports
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+ for _, tablet := range tablets {
+ if tablet.Type == "primary" {
+ primaryTablet = tablet
+ } else if tablet.Type != "rdonly" {
+ replicaTablet = tablet
+ }
+ }
+
+ vtgateInstance := clusterInstance.NewVtgateInstance()
+ // Start vtgate
+ if err := vtgateInstance.Setup(); err != nil {
+ return 1
+ }
+ // ensure it is torn down during cluster TearDown
+ clusterInstance.VtgateProcess = *vtgateInstance
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+ clusterInstance.VtctldClientProcess = *cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory)
+
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+func throttledApps(tablet *cluster.Vttablet) (resp *http.Response, respBody string, err error) {
+ resp, err = httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, throttledAppsAPIPath))
+ if err != nil {
+ return resp, respBody, err
+ }
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return resp, respBody, err
+ }
+ respBody = string(b)
+ return resp, respBody, err
+}
+
+func throttleCheck(tablet *cluster.Vttablet, skipRequestHeartbeats bool) (*http.Response, error) {
+ resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?s=%t", tablet.HTTPPort, checkAPIPath, skipRequestHeartbeats))
+ return resp, err
+}
+
+func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) {
+ return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath))
+}
+
+func warmUpHeartbeat(t *testing.T) (respStatus int) {
+ // because we run with -heartbeat_on_demand_duration=5s, the heartbeat is "cold" right now.
+ // Let's warm it up.
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ time.Sleep(time.Second)
+ return resp.StatusCode
+}
+
+// waitForThrottleCheckStatus waits for the tablet to return the provided HTTP code in a throttle check
+func waitForThrottleCheckStatus(t *testing.T, tablet *cluster.Vttablet, wantCode int) {
+ _ = warmUpHeartbeat(t)
+ ctx, cancel := context.WithTimeout(context.Background(), onDemandHeartbeatDuration*4)
+ defer cancel()
+
+ for {
+ resp, err := throttleCheck(tablet, true)
+ require.NoError(t, err)
+
+ if wantCode == resp.StatusCode {
+ // Wait for any cached check values to be cleared and the new
+ // status value to be in effect everywhere before returning.
+ resp.Body.Close()
+ return
+ }
+ select {
+ case <-ctx.Done():
+ b, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ resp.Body.Close()
+
+ assert.Equalf(t, wantCode, resp.StatusCode, "body: %s", string(b))
+ return
+ default:
+ resp.Body.Close()
+ time.Sleep(time.Second)
+ }
+ }
+}
+
+func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result {
+ t.Helper()
+
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.Nil(t, err)
+ defer conn.Close()
+
+ qr, err := conn.ExecuteFetch(query, 1000, true)
+ if expectError == "" {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err, "error should not be nil")
+ assert.Contains(t, err.Error(), expectError, "Unexpected error")
+ }
+ return qr
+}
+
+func TestInitialThrottler(t *testing.T) {
+ defer cluster.PanicHandler(t)
+
+ t.Run("validating OK response from disabled throttler", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("enabling throttler with very low threshold", func(t *testing.T) {
+ _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, unreasonablyLowThreshold.Seconds(), useDefaultQuery, false)
+ assert.NoError(t, err)
+
+ // Wait for the throttler to be enabled everywhere with the new config.
+ for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
+ throttler.WaitForThrottlerStatusEnabled(t, tablet, true, &throttler.Config{Query: throttler.DefaultQuery, Threshold: unreasonablyLowThreshold.Seconds()}, throttlerEnabledTimeout)
+ }
+ })
+ t.Run("validating pushback response from throttler", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests)
+ })
+ t.Run("disabling throttler", func(t *testing.T) {
+ _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, true, unreasonablyLowThreshold.Seconds(), useDefaultQuery, false)
+ assert.NoError(t, err)
+
+ // Wait for the throttler to be disabled everywhere.
+ for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
+ throttler.WaitForThrottlerStatusEnabled(t, tablet, false, nil, throttlerEnabledTimeout)
+ }
+ })
+ t.Run("validating OK response from disabled throttler, again", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("enabling throttler, again", func(t *testing.T) {
+ // Enable the throttler again with the default query which also moves us back
+ // to the default threshold.
+ _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, useDefaultQuery, true)
+ assert.NoError(t, err)
+
+ // Wait for the throttler to be enabled everywhere again with the default config.
+ for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
+ throttler.WaitForThrottlerStatusEnabled(t, tablet, true, throttler.DefaultConfig, throttlerEnabledTimeout)
+ }
+ })
+ t.Run("validating pushback response from throttler, again", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests)
+ })
+ t.Run("setting high threshold", func(t *testing.T) {
+ _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, extremelyHighThreshold.Seconds(), useDefaultQuery, true)
+ assert.NoError(t, err)
+
+ // Wait for the throttler to be enabled everywhere with new config.
+ for _, tablet := range []cluster.Vttablet{*primaryTablet, *replicaTablet} {
+ throttler.WaitForThrottlerStatusEnabled(t, &tablet, true, &throttler.Config{Query: throttler.DefaultQuery, Threshold: extremelyHighThreshold.Seconds()}, throttlerEnabledTimeout)
+ }
+ })
+ t.Run("validating OK response from throttler with high threshold", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("setting low threshold", func(t *testing.T) {
+ _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, true)
+ assert.NoError(t, err)
+
+ // Wait for the throttler to be enabled everywhere with new config.
+ for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
+ throttler.WaitForThrottlerStatusEnabled(t, tablet, true, throttler.DefaultConfig, throttlerEnabledTimeout)
+ }
+ })
+ t.Run("validating pushback response from throttler on low threshold", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests)
+ })
+ t.Run("requesting heartbeats", func(t *testing.T) {
+ respStatus := warmUpHeartbeat(t)
+ assert.NotEqual(t, http.StatusOK, respStatus)
+ })
+ t.Run("validating OK response from throttler with low threshold, heartbeats running", func(t *testing.T) {
+ time.Sleep(1 * time.Second)
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("validating OK response from throttler with low threshold, heartbeats running still", func(t *testing.T) {
+ time.Sleep(1 * time.Second)
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("validating pushback response from throttler on low threshold once heartbeats go stale", func(t *testing.T) {
+ time.Sleep(2 * onDemandHeartbeatDuration) // just... really wait long enough, make sure on-demand stops
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests)
+ })
+}
+
+func TestThrottlerAfterMetricsCollected(t *testing.T) {
+ defer cluster.PanicHandler(t)
+
+ // By this time metrics will have been collected. We expect no lag, and something like:
+ // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""}
+ t.Run("validating throttler OK", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("validating throttled apps", func(t *testing.T) {
+ resp, body, err := throttledApps(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ assert.Contains(t, body, "always-throttled-app")
+ })
+ t.Run("validating primary check self", func(t *testing.T) {
+ resp, err := throttleCheckSelf(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("validating replica check self", func(t *testing.T) {
+ resp, err := throttleCheckSelf(replicaTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+}
+
+func TestLag(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ // Temporarily disable VTOrc recoveries because we want to
+ // STOP replication specifically in order to increase the
+ // lag and we DO NOT want VTOrc to try and fix this.
+ clusterInstance.DisableVTOrcRecoveries(t)
+ defer clusterInstance.EnableVTOrcRecoveries(t)
+
+ t.Run("stopping replication", func(t *testing.T) {
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias)
+ assert.NoError(t, err)
+ })
+ t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) {
+ time.Sleep(2 * throttler.DefaultThreshold)
+
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("primary self-check should still be fine", func(t *testing.T) {
+ resp, err := throttleCheckSelf(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ // self (on primary) is unaffected by replication lag
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("replica self-check should show error", func(t *testing.T) {
+ resp, err := throttleCheckSelf(replicaTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("starting replication", func(t *testing.T) {
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias)
+ assert.NoError(t, err)
+ })
+ t.Run("expecting replication to catch up and throttler check to return OK", func(t *testing.T) {
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("primary self-check should be fine", func(t *testing.T) {
+ resp, err := throttleCheckSelf(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ // self (on primary) is unaffected by replication lag
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("replica self-check should be fine", func(t *testing.T) {
+ resp, err := throttleCheckSelf(replicaTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+}
+
+func TestNoReplicas(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ t.Run("changing replica to RDONLY", func(t *testing.T) {
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY")
+ assert.NoError(t, err)
+
+ // This makes no REPLICA servers available. We expect something like:
+ // {"StatusCode":200,"Value":0,"Threshold":1,"Message":""}
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+ t.Run("restoring to REPLICA", func(t *testing.T) {
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA")
+ assert.NoError(t, err)
+
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK)
+ })
+}
+
+func TestCustomQuery(t *testing.T) {
+ defer cluster.PanicHandler(t)
+
+ t.Run("enabling throttler with custom query and threshold", func(t *testing.T) {
+ _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold.Seconds(), customQuery, false)
+ assert.NoError(t, err)
+
+ // Wait for the throttler to be enabled everywhere with new custom config.
+ for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
+ throttler.WaitForThrottlerStatusEnabled(t, tablet, true, &throttler.Config{Query: customQuery, Threshold: customThreshold.Seconds()}, throttlerEnabledTimeout)
+ }
+ })
+ t.Run("validating OK response from throttler with custom query", func(t *testing.T) {
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("test threads running", func(t *testing.T) {
+ sleepDuration := 20 * time.Second
+ var wg sync.WaitGroup
+ for i := 0; i < int(customThreshold.Seconds()); i++ {
+ // Generate different Sleep() calls, all at minimum sleepDuration.
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ vtgateExec(t, fmt.Sprintf("select sleep(%d)", int(sleepDuration.Seconds())+i), "")
+ }(i)
+ }
+ t.Run("exceeds threshold", func(t *testing.T) {
+ throttler.WaitForQueryResult(t, primaryTablet,
+ "select if(variable_value > 5, 'true', 'false') as result from performance_schema.global_status where variable_name='threads_running'",
+ "true", sleepDuration/3)
+ throttler.WaitForValidData(t, primaryTablet, sleepDuration-(5*time.Second))
+ // Now we should be reporting ~ customThreshold*2 threads_running, and we should
+ // hit the threshold. For example:
+ // {"StatusCode":429,"Value":6,"Threshold":5,"Message":"Threshold exceeded"}
+ {
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ }
+ {
+ resp, err := throttleCheckSelf(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ }
+ })
+ t.Run("wait for queries to terminate", func(t *testing.T) {
+ wg.Wait()
+ time.Sleep(1 * time.Second) // graceful time to let throttler read metrics
+ })
+ t.Run("restored below threshold", func(t *testing.T) {
+ {
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ }
+ {
+ resp, err := throttleCheckSelf(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ }
+ })
+ })
+}
+
+func TestRestoreDefaultQuery(t *testing.T) {
+ defer cluster.PanicHandler(t)
+
+ // Validate going back from custom-query to default-query (replication lag) still works.
+ t.Run("enabling throttler with default query and threshold", func(t *testing.T) {
+ _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, false)
+ assert.NoError(t, err)
+
+ // Wait for the throttler to be up and running everywhere again with the default config.
+ for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
+ throttler.WaitForThrottlerStatusEnabled(t, tablet, true, throttler.DefaultConfig, throttlerEnabledTimeout)
+ }
+ })
+ t.Run("validating OK response from throttler with default threshold, heartbeats running", func(t *testing.T) {
+ resp, err := throttleCheck(primaryTablet, false)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp))
+ })
+ t.Run("validating pushback response from throttler on default threshold once heartbeats go stale", func(t *testing.T) {
+ time.Sleep(2 * onDemandHeartbeatDuration) // just... really wait long enough, make sure on-demand stops
+ waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests)
+ })
+}
diff --git a/go/test/endtoend/throttler/util.go b/go/test/endtoend/throttler/util.go
new file mode 100644
index 00000000000..e8769999fc1
--- /dev/null
+++ b/go/test/endtoend/throttler/util.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package throttler
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/buger/jsonparser"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/vt/log"
+)
+
+type Config struct {
+ Query string
+ Threshold float64
+}
+
+const (
+ DefaultQuery = "select unix_timestamp(now(6))-max(ts/1000000000) as replication_lag from _vt.heartbeat"
+ DefaultThreshold = 1 * time.Second
+ ConfigTimeout = 60 * time.Second
+)
+
+var DefaultConfig = &Config{
+ Query: DefaultQuery,
+ Threshold: DefaultThreshold.Seconds(),
+}
+
+// UpdateThrottlerTopoConfig runs vtctlclient UpdateThrottlerConfig.
+// This retries the command until it succeeds or times out as the
+// SrvKeyspace record may not yet exist for a newly created
+// Keyspace that is still initializing before it becomes serving.
+func UpdateThrottlerTopoConfig(clusterInstance *cluster.LocalProcessCluster, enable bool, disable bool, threshold float64, metricsQuery string, viaVtctldClient bool) (result string, err error) {
+ args := []string{}
+ clientfunc := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput
+ if !viaVtctldClient {
+ args = append(args, "--")
+ clientfunc = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput
+ }
+ args = append(args, "UpdateThrottlerConfig")
+ if enable {
+ args = append(args, "--enable")
+ }
+ if disable {
+ args = append(args, "--disable")
+ }
+ if threshold > 0 {
+ args = append(args, "--threshold", fmt.Sprintf("%f", threshold))
+ }
+ args = append(args, "--custom-query", metricsQuery)
+ if metricsQuery != "" {
+ args = append(args, "--check-as-check-self")
+ } else {
+ args = append(args, "--check-as-check-shard")
+ }
+ args = append(args, clusterInstance.Keyspaces[0].Name)
+
+ ctx, cancel := context.WithTimeout(context.Background(), ConfigTimeout)
+ defer cancel()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ result, err = clientfunc(args...)
+ if err == nil {
+ return result, nil
+ }
+ select {
+ case <-ctx.Done():
+ return "", fmt.Errorf("timed out waiting for UpdateThrottlerConfig to succeed after %v; last seen value: %+v, error: %v", ConfigTimeout, result, err)
+ case <-ticker.C:
+ }
+ }
+}
+
+// WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as
+// enabled/disabled and have the provided config (if any) until the specified timeout.
+func WaitForThrottlerStatusEnabled(t *testing.T, tablet *cluster.Vttablet, enabled bool, config *Config, timeout time.Duration) {
+ enabledJSONPath := "IsEnabled"
+ queryJSONPath := "Query"
+ thresholdJSONPath := "Threshold"
+ url := fmt.Sprintf("http://localhost:%d/throttler/status", tablet.HTTPPort)
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ body := getHTTPBody(url)
+ isEnabled, err := jsonparser.GetBoolean([]byte(body), enabledJSONPath)
+ require.NoError(t, err)
+ if isEnabled == enabled {
+ if config == nil {
+ return
+ }
+ query, err := jsonparser.GetString([]byte(body), queryJSONPath)
+ require.NoError(t, err)
+ threshold, err := jsonparser.GetFloat([]byte(body), thresholdJSONPath)
+ require.NoError(t, err)
+ if query == config.Query && threshold == config.Threshold {
+ return
+ }
+ }
+ select {
+ case <-ctx.Done():
+ t.Errorf("timed out waiting for the %s tablet's throttler status enabled to be %t with the correct config after %v; last seen value: %s",
+ tablet.Alias, enabled, timeout, body)
+ return
+ case <-ticker.C:
+ }
+ }
+}
+
+func getHTTPBody(url string) string {
+ resp, err := http.Get(url)
+ if err != nil {
+ log.Infof("http Get returns %+v", err)
+ return ""
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ log.Infof("http Get returns status %d", resp.StatusCode)
+ return ""
+ }
+ respByte, _ := io.ReadAll(resp.Body)
+ body := string(respByte)
+ return body
+}
+
+// WaitForQueryResult waits for a tablet to return the given result for the given
+// query until the specified timeout.
+// This is for simple queries that return 1 column in 1 row. It compares the result
+// for that column as a string with the provided result.
+func WaitForQueryResult(t *testing.T, tablet *cluster.Vttablet, query, result string, timeout time.Duration) {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ res, err := tablet.VttabletProcess.QueryTablet(query, "", false)
+ require.NoError(t, err)
+ if res != nil && len(res.Rows) == 1 && res.Rows[0][0].ToString() == result {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ t.Errorf("timed out waiting for the %q query to produce a result of %q on tablet %s after %v; last seen value: %s",
+ query, result, tablet.Alias, timeout, res.Rows[0][0].ToString())
+ return
+ case <-ticker.C:
+ }
+ }
+}
+
+// WaitForValidData waits for a tablet's checks to return a non 500 http response
+// which indicates that it's not able to provide valid results. This is most
+// commonly caused by the throttler still gathering the initial results for
+// the given configuration.
+func WaitForValidData(t *testing.T, tablet *cluster.Vttablet, timeout time.Duration) {
+ checkURL := fmt.Sprintf("http://localhost:%d/throttler/check", tablet.HTTPPort)
+ selfCheckURL := fmt.Sprintf("http://localhost:%d/throttler/check-self", tablet.HTTPPort)
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ ticker := time.NewTicker(500 * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ checkResp, checkErr := http.Get(checkURL)
+ if checkErr != nil {
+ defer checkResp.Body.Close()
+ }
+ selfCheckResp, selfCheckErr := http.Get(selfCheckURL)
+ if selfCheckErr != nil {
+ defer selfCheckResp.Body.Close()
+ }
+ if checkErr == nil && selfCheckErr == nil &&
+ checkResp.StatusCode != http.StatusInternalServerError &&
+ selfCheckResp.StatusCode != http.StatusInternalServerError {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ t.Errorf("timed out waiting for %s tablet's throttler to return a valid result after %v; last seen value: %+v",
+ tablet.Alias, timeout, checkResp)
+ return
+ case <-ticker.C:
+ }
+ }
+}
diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go
index 038204108a7..2a074e8428a 100644
--- a/go/test/endtoend/topoconncache/main_test.go
+++ b/go/test/endtoend/topoconncache/main_test.go
@@ -124,6 +124,12 @@ func TestMain(m *testing.M) {
return 1, err
}
+ vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory)
+ _, err = vtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", keyspaceName, "--durability-policy=semi_sync")
+ if err != nil {
+ return 1, err
+ }
+
shard1Primary = clusterInstance.NewVttabletInstance("primary", 0, cell1)
shard1Replica = clusterInstance.NewVttabletInstance("replica", 0, cell2)
shard1Rdonly = clusterInstance.NewVttabletInstance("rdonly", 0, cell2)
@@ -147,7 +153,6 @@ func TestMain(m *testing.M) {
hostname,
clusterInstance.TmpDirectory,
commonTabletArg,
- true,
clusterInstance.DefaultCharset,
)
tablet.VttabletProcess.SupportsBackup = true
@@ -163,10 +168,6 @@ func TestMain(m *testing.M) {
}
}
- if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName); err != nil {
- return 1, err
- }
-
shard1 = cluster.Shard{
Name: "-80",
Vttablets: []*cluster.Vttablet{shard1Primary, shard1Replica, shard1Rdonly},
@@ -212,6 +213,10 @@ func TestMain(m *testing.M) {
return 1, err
}
+ if err := clusterInstance.StartVTOrc(keyspaceName); err != nil {
+ return 1, err
+ }
+
if err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil {
return 1, err
}
@@ -240,9 +245,10 @@ func testURL(t *testing.T, url string, testCaseName string) {
// getStatusForUrl returns the status code for the URL
func getStatusForURL(url string) int {
- resp, _ := http.Get(url)
- if resp != nil {
- return resp.StatusCode
+ resp, err := http.Get(url)
+ if err != nil {
+ return 0
}
- return 0
+ defer resp.Body.Close()
+ return resp.StatusCode
}
diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go
index f91ff67371c..02f14a7304d 100644
--- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go
+++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go
@@ -41,7 +41,7 @@ func TestVtctldListAllTablets(t *testing.T) {
url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
testURL(t, url, "keyspace url")
- healthCheckURL := fmt.Sprintf("http://%s:%d/debug/health/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
+ healthCheckURL := fmt.Sprintf("http://%s:%d/debug/health", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
testURL(t, healthCheckURL, "vtctld health check url")
testListAllTablets(t)
@@ -149,7 +149,6 @@ func addCellback(t *testing.T) {
hostname,
clusterInstance.TmpDirectory,
commonTabletArg,
- true,
clusterInstance.DefaultCharset,
)
tablet.VttabletProcess.SupportsBackup = true
diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go
index 89e51c1e665..a377ed777c8 100644
--- a/go/test/endtoend/utils/cmp.go
+++ b/go/test/endtoend/utils/cmp.go
@@ -85,6 +85,39 @@ func (mcmp *MySQLCompare) AssertMatchesAny(query string, expected ...string) {
mcmp.t.Errorf("Query: %s (-want +got):\n%v\nGot:%s", query, expected, got)
}
+// AssertMatchesAnyNoCompare ensures the given query produces any one of the expected results.
+// This method does not compare the mysql and vitess results together
+func (mcmp *MySQLCompare) AssertMatchesAnyNoCompare(query string, expected ...string) {
+ mcmp.t.Helper()
+
+ mQr, vQr := mcmp.execNoCompare(query)
+ got := fmt.Sprintf("%v", mQr.Rows)
+ valid := false
+ for _, e := range expected {
+ diff := cmp.Diff(e, got)
+ if diff == "" {
+ valid = true
+ break
+ }
+ }
+ if !valid {
+ mcmp.t.Errorf("MySQL Query: %s (-want +got):\n%v\nGot:%s", query, expected, got)
+ }
+ valid = false
+
+ got = fmt.Sprintf("%v", vQr.Rows)
+ for _, e := range expected {
+ diff := cmp.Diff(e, got)
+ if diff == "" {
+ valid = true
+ break
+ }
+ }
+ if !valid {
+ mcmp.t.Errorf("Vitess Query: %s (-want +got):\n%v\nGot:%s", query, expected, got)
+ }
+}
+
// AssertContainsError executes the query on both Vitess and MySQL.
// Both clients need to return an error. The error of Vitess must be matching the given expectation.
func (mcmp *MySQLCompare) AssertContainsError(query, expected string) {
diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go
index 5b6b226f131..5bbf75ffb71 100644
--- a/go/test/endtoend/utils/mysql.go
+++ b/go/test/endtoend/utils/mysql.go
@@ -26,6 +26,7 @@ import (
"github.com/stretchr/testify/assert"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/mysql"
@@ -42,6 +43,23 @@ func NewMySQL(cluster *cluster.LocalProcessCluster, dbName string, schemaSQL ...
return NewMySQLWithDetails(cluster.GetAndReservePort(), cluster.Hostname, dbName, schemaSQL...)
}
+// CreateMysqldAndMycnf returns a Mysqld and a Mycnf object to use for working with a MySQL
+// installation that hasn't been set up yet.
+func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int32) (*mysqlctl.Mysqld, *mysqlctl.Mycnf, error) {
+ mycnf := mysqlctl.NewMycnf(tabletUID, mysqlPort)
+ if err := mycnf.RandomizeMysqlServerID(); err != nil {
+ return nil, nil, fmt.Errorf("couldn't generate random MySQL server_id: %v", err)
+ }
+ if mysqlSocket != "" {
+ mycnf.SocketFile = mysqlSocket
+ }
+ var cfg dbconfigs.DBConfigs
+ // ensure the DBA username is 'root' instead of the system's default username so that mysqladmin can shutdown
+ cfg.Dba.User = "root"
+ cfg.InitWithSocket(mycnf.SocketFile)
+ return mysqlctl.NewMysqld(&cfg), mycnf, nil
+}
+
func NewMySQLWithDetails(port int, hostname, dbName string, schemaSQL ...string) (mysql.ConnParams, func(), error) {
mysqlDir, err := createMySQLDir()
if err != nil {
@@ -53,7 +71,7 @@ func NewMySQLWithDetails(port int, hostname, dbName string, schemaSQL ...string)
}
mysqlPort := port
- mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(0, "", int32(mysqlPort))
+ mysqld, mycnf, err := CreateMysqldAndMycnf(0, "", int32(mysqlPort))
if err != nil {
return mysql.ConnParams{}, nil, err
}
diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go
index bd80385342f..b78877f36ce 100644
--- a/go/test/endtoend/utils/utils.go
+++ b/go/test/endtoend/utils/utils.go
@@ -18,6 +18,7 @@ package utils
import (
"fmt"
+ "strings"
"testing"
"time"
@@ -33,6 +34,14 @@ import (
"vitess.io/vitess/go/sqltypes"
)
+// AssertContains ensures the given query result contains the expected results.
+func AssertContains(t testing.TB, conn *mysql.Conn, query, expected string) {
+ t.Helper()
+ qr := Exec(t, conn, query)
+ got := fmt.Sprintf("%v", qr.Rows)
+ assert.Contains(t, got, expected, "Query: %s", query)
+}
+
// AssertMatches ensures the given query produces the expected results.
func AssertMatches(t testing.TB, conn *mysql.Conn, query, expected string) {
t.Helper()
@@ -44,6 +53,30 @@ func AssertMatches(t testing.TB, conn *mysql.Conn, query, expected string) {
}
}
+// AssertMatchesContains ensures the given query produces the given substring.
+func AssertMatchesContains(t testing.TB, conn *mysql.Conn, query string, substrings ...string) {
+ t.Helper()
+ qr := Exec(t, conn, query)
+ got := fmt.Sprintf("%v", qr.Rows)
+ for _, substring := range substrings {
+ if !strings.Contains(got, substring) {
+ t.Errorf("Query: %s Got:\n%s\nLooking for substring:%s", query, got, substring)
+ }
+ }
+}
+
+// AssertMatchesNotContains ensures the given query's output doesn't have the given substring.
+func AssertMatchesNotContains(t testing.TB, conn *mysql.Conn, query string, substrings ...string) {
+ t.Helper()
+ qr := Exec(t, conn, query)
+ got := fmt.Sprintf("%v", qr.Rows)
+ for _, substring := range substrings {
+ if strings.Contains(got, substring) {
+ t.Errorf("Query: %s Got:\n%s\nFound substring:%s", query, got, substring)
+ }
+ }
+}
+
// AssertMatchesAny ensures the given query produces any one of the expected results.
func AssertMatchesAny(t testing.TB, conn *mysql.Conn, query string, expected ...string) {
t.Helper()
@@ -166,7 +199,11 @@ func AssertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected st
case <-timeout:
require.Fail(t, failureMsg, diff)
case <-time.After(r):
- qr := Exec(t, conn, query)
+ qr, err := ExecAllowError(t, conn, query)
+ if err != nil {
+ diff = err.Error()
+ break
+ }
diff = cmp.Diff(expected,
fmt.Sprintf("%v", qr.Rows))
}
@@ -199,6 +236,47 @@ func WaitForAuthoritative(t *testing.T, vtgateProcess cluster.VtgateProcess, ks,
}
}
+// WaitForColumn waits for a table's column to be present
+func WaitForColumn(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, tbl, col string) error {
+ timeout := time.After(10 * time.Second)
+ for {
+ select {
+ case <-timeout:
+ return fmt.Errorf("schema tracking did not find column '%s' in table '%s'", col, tbl)
+ default:
+ time.Sleep(1 * time.Second)
+ res, err := vtgateProcess.ReadVSchema()
+ require.NoError(t, err, res)
+ t2Map := getTableT2Map(res, ks, tbl)
+ authoritative, fieldPresent := t2Map["column_list_authoritative"]
+ if !fieldPresent {
+ break
+ }
+ authoritativeBool, isBool := authoritative.(bool)
+ if !isBool || !authoritativeBool {
+ break
+ }
+ colMap, exists := t2Map["columns"]
+ if !exists {
+ break
+ }
+ colList, isSlice := colMap.([]interface{})
+ if !isSlice {
+ break
+ }
+ for _, c := range colList {
+ colDef, isMap := c.(map[string]interface{})
+ if !isMap {
+ break
+ }
+ if colName, exists := colDef["name"]; exists && colName == col {
+ return nil
+ }
+ }
+ }
+ }
+}
+
func getTableT2Map(res *interface{}, ks, tbl string) map[string]interface{} {
step1 := convertToMap(*res)["keyspaces"]
step2 := convertToMap(step1)[ks]
@@ -211,3 +289,40 @@ func convertToMap(input interface{}) map[string]interface{} {
output := input.(map[string]interface{})
return output
}
+
+// TimeoutAction performs the action within the given timeout limit.
+// If the timeout is reached, the test is failed with errMsg.
+// If action returns false, the timeout loop continues, if it returns true, the function succeeds.
+func TimeoutAction(t *testing.T, timeout time.Duration, errMsg string, action func() bool) {
+ deadline := time.After(timeout)
+ ok := false
+ for !ok {
+ select {
+ case <-deadline:
+ t.Error(errMsg)
+ return
+ case <-time.After(1 * time.Second):
+ ok = action()
+ }
+ }
+}
+
+func GetInitDBSQL(initDBSQL string, updatedPasswords string, oldAlterTableMode string) (string, error) {
+ // Since password update is DML we need to insert it before we disable
+ // super_read_only therefore doing the split below.
+ splitString := strings.Split(initDBSQL, "# {{custom_sql}}")
+ if len(splitString) != 2 {
+ return "", fmt.Errorf("missing `# {{custom_sql}}` in init_db.sql file")
+ }
+ var builder strings.Builder
+ builder.WriteString(splitString[0])
+ builder.WriteString(updatedPasswords)
+
+ // https://github.com/vitessio/vitess/issues/8315
+ if oldAlterTableMode != "" {
+ builder.WriteString(oldAlterTableMode)
+ }
+ builder.WriteString(splitString[1])
+
+ return builder.String(), nil
+}
diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go
index da66f3a52e3..25ed88f4335 100644
--- a/go/test/endtoend/vault/vault_test.go
+++ b/go/test/endtoend/vault/vault_test.go
@@ -293,6 +293,9 @@ func initializeClusterLate(t *testing.T) {
err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID)
require.NoError(t, err)
+ err = clusterInstance.StartVTOrc(keyspaceName)
+ require.NoError(t, err)
+
// Start vtgate
err = clusterInstance.StartVtgate()
require.NoError(t, err)
diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go
index 8232d85a018..edd90599de3 100644
--- a/go/test/endtoend/vreplication/cluster_test.go
+++ b/go/test/endtoend/vreplication/cluster_test.go
@@ -50,6 +50,8 @@ var (
extraVtctldArgs = []string{"--remote_operation_timeout", "600s", "--topo_etcd_lease_ttl", "120"}
// This variable can be used within specific tests to alter vttablet behavior
extraVTTabletArgs = []string{}
+
+ parallelInsertWorkers = "--vreplication-parallel-insert-workers=4"
)
// ClusterConfig defines the parameters like ports, tmpDir, tablet types which uniquely define a vitess cluster
@@ -69,6 +71,7 @@ type ClusterConfig struct {
tabletPortBase int
tabletGrpcPortBase int
tabletMysqlPortBase int
+ vtorcPort int
vreplicationCompressGTID bool
}
@@ -82,6 +85,8 @@ type VitessCluster struct {
Vtctld *cluster.VtctldProcess
Vtctl *cluster.VtctlProcess
VtctlClient *cluster.VtctlClientProcess
+ VtctldClient *cluster.VtctldClientProcess
+ VTOrcProcess *cluster.VTOrcProcess
}
// Cell represents a Vitess cell within the test cluster
@@ -128,6 +133,29 @@ func setTempVtDataRoot() string {
return vtdataroot
}
+// StartVTOrc starts a VTOrc instance
+func (vc *VitessCluster) StartVTOrc() error {
+ // Start vtorc if not already running
+ if vc.VTOrcProcess != nil {
+ return nil
+ }
+ base := cluster.VtctlProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname)
+ base.Binary = "vtorc"
+ vtorcProcess := &cluster.VTOrcProcess{
+ VtctlProcess: *base,
+ LogDir: vc.ClusterConfig.tmpDir,
+ Config: cluster.VTOrcConfiguration{},
+ Port: vc.ClusterConfig.vtorcPort,
+ }
+ err := vtorcProcess.Setup()
+ if err != nil {
+ log.Error(err.Error())
+ return err
+ }
+ vc.VTOrcProcess = vtorcProcess
+ return nil
+}
+
// setVtMySQLRoot creates the root directory if it does not exist
// and saves the directory in the VT_MYSQL_ROOT OS env var.
// mysqlctl will then look for the mysql related binaries in the
@@ -223,6 +251,9 @@ func downloadDBTypeVersion(dbType string, majorVersion string, path string) erro
} else if dbType == "mysql" && majorVersion == "8.0" {
versionFile = "mysql-8.0.28-linux-glibc2.17-x86_64-minimal.tar.xz"
url = "https://dev.mysql.com/get/Downloads/MySQL-8.0/" + versionFile
+ } else if dbType == "mariadb" && majorVersion == "10.10" {
+ versionFile = "mariadb-10.10.3-linux-systemd-x86_64.tar.gz"
+ url = "https://github.com/vitessio/vitess-resources/releases/download/v4.0/" + versionFile
} else {
return fmt.Errorf("invalid/unsupported major version: %s for database: %s", majorVersion, dbType)
}
@@ -280,6 +311,7 @@ func getClusterConfig(idx int, dataRootDir string) *ClusterConfig {
tabletPortBase: basePort + 1000,
tabletGrpcPortBase: basePort + 1991,
tabletMysqlPortBase: basePort + 1306,
+ vtorcPort: basePort + 2639,
charset: "utf8mb4",
}
}
@@ -306,6 +338,9 @@ func init() {
func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster {
vc := &VitessCluster{Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig}
require.NotNil(t, vc)
+
+ vc.CleanupDataroot(t, true)
+
topo := cluster.TopoProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.topoPort+1, vc.ClusterConfig.hostname, "etcd2", "global")
require.NotNil(t, topo)
@@ -336,10 +371,31 @@ func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConf
vc.VtctlClient = cluster.VtctlClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir)
require.NotNil(t, vc.VtctlClient)
-
+ vc.VtctldClient = cluster.VtctldClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir)
+ require.NotNil(t, vc.VtctldClient)
return vc
}
+// CleanupDataroot deletes the vtdataroot directory. Since we run multiple tests sequentially in a single CI test shard,
+// we can run out of disk space due to all the leftover artifacts from previous tests.
+func (vc *VitessCluster) CleanupDataroot(t *testing.T, recreate bool) {
+ // This is always set to "true" on GitHub Actions runners:
+ // https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables
+ ci, ok := os.LookupEnv("CI")
+ if !ok || strings.ToLower(ci) != "true" {
+ // Leave the directory in place to support local debugging.
+ return
+ }
+ dir := vc.ClusterConfig.vtdataroot
+ log.Infof("Deleting vtdataroot %s", dir)
+ err := os.RemoveAll(dir)
+ require.NoError(t, err)
+ if recreate {
+ err = os.Mkdir(dir, 0700)
+ require.NoError(t, err)
+ }
+}
+
// AddKeyspace creates a keyspace with specified shard keys and number of replica/read-only tablets.
// You can pass optional key value pairs (opts) if you want conditional behavior.
func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, shards string, vschema string, schema string, numReplicas int, numRdonly int, tabletIDBase int, opts map[string]string) (*Keyspace, error) {
@@ -392,7 +448,7 @@ func (vc *VitessCluster) AddTablet(t testing.TB, cell *Cell, keyspace *Keyspace,
"--enable-lag-throttler",
"--heartbeat_enable",
"--heartbeat_interval", "250ms",
- } //FIXME: for multi-cell initial schema doesn't seem to load without "--queryserver-config-schema-reload-time"
+ } // FIXME: for multi-cell initial schema doesn't seem to load without "--queryserver-config-schema-reload-time"
options = append(options, extraVTTabletArgs...)
if mainClusterConfig.vreplicationCompressGTID {
@@ -412,7 +468,6 @@ func (vc *VitessCluster) AddTablet(t testing.TB, cell *Cell, keyspace *Keyspace,
vc.ClusterConfig.hostname,
vc.ClusterConfig.tmpDir,
options,
- false,
vc.ClusterConfig.charset)
require.NotNil(t, vttablet)
@@ -436,8 +491,20 @@ func (vc *VitessCluster) AddTablet(t testing.TB, cell *Cell, keyspace *Keyspace,
// AddShards creates shards given list of comma-separated keys with specified tablets in each shard
func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspace, names string, numReplicas int, numRdonly int, tabletIDBase int, opts map[string]string) error {
+ // Add a VTOrc instance if one is not already running
+ if err := vc.StartVTOrc(); err != nil {
+ return err
+ }
+ // Disable global recoveries until the shard has been added.
+ // We need this because we run ISP in the end. Running ISP after VTOrc has already run PRS
+ // causes issues.
+ vc.VTOrcProcess.DisableGlobalRecoveries(t)
+ defer vc.VTOrcProcess.EnableGlobalRecoveries(t)
+
if value, exists := opts["DBTypeVersion"]; exists {
- setupDBTypeVersion(t, value)
+ if resetFunc := setupDBTypeVersion(t, value); resetFunc != nil {
+ defer resetFunc()
+ }
}
arrNames := strings.Split(names, ",")
@@ -513,6 +580,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa
require.NoError(t, vc.VtctlClient.InitializeShard(keyspace.Name, shardName, cells[0].Name, primaryTabletUID))
log.Infof("Finished creating shard %s", shard.Name)
}
+
return nil
}
@@ -525,7 +593,7 @@ func (vc *VitessCluster) DeleteShard(t testing.TB, cellName string, ksName strin
tab.Vttablet.TearDown()
}
log.Infof("Deleting Shard %s", shardName)
- //TODO how can we avoid the use of even_if_serving?
+ // TODO how can we avoid the use of even_if_serving?
if output, err := vc.VtctlClient.ExecuteCommandWithOutput("DeleteShard", "--", "--recursive", "--even_if_serving", ksName+"/"+shardName); err != nil {
t.Fatalf("DeleteShard command failed with error %+v and output %s\n", err, output)
}
@@ -560,7 +628,7 @@ func (vc *VitessCluster) AddCell(t testing.TB, name string) (*Cell, error) {
return cell, nil
}
-func (vc *VitessCluster) teardown(t testing.TB) {
+func (vc *VitessCluster) teardown() {
for _, cell := range vc.Cells {
for _, vtgate := range cell.Vtgates {
if err := vtgate.TearDown(); err != nil {
@@ -570,7 +638,7 @@ func (vc *VitessCluster) teardown(t testing.TB) {
}
}
}
- //collect unique keyspaces across cells
+ // collect unique keyspaces across cells
keyspaces := make(map[string]*Keyspace)
for _, cell := range vc.Cells {
for _, keyspace := range cell.Keyspaces {
@@ -614,16 +682,22 @@ func (vc *VitessCluster) teardown(t testing.TB) {
log.Infof("Successfully tore down topo %s", vc.Topo.Name)
}
}
+
+ if vc.VTOrcProcess != nil {
+ if err := vc.VTOrcProcess.TearDown(); err != nil {
+ log.Infof("Error stopping VTOrc: %s", err.Error())
+ }
+ }
}
// TearDown brings down a cluster, deleting processes, removing topo keys
-func (vc *VitessCluster) TearDown(t testing.TB) {
+func (vc *VitessCluster) TearDown(t *testing.T) {
if debugMode {
return
}
done := make(chan bool)
go func() {
- vc.teardown(t)
+ vc.teardown()
done <- true
}()
select {
@@ -634,6 +708,7 @@ func (vc *VitessCluster) TearDown(t testing.TB) {
}
// some processes seem to hang around for a bit
time.Sleep(5 * time.Second)
+ vc.CleanupDataroot(t, false)
}
func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName string, tabletType string) map[string]*cluster.VttabletProcess {
@@ -692,7 +767,10 @@ func (vc *VitessCluster) startQuery(t *testing.T, query string) (func(t *testing
return commit, rollback
}
-func setupDBTypeVersion(t *testing.T, value string) {
+// setupDBTypeVersion will perform any work needed to enable a specific
+// database type and version if not already installed. It returns a
+// function to reset any environment changes made.
+func setupDBTypeVersion(t *testing.T, value string) func() {
details := strings.Split(value, "-")
if len(details) != 2 {
t.Fatalf("Invalid database details: %s", value)
@@ -707,21 +785,23 @@ func setupDBTypeVersion(t *testing.T, value string) {
}
if dbTypeMajorVersion == dbVersionInUse {
t.Logf("Requsted database version %s is already installed, doing nothing.", dbTypeMajorVersion)
- } else {
- path := fmt.Sprintf("/tmp/%s", dbTypeMajorVersion)
- // Set the root path and create it if needed
- if err := setVtMySQLRoot(path); err != nil {
- t.Fatalf("Could not set VT_MYSQL_ROOT to %s, error: %v", path, err)
- }
- defer unsetVtMySQLRoot()
- // Download and extract the version artifact if needed
- if err := downloadDBTypeVersion(dbType, majorVersion, path); err != nil {
- t.Fatalf("Could not download %s, error: %v", majorVersion, err)
- }
- // Set the MYSQL_FLAVOR OS ENV var for mysqlctl to use the correct config file
- if err := setDBFlavor(); err != nil {
- t.Fatalf("Could not set MYSQL_FLAVOR: %v", err)
- }
- defer unsetDBFlavor()
+ return func() {}
+ }
+ path := fmt.Sprintf("/tmp/%s", dbTypeMajorVersion)
+ // Set the root path and create it if needed
+ if err := setVtMySQLRoot(path); err != nil {
+ t.Fatalf("Could not set VT_MYSQL_ROOT to %s, error: %v", path, err)
+ }
+ // Download and extract the version artifact if needed
+ if err := downloadDBTypeVersion(dbType, majorVersion, path); err != nil {
+ t.Fatalf("Could not download %s, error: %v", majorVersion, err)
+ }
+ // Set the MYSQL_FLAVOR OS ENV var for mysqlctl to use the correct config file
+ if err := setDBFlavor(); err != nil {
+ t.Fatalf("Could not set MYSQL_FLAVOR: %v", err)
+ }
+ return func() {
+ unsetDBFlavor()
+ unsetVtMySQLRoot()
}
}
diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go
index 95246f5d849..9cb05fa1044 100644
--- a/go/test/endtoend/vreplication/config_test.go
+++ b/go/test/endtoend/vreplication/config_test.go
@@ -32,22 +32,23 @@ package vreplication
// The internal table _vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431 should be ignored by vreplication
// The db_order_test table is used to ensure vreplication and vdiff work well with complex non-integer PKs, even across DB versions.
var (
+ // All standard user tables should have a primary key and at least one secondary key.
initialProductSchema = `
-create table product(pid int, description varbinary(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid)) CHARSET=utf8mb4;
+create table product(pid int, description varbinary(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid), key(date1,date2)) CHARSET=utf8mb4;
create table customer(cid int, name varchar(128) collate utf8mb4_bin, meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),
ts timestamp not null default current_timestamp, bits bit(2) default b'11', date1 datetime not null default '0000-00-00 00:00:00',
- date2 datetime not null default '2021-00-01 00:00:00', dec80 decimal(8,0), primary key(cid,typ)) CHARSET=utf8mb4;
+ date2 datetime not null default '2021-00-01 00:00:00', dec80 decimal(8,0), primary key(cid,typ), key(name)) CHARSET=utf8mb4;
create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
-create table merchant(mname varchar(128), category varchar(128), primary key(mname)) CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;
-create table orders(oid int, cid int, pid int, mname varchar(128), price int, qty int, total int as (qty * price), total2 int as (qty * price) stored, primary key(oid)) CHARSET=utf8;
+create table merchant(mname varchar(128), category varchar(128), primary key(mname), key(category)) CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;
+create table orders(oid int, cid int, pid int, mname varchar(128), price int, qty int, total int as (qty * price), total2 int as (qty * price) stored, primary key(oid), key(pid), key(cid)) CHARSET=utf8;
create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
-create table customer2(cid int, name varchar(128), typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid)) CHARSET=utf8;
+create table customer2(cid int, name varchar(128), typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid), key(ts)) CHARSET=utf8;
create table customer_seq2(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
-create table ` + "`Lead`(`Lead-id`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (` + "`Lead-id`" + `));
-create table ` + "`Lead-1`(`Lead`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (` + "`Lead`" + `));
-create table _vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431(id int, val varbinary(128), primary key(id));
-create table db_order_test (c_uuid varchar(64) not null default '', created_at datetime not null, dstuff varchar(128), dtstuff text, dbstuff blob, cstuff char(32), primary key (c_uuid,created_at)) CHARSET=utf8mb4;
-create table datze (id int, dt1 datetime not null default current_timestamp, dt2 datetime not null, ts1 timestamp default current_timestamp, primary key (id));
+create table ` + "`Lead`(`Lead-id`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (` + "`Lead-id`" + `), key (date1));
+create table ` + "`Lead-1`(`Lead`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (` + "`Lead`" + `), key (date2));
+create table _vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431(id int, val varbinary(128), primary key(id), key(val));
+create table db_order_test (c_uuid varchar(64) not null default '', created_at datetime not null, dstuff varchar(128), dtstuff text, dbstuff blob, cstuff char(32), primary key (c_uuid,created_at), key (dstuff)) CHARSET=utf8mb4;
+create table datze (id int, dt1 datetime not null default current_timestamp, dt2 datetime not null, ts1 timestamp default current_timestamp, primary key (id), key (dt1));
`
// These should always be ignored in vreplication
diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go
index 264c748a5bb..bc3ace3f064 100644
--- a/go/test/endtoend/vreplication/helper_test.go
+++ b/go/test/endtoend/vreplication/helper_test.go
@@ -23,25 +23,25 @@ import (
"net/http"
"os/exec"
"regexp"
+ "sort"
"strconv"
"strings"
"testing"
"time"
- "vitess.io/vitess/go/vt/log"
-
+ "github.com/PuerkitoBio/goquery"
"github.com/buger/jsonparser"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tidwall/gjson"
- "vitess.io/vitess/go/test/endtoend/cluster"
- "vitess.io/vitess/go/vt/schema"
-
- "github.com/PuerkitoBio/goquery"
-
"vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/schema"
+ "vitess.io/vitess/go/vt/sqlparser"
)
const (
@@ -97,10 +97,8 @@ func execVtgateQuery(t *testing.T, conn *mysql.Conn, database string, query stri
func checkHealth(t *testing.T, url string) bool {
resp, err := http.Get(url)
require.NoError(t, err)
- if err != nil || resp.StatusCode != 200 {
- return false
- }
- return true
+ defer resp.Body.Close()
+ return resp.StatusCode == 200
}
func waitForQueryResult(t *testing.T, conn *mysql.Conn, database string, query string, want string) {
@@ -129,9 +127,9 @@ func waitForTabletThrottlingStatus(t *testing.T, tablet *cluster.VttabletProcess
timer := time.NewTimer(defaultTimeout)
defer timer.Stop()
for {
- _, output, err := throttlerCheckSelf(tablet, appName)
+ output, err := throttlerCheckSelf(tablet, appName)
require.NoError(t, err)
- require.NotNil(t, output)
+
gotCode, err = jsonparser.GetInt([]byte(output), "StatusCode")
require.NoError(t, err)
if wantCode == gotCode {
@@ -238,7 +236,12 @@ func validateThatQueryExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet *c
return newCount == count+1
}
-func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wantState string) {
+// waitForWorkflowState waits for all of the given workflow's
+// streams to reach the provided state. You can pass optional
+// key value pairs of the form "key==value" to also wait for
+// additional stream sub-state such as "Message==for vdiff".
+// Invalid checks are ignored.
+func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wantState string, fieldEqualityChecks ...string) {
done := false
timer := time.NewTimer(workflowStateTimeout)
log.Infof("Waiting for workflow %q to fully reach %q state", ksWorkflow, wantState)
@@ -252,9 +255,21 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa
tabletStreams.ForEach(func(streamId, streamInfos gjson.Result) bool { // for each stream
if streamId.String() == "PrimaryReplicationStatuses" {
streamInfos.ForEach(func(attributeKey, attributeValue gjson.Result) bool { // for each attribute in the stream
+ // we need to wait for all streams to have the desired state
state = attributeValue.Get("State").String()
- if state != wantState {
- done = false // we need to wait for all streams to have the desired state
+ if state == wantState {
+ for i := 0; i < len(fieldEqualityChecks); i++ {
+ if kvparts := strings.Split(fieldEqualityChecks[i], "=="); len(kvparts) == 2 {
+ key := kvparts[0]
+ val := kvparts[1]
+ res := attributeValue.Get(key).String()
+ if !strings.EqualFold(res, val) {
+ done = false
+ }
+ }
+ }
+ } else {
+ done = false
}
return true
})
@@ -269,14 +284,70 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa
}
select {
case <-timer.C:
- require.FailNowf(t, "workflow %q did not fully reach the expected state of %q before the timeout of %s; last seen output: %s",
- ksWorkflow, wantState, workflowStateTimeout, output)
+ var extraRequirements string
+ if len(fieldEqualityChecks) > 0 {
+ extraRequirements = fmt.Sprintf(" with the additional requirements of \"%v\"", fieldEqualityChecks)
+ }
+ require.FailNowf(t, "workflow state not reached",
+ "Workflow %q did not fully reach the expected state of %q%s before the timeout of %s; last seen output: %s",
+ ksWorkflow, wantState, extraRequirements, workflowStateTimeout, output)
default:
time.Sleep(defaultTick)
}
}
}
+// confirmTablesHaveSecondaryKeys confirms that the tables provided
+// as a CSV have secondary keys. This is useful when testing the
+// --defer-secondary-keys flag to confirm that the secondary keys
+// were re-added by the time the workflow hits the running phase.
+// For a Reshard workflow, where no tables are specififed, pass
+// an empty string for the tables and all tables in the target
+// keyspace will be checked.
+func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletProcess, ksName string, tables string) {
+ require.NotNil(t, tablets)
+ require.NotNil(t, tablets[0])
+ var tableArr []string
+ if strings.TrimSpace(tables) != "" {
+ tableArr = strings.Split(tables, ",")
+ }
+ if len(tableArr) == 0 { // We don't specify any for Reshard.
+ // In this case we check all of them.
+ res, err := tablets[0].QueryTablet("show tables", ksName, true)
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ for _, row := range res.Rows {
+ tableArr = append(tableArr, row[0].ToString())
+ }
+ }
+ for _, tablet := range tablets {
+ for _, table := range tableArr {
+ if schema.IsInternalOperationTableName(table) {
+ continue
+ }
+ table := strings.TrimSpace(table)
+ secondaryKeys := 0
+ res, err := tablet.QueryTablet(fmt.Sprintf("show create table %s", sqlescape.EscapeID(table)), ksName, true)
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ row := res.Named().Row()
+ tableSchema := row["Create Table"].ToString()
+ parsedDDL, err := sqlparser.ParseStrictDDL(tableSchema)
+ require.NoError(t, err)
+ createTable, ok := parsedDDL.(*sqlparser.CreateTable)
+ require.True(t, ok)
+ require.NotNil(t, createTable)
+ require.NotNil(t, createTable.GetTableSpec())
+ for _, index := range createTable.GetTableSpec().Indexes {
+ if !index.Info.Primary {
+ secondaryKeys++
+ }
+ }
+ require.Greater(t, secondaryKeys, 0, "Table %s does not have any secondary keys", table)
+ }
+ }
+}
+
func getHTTPBody(url string) string {
resp, err := http.Get(url)
if err != nil {
@@ -435,13 +506,6 @@ func printShardPositions(vc *VitessCluster, ksShards []string) {
}
}
-func clearRoutingRules(t *testing.T, vc *VitessCluster) error {
- if _, err := vc.VtctlClient.ExecuteCommandWithOutput("ApplyRoutingRules", "--", "--rules={}"); err != nil {
- return err
- }
- return nil
-}
-
func printRoutingRules(t *testing.T, vc *VitessCluster, msg string) error {
var output string
var err error
@@ -462,6 +526,7 @@ func getDebugVar(t *testing.T, port int, varPath []string) (string, error) {
var val []byte
var err error
url := fmt.Sprintf("http://localhost:%d/debug/vars", port)
+ log.Infof("url: %s, varPath: %s", url, strings.Join(varPath, ":"))
body := getHTTPBody(url)
val, _, _, err = jsonparser.Get([]byte(body), varPath...)
require.NoError(t, err)
@@ -503,3 +568,75 @@ func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) {
}
}
}
+
+// getShardRoutingRules returns the shard routing rules stored in the
+// topo. It returns the rules sorted by shard,to_keyspace and with all
+// newlines and whitespace removed so that we have predictable,
+// compact, and easy to compare results for tests.
+func getShardRoutingRules(t *testing.T) string {
+ output, err := osExec(t, "vtctldclient", []string{"--server", getVtctldGRPCURL(), "GetShardRoutingRules"})
+ log.Infof("GetShardRoutingRules err: %+v, output: %+v", err, output)
+ require.Nilf(t, err, output)
+ require.NotNil(t, output)
+
+ // Sort the rules by shard,to_keyspace
+ jsonOutput := gjson.Parse(output)
+ rules := jsonOutput.Get("rules").Array()
+ sort.Slice(rules, func(i, j int) bool {
+ shardI := rules[i].Get("shard").String()
+ shardJ := rules[j].Get("shard").String()
+ if shardI == shardJ {
+ return rules[i].Get("to_keyspace").String() < rules[j].Get("to_keyspace").String()
+ }
+ return shardI < shardJ
+ })
+ sb := strings.Builder{}
+ for i := 0; i < len(rules); i++ {
+ if i > 0 {
+ sb.WriteString(",")
+ }
+ sb.WriteString(rules[i].String())
+ }
+ output = fmt.Sprintf(`{"rules":[%s]}`, sb.String())
+
+ // Remove newlines and whitespace
+ re := regexp.MustCompile(`[\n\s]+`)
+ output = re.ReplaceAllString(output, "")
+ output = strings.TrimSpace(output)
+ return output
+}
+
+func verifyCopyStateIsOptimized(t *testing.T, tablet *cluster.VttabletProcess) {
+ // Update information_schem with the latest data
+ _, err := tablet.QueryTablet("analyze table _vt.copy_state", "", false)
+ require.NoError(t, err)
+
+ // Verify that there's no delete marked rows and we reset the auto-inc value.
+ // MySQL doesn't always immediately update information_schema so we wait.
+ tmr := time.NewTimer(defaultTimeout)
+ defer tmr.Stop()
+ query := "select data_free, auto_increment from information_schema.tables where table_schema='_vt' and table_name='copy_state'"
+ var dataFree, autoIncrement int64
+ for {
+ res, err := tablet.QueryTablet(query, "", false)
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ require.Equal(t, 1, len(res.Rows))
+ dataFree, err = res.Rows[0][0].ToInt64()
+ require.NoError(t, err)
+ autoIncrement, err = res.Rows[0][1].ToInt64()
+ require.NoError(t, err)
+ if dataFree == 0 && autoIncrement == 1 {
+ return
+ }
+
+ select {
+ case <-tmr.C:
+ require.FailNowf(t, "timed out waiting for copy_state table to be optimized",
+ "data_free should be 0 and auto_increment should be 1, last seen values were %d and %d respectively",
+ dataFree, autoIncrement)
+ default:
+ time.Sleep(defaultTick)
+ }
+ }
+}
diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go
index 0016a0771dd..a13ec1d0da6 100644
--- a/go/test/endtoend/vreplication/materialize_test.go
+++ b/go/test/endtoend/vreplication/materialize_test.go
@@ -17,10 +17,11 @@ limitations under the License.
package vreplication
import (
- "fmt"
"testing"
"github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
)
const smSchema = `
@@ -68,6 +69,7 @@ func testShardedMaterialize(t *testing.T) {
vc = NewVitessCluster(t, "TestShardedMaterialize", allCells, mainClusterConfig)
ks1 := "ks1"
ks2 := "ks2"
+ shard := "0"
require.NotNil(t, vc)
defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets
defer func() { defaultReplicas = 1 }()
@@ -78,15 +80,17 @@ func testShardedMaterialize(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks1, "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, ks1, shard)
+ require.NoError(t, err)
vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200, nil)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks2, "0"), 1)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, ks2, shard)
+ require.NoError(t, err)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
verifyClusterHealth(t, vc)
- _, err := vtgateConn.ExecuteFetch(initDataQuery, 0, false)
+ _, err = vtgateConn.ExecuteFetch(initDataQuery, 0, false)
require.NoError(t, err)
materialize(t, smMaterializeSpec)
tab := vc.getPrimaryTablet(t, ks2, "0")
@@ -184,6 +188,7 @@ func testMaterialize(t *testing.T) {
vc = NewVitessCluster(t, "TestMaterialize", allCells, mainClusterConfig)
sourceKs := "source"
targetKs := "target"
+ shard := "0"
require.NotNil(t, vc)
defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets
defer func() { defaultReplicas = 1 }()
@@ -194,19 +199,21 @@ func testMaterialize(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, defaultReplicas, defaultRdonly, 300, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)
+ require.NoError(t, err)
vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, "0", smMaterializeVSchemaTarget, smMaterializeSchemaTarget, defaultReplicas, defaultRdonly, 400, nil)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, "0"), 1)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)
+ require.NoError(t, err)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
verifyClusterHealth(t, vc)
- _, err := vtgateConn.ExecuteFetch(materializeInitDataQuery, 0, false)
+ _, err = vtgateConn.ExecuteFetch(materializeInitDataQuery, 0, false)
require.NoError(t, err)
- ks2Primary := vc.getPrimaryTablet(t, targetKs, "0")
+ ks2Primary := vc.getPrimaryTablet(t, targetKs, shard)
_, err = ks2Primary.QueryTablet(customFunc, targetKs, true)
require.NoError(t, err)
diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go
index 18745aea4cd..0c83658cee8 100644
--- a/go/test/endtoend/vreplication/migrate_test.go
+++ b/go/test/endtoend/vreplication/migrate_test.go
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
)
func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) {
@@ -55,9 +56,10 @@ func TestMigrate(t *testing.T) {
defaultCell = vc.Cells[defaultCellName]
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0")
+ require.NoError(t, err)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -76,12 +78,12 @@ func TestMigrate(t *testing.T) {
extVtgate := extCell2.Vtgates[0]
require.NotNil(t, extVtgate)
- extVtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "rating", "0"), 1)
+ err = cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0")
+ require.NoError(t, err)
verifyClusterHealth(t, extVc)
extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort)
insertInitialDataIntoExternalCluster(t, extVtgateConn)
- var err error
var output, expected string
ksWorkflow := "product.e1"
diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go
new file mode 100644
index 00000000000..c130000e53a
--- /dev/null
+++ b/go/test/endtoend/vreplication/partial_movetables_test.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vreplication
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
+
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/wrangler"
+)
+
+// TestPartialMoveTables tests partial move tables by moving each
+// customer shard -- -80,80- -- once a a time to customer2.
+func TestPartialMoveTables(t *testing.T) {
+ origDefaultRdonly := defaultRdonly
+ defer func() {
+ defaultRdonly = origDefaultRdonly
+ }()
+ defaultRdonly = 1
+ origExtraVTGateArgs := extraVTGateArgs
+ // We need to enable shard routing for partial movetables routing.
+ // And we need to disable schema change tracking in vtgate as we want
+ // to test query routing using a query we know will fail as it's
+ // using a column that doesn't exist in the schema -- this way we
+ // get the target shard details back in the error message. If schema
+ // tracking is enabled then vtgate will produce an error about the
+ // unknown symbol before attempting to route the query.
+ extraVTGateArgs = append(extraVTGateArgs, []string{
+ "--enable-partial-keyspace-migration",
+ "--schema_change_signal=false",
+ }...)
+ defer func() {
+ extraVTGateArgs = origExtraVTGateArgs
+ }()
+ vc = setupCluster(t)
+ defer vtgateConn.Close()
+ defer vc.TearDown(t)
+ setupCustomerKeyspace(t)
+
+ // Move customer table from unsharded product keyspace to
+ // sharded customer keyspace.
+ createMoveTablesWorkflow(t, "customer")
+ tstWorkflowSwitchReadsAndWrites(t)
+ tstWorkflowComplete(t)
+
+ emptyGlobalRoutingRules := "{}\n"
+
+ // These should be listed in shard order
+ emptyShardRoutingRules := `{"rules":[]}`
+ preCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer2","to_keyspace":"customer","shard":"80-"}]}`
+ halfCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}`
+ postCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}`
+
+ // Remove any manually applied shard routing rules as these
+ // should be set by SwitchTraffic.
+ applyShardRoutingRules(t, emptyShardRoutingRules)
+ require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t))
+
+ // Now setup the customer2 keyspace so we can do a partial
+ // move tables for one of the two shards: 80-.
+ defaultRdonly = 0
+ setupCustomer2Keyspace(t)
+ currentWorkflowType = wrangler.MoveTablesWorkflow
+ wfName := "partial80Dash"
+ sourceKs := "customer"
+ targetKs := "customer2"
+ shard := "80-"
+ ksWf := fmt.Sprintf("%s.%s", targetKs, wfName)
+
+ // start the partial movetables for 80-
+ err := tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs,
+ "customer", workflowActionCreate, "", shard, "")
+ require.NoError(t, err)
+ targetTab1 = vc.getPrimaryTablet(t, targetKs, shard)
+ catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2")
+ vdiff1(t, ksWf, "")
+
+ waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards
+ waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards
+ waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80-
+
+ confirmGlobalRoutingToSource := func() {
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules")
+ require.NoError(t, err)
+ result := gjson.Get(output, "rules")
+ result.ForEach(func(attributeKey, attributeValue gjson.Result) bool {
+ // 0 is the keyspace and 1 is optional tablename[@tablettype]
+ fromKsTbl := strings.Split(attributeValue.Get("fromTable").String(), ".")
+ // 0 is the keyspace and 1 is the tablename
+ toKsTbl := strings.Split(attributeValue.Get("toTables.0").String(), ".")
+ // All tables in the customer and customer2 keyspaces should be
+ // routed to the customer keyspace.
+ if fromKsTbl[0] == "customer" || fromKsTbl[0] == "customer2" {
+ require.Equal(t, "customer", toKsTbl[0])
+ }
+ return true
+ })
+ }
+
+ // This query uses an ID that should always get routed to shard 80-
+ shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'"
+ // This query uses an ID that should always get routed to shard -80
+ shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'"
+
+ // reset any existing vtgate connection state
+ vtgateConn.Close()
+ vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+ defer vtgateConn.Close()
+
+ // Global routing rules should be in place with everything going to
+ // the source keyspace (customer).
+ confirmGlobalRoutingToSource()
+
+ // Shard routing rules should now also be in place with everything
+ // going to the source keyspace (customer).
+ require.Equal(t, preCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // Confirm shard targeting works before we switch any traffic.
+ // Everything should be routed to the source keyspace (customer).
+
+ log.Infof("Testing reverse route (target->source) for shard being switched")
+ _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic")
+
+ log.Infof("Testing reverse route (target->source) for shard NOT being switched")
+ _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic")
+
+ // Switch all traffic for the shard
+ require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", ""))
+ expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n",
+ targetKs, wfName, shard, shard)
+ require.Equal(t, expectedSwitchOutput, lastOutput)
+
+ // Confirm global routing rules -- everything should still be routed
+ // to the source side, customer, globally.
+ confirmGlobalRoutingToSource()
+
+ // Confirm shard routing rules -- all traffic for the 80- shard should be
+ // routed into the customer2 keyspace, overriding the global routing rules.
+ require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // reset any existing vtgate connection state
+ vtgateConn.Close()
+ vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+ defer vtgateConn.Close()
+
+ // No shard targeting
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic")
+
+ // Shard targeting
+ _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic")
+
+ // Tablet type targeting
+ _, err = vtgateConn.ExecuteFetch("use `customer2@replica`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic")
+
+ // We cannot Complete a partial move tables at the moment because
+ // it will find that all traffic has (obviously) not been switched.
+ err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "")
+ require.Error(t, err)
+
+ // Confirm global routing rules: -80 should still be be routed to customer
+ // while 80- should be routed to customer2.
+ require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // Now move the other shard: -80
+ wfName = "partialDash80"
+ shard = "-80"
+ ksWf = fmt.Sprintf("%s.%s", targetKs, wfName)
+
+ // Start the partial movetables for -80, 80- has already been switched
+ err = tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs,
+ "customer", workflowActionCreate, "", shard, "")
+ require.NoError(t, err)
+ targetTab2 := vc.getPrimaryTablet(t, targetKs, shard)
+ catchup(t, targetTab2, wfName, "Partial MoveTables Customer to Customer2: -80")
+ vdiff1(t, ksWf, "")
+ // Switch all traffic for the shard
+ require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", ""))
+ expectedSwitchOutput = fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n",
+ targetKs, wfName)
+ require.Equal(t, expectedSwitchOutput, lastOutput)
+
+ // Confirm global routing rules: everything should still be routed
+ // to the source side, customer, globally.
+ confirmGlobalRoutingToSource()
+
+ // Confirm shard routing rules: all shards should be routed to the
+ // target side (customer2).
+ require.Equal(t, postCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // Cancel both reverse workflows (as we've done the cutover), which should
+ // clean up both the global routing rules and the shard routing rules.
+ for _, wf := range []string{"partialDash80", "partial80Dash"} {
+ // We switched traffic, so it's the reverse workflow we want to cancel.
+ reverseWf := wf + "_reverse"
+ reverseKs := sourceKs // customer
+ err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "")
+ require.NoError(t, err)
+
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show")
+ require.Error(t, err)
+ require.Contains(t, output, "no streams found")
+
+ // Delete the original workflow
+ originalKsWf := fmt.Sprintf("%s.%s", targetKs, wf)
+ _, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "delete")
+ require.NoError(t, err)
+ output, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "show")
+ require.Error(t, err)
+ require.Contains(t, output, "no streams found")
+ }
+
+ // Confirm that the global routing rules are now gone.
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules")
+ require.NoError(t, err)
+ require.Equal(t, emptyGlobalRoutingRules, output)
+
+ // Confirm that the shard routing rules are now gone.
+ require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t))
+}
diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go
index 14d5d6c867c..ce47e027f2d 100644
--- a/go/test/endtoend/vreplication/performance_test.go
+++ b/go/test/endtoend/vreplication/performance_test.go
@@ -63,7 +63,8 @@ create table customer(cid int, name varbinary(128), meta json default null, typ
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0")
+ require.NoError(t, err)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -96,7 +97,7 @@ create table customer(cid int, name varbinary(128), meta json default null, typ
}
}
- moveTables(t, defaultCell.Name, "stress_workflow", sourceKs, targetKs, "largebin")
+ moveTablesAction(t, "Create", defaultCell.Name, "stress_workflow", sourceKs, targetKs, "largebin")
keyspaceTgt := defaultCell.Keyspaces[targetKs]
for _, shard := range keyspaceTgt.Shards {
diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
index 7b085a9321b..bfe0404f5ab 100644
--- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
+++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
@@ -19,14 +19,13 @@ package vreplication
import (
"fmt"
"net"
- "regexp"
"strconv"
"strings"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/tidwall/gjson"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/log"
@@ -65,6 +64,7 @@ func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) erro
"", workflowActionCreate, "", sourceShards, targetShards)
require.NoError(t, err)
waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning)
+ confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "")
catchup(t, targetTab1, workflowName, "Reshard")
catchup(t, targetTab2, workflowName, "Reshard")
vdiff1(t, ksWorkflow, "")
@@ -79,6 +79,7 @@ func createMoveTablesWorkflow(t *testing.T, tables string) {
tables, workflowActionCreate, "", "", "")
require.NoError(t, err)
waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning)
+ confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, tables)
catchup(t, targetTab1, workflowName, "MoveTables")
catchup(t, targetTab2, workflowName, "MoveTables")
vdiff1(t, ksWorkflow, "")
@@ -112,6 +113,11 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables,
} else {
args = append(args, "--source_shards", sourceShards, "--target_shards", targetShards)
}
+ // Test new experimental --defer-secondary-keys flag
+ switch currentWorkflowType {
+ case wrangler.MoveTablesWorkflow, wrangler.MigrateWorkflow, wrangler.ReshardWorkflow:
+ args = append(args, "--defer-secondary-keys")
+ }
}
if cells != "" {
args = append(args, "--cells", cells)
@@ -151,13 +157,13 @@ func tstWorkflowReverseWrites(t *testing.T) {
require.NoError(t, tstWorkflowAction(t, workflowActionReverseTraffic, "primary", ""))
}
-// tstWorkflowSwitchReadsAndWrites tests that SwitchWrites w/o any user provided --tablet_types
+// tstWorkflowSwitchReadsAndWrites tests that switching traffic w/o any user provided --tablet_types
// value switches all traffic
func tstWorkflowSwitchReadsAndWrites(t *testing.T) {
require.NoError(t, tstWorkflowAction(t, workflowActionSwitchTraffic, "", ""))
}
-// tstWorkflowReversesReadsAndWrites tests that SwitchWrites w/o any user provided --tablet_types
+// tstWorkflowReversesReadsAndWrites tests that ReverseTraffic w/o any user provided --tablet_types
// value switches all traffic in reverse
func tstWorkflowReverseReadsAndWrites(t *testing.T) {
require.NoError(t, tstWorkflowAction(t, workflowActionReverseTraffic, "", ""))
@@ -207,25 +213,15 @@ func validateWritesRouteToTarget(t *testing.T) {
execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid > 100")
}
-func revert(t *testing.T) {
- switchWrites(t, reverseKsWorkflow, false)
+func revert(t *testing.T, workflowType string) {
+ switchWrites(t, workflowType, ksWorkflow, true)
validateWritesRouteToSource(t)
- switchReadsNew(t, allCellNames, ksWorkflow, true)
+ switchReadsNew(t, workflowType, allCellNames, ksWorkflow, true)
validateReadsRouteToSource(t, "replica")
- queries := []string{
- "delete from _vt.vreplication",
- "delete from _vt.resharding_journal",
- }
- for _, query := range queries {
- targetTab1.QueryTablet(query, "customer", true)
- targetTab2.QueryTablet(query, "customer", true)
- sourceTab.QueryTablet(query, "product", true)
- }
- targetTab1.QueryTablet("drop table vt_customer.customer", "customer", true)
- targetTab2.QueryTablet("drop table vt_customer.customer", "customer", true)
-
- clearRoutingRules(t, vc)
+ // cancel the workflow to cleanup
+ _, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "Cancel", ksWorkflow)
+ require.NoError(t, err, fmt.Sprintf("%s Cancel error: %v", workflowType, err))
}
func checkStates(t *testing.T, startState, endState string) {
@@ -245,7 +241,14 @@ func getCurrentState(t *testing.T) string {
func TestBasicV2Workflows(t *testing.T) {
defaultRdonly = 1
- defer func() { defaultRdonly = 0 }()
+ extraVTTabletArgs = []string{
+ parallelInsertWorkers,
+ }
+ defer func() {
+ defaultRdonly = 0
+ extraVTTabletArgs = []string{}
+ }()
+
vc = setupCluster(t)
defer vtgateConn.Close()
defer vc.TearDown(t)
@@ -259,152 +262,6 @@ func TestBasicV2Workflows(t *testing.T) {
log.Flush()
}
-// TestPartialMoveTables tests partial move tables by moving just one shard
-// 80- from customer to customer2.
-func TestPartialMoveTables(t *testing.T) {
- defaultRdonly = 1
- origExtraVTGateArgs := extraVTGateArgs
- // We need to enable shard routing for partial movetables routing.
- // And we need to disable schema change tracking in vtgate as we want
- // to test query routing using a query we know will fail as it's
- // using a column that doesn't exist in the schema -- this way we
- // get the target shard details back in the error message. If schema
- // tracking is enabled then vtgate will produce an error about the
- // unknown symbol before attempting to route the query.
- extraVTGateArgs = append(extraVTGateArgs, []string{
- "--enable-partial-keyspace-migration",
- "--schema_change_signal=false",
- }...)
- defer func() {
- extraVTGateArgs = origExtraVTGateArgs
- }()
- vc = setupCluster(t)
- defer vtgateConn.Close()
- defer vc.TearDown(t)
- setupCustomerKeyspace(t)
-
- // Move customer table from unsharded product keyspace to
- // sharded customer keyspace.
- createMoveTablesWorkflow(t, "customer")
- tstWorkflowSwitchReadsAndWrites(t)
- tstWorkflowComplete(t)
-
- // Now setup the customer2 keyspace so we can do a partial
- // move tables for one of the two shards: 80-.
- defaultRdonly = 0
- setupCustomer2Keyspace(t)
- currentWorkflowType = wrangler.MoveTablesWorkflow
- wfName := "partial"
- moveToKs := "customer2"
- shard := "80-"
- ksWf := fmt.Sprintf("%s.%s", moveToKs, wfName)
- err := tstWorkflowExec(t, defaultCellName, wfName, targetKs, moveToKs,
- "customer", workflowActionCreate, "", shard, "")
- require.NoError(t, err)
- targetTab1 = vc.getPrimaryTablet(t, moveToKs, shard)
- catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2")
- vdiff1(t, ksWf, "")
-
- waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards
- waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer: all shards
- waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80-
-
- // Remove any manually applied shard routing rules as these
- // should be set by SwitchTraffic.
- emptyRules := `{"rules":[]}`
- applyShardRoutingRules(t, emptyRules)
- require.Equal(t, emptyRules, getShardRoutingRules(t))
-
- // switch all traffic
- require.NoError(t, tstWorkflowExec(t, "", wfName, "", moveToKs, "", workflowActionSwitchTraffic, "", "", ""))
- expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow customer2.partial\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n",
- shard, shard)
- require.Equal(t, expectedSwitchOutput, lastOutput)
-
- // Confirm global routing rules -- everything should still be routed
- // to the source side, customer, globally.
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules")
- require.NoError(t, err)
- result := gjson.Get(output, "rules")
- result.ForEach(func(attributeKey, attributeValue gjson.Result) bool {
- // 0 is the keyspace and 1 is optional tablename[@tablettype]
- fromKsTbl := strings.Split(attributeValue.Get("fromTable").String(), ".")
- // 0 is the keyspace and 1 is the tablename
- toKsTbl := strings.Split(attributeValue.Get("toTables.0").String(), ".")
- // All tables in the customer and customer2 keyspaces should be
- // routed to the customer keyspace.
- if fromKsTbl[0] == "customer" || fromKsTbl[0] == "customer2" {
- require.Equal(t, "customer", toKsTbl[0])
- }
- return true
- })
- // Confirm shard routing rules -- all traffic for the 80- shard should be
- // routed into the customer2 keyspace, overriding the global routing rules.
- expectedShardRoutingRules := `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}`
- require.Equal(t, expectedShardRoutingRules, getShardRoutingRules(t))
-
- // This query uses an ID that should always get routed to customer2:80-
- targetRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'"
- // This query uses an ID that should always get routed to customer:-80
- sourceRoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'"
-
- // reset any existing vtgate connection state
- vtgateConn.Close()
- vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
- defer vtgateConn.Close()
-
- // No shard targeting
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.primary")
- _, err = vtgateConn.ExecuteFetch(sourceRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer.-80.primary")
-
- // Shard targeting
- _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.primary")
- _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.primary")
-
- // Tablet type targeting
- _, err = vtgateConn.ExecuteFetch("use `customer2@replica`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.replica")
- _, err = vtgateConn.ExecuteFetch(sourceRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer.-80.replica")
- _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.replica")
- _, err = vtgateConn.ExecuteFetch(sourceRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer.-80.replica")
-
- // We cannot Complete a partial move tables at the moment because it will
- // find that all traffic has (obviously) not been switched we need to
- // cleanup using Workflow delete.
- err = tstWorkflowExec(t, "", wfName, "", moveToKs, "", workflowActionComplete, "", "", "")
- require.Error(t, err)
- require.Equal(t, expectedShardRoutingRules, getShardRoutingRules(t))
- _, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWf, "delete")
- require.NoError(t, err)
- output, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWf, "show")
- require.Error(t, err)
- require.Contains(t, output, "no streams found")
-
-}
-
func getVtctldGRPCURL() string {
return net.JoinHostPort("localhost", strconv.Itoa(vc.Vtctld.GrpcPort))
}
@@ -416,17 +273,6 @@ func applyShardRoutingRules(t *testing.T, rules string) {
require.NotNil(t, output)
}
-func getShardRoutingRules(t *testing.T) string {
- output, err := osExec(t, "vtctldclient", []string{"--server", getVtctldGRPCURL(), "GetShardRoutingRules"})
- log.Infof("GetShardRoutingRules err: %+v, output: %+v", err, output)
- require.Nilf(t, err, output)
- require.NotNil(t, output)
- re := regexp.MustCompile(`[\n\s]+`)
- output = re.ReplaceAllString(output, "")
- output = strings.TrimSpace(output)
- return output
-}
-
/*
testVSchemaForSequenceAfterMoveTables checks that the related sequence tag is migrated correctly in the vschema
while moving a table with an auto-increment from sharded to unsharded.
@@ -589,7 +435,7 @@ func testMoveTablesV2Workflow(t *testing.T) {
}
func testPartialSwitches(t *testing.T) {
- //nothing switched
+ // nothing switched
require.Equal(t, getCurrentState(t), wrangler.WorkflowStateNotSwitched)
tstWorkflowSwitchReads(t, "replica,rdonly", "zone1")
nextState := "Reads partially switched. Replica switched in cells: zone1. Rdonly switched in cells: zone1. Writes Not Switched"
@@ -601,7 +447,7 @@ func testPartialSwitches(t *testing.T) {
checkStates(t, currentState, nextState)
tstWorkflowSwitchReads(t, "", "")
- checkStates(t, nextState, nextState) //idempotency
+ checkStates(t, nextState, nextState) // idempotency
tstWorkflowSwitchWrites(t)
currentState = nextState
@@ -609,7 +455,7 @@ func testPartialSwitches(t *testing.T) {
checkStates(t, currentState, nextState)
tstWorkflowSwitchWrites(t)
- checkStates(t, nextState, nextState) //idempotency
+ checkStates(t, nextState, nextState) // idempotency
keyspace := "product"
if currentWorkflowType == wrangler.ReshardWorkflow {
@@ -725,9 +571,10 @@ func setupCluster(t *testing.T) *VitessCluster {
vtgate = zone1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0")
+ require.NoError(t, err)
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1, 30*time.Second))
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
verifyClusterHealth(t, vc)
@@ -745,24 +592,12 @@ func setupCustomerKeyspace(t *testing.T) {
customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80"))
+ require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-"))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1, 30*time.Second))
custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet
targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet
@@ -778,18 +613,13 @@ func setupCustomer2Keyspace(t *testing.T) {
t.Fatal(err)
}
for _, c2shard := range c2shards {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1); err != nil {
- t.Fatal(err)
- }
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, c2keyspace, c2shard)
+ require.NoError(t, err)
if defaultReplicas > 0 {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", c2keyspace, c2shard), defaultReplicas); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", c2keyspace, c2shard), defaultReplicas, 30*time.Second))
}
if defaultRdonly > 0 {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", c2keyspace, c2shard), defaultRdonly); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", c2keyspace, c2shard), defaultRdonly, 30*time.Second))
}
}
}
@@ -800,9 +630,13 @@ func TestSwitchReadsWritesInAnyOrder(t *testing.T) {
moveCustomerTableSwitchFlows(t, []*Cell{vc.Cells["zone1"]}, "zone1")
}
-func switchReadsNew(t *testing.T, cells, ksWorkflow string, reverse bool) {
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("SwitchReads", "--", "--cells="+cells,
- "--tablet_types=rdonly,replica", fmt.Sprintf("--reverse=%t", reverse), ksWorkflow)
+func switchReadsNew(t *testing.T, workflowType, cells, ksWorkflow string, reverse bool) {
+ command := "SwitchTraffic"
+ if reverse {
+ command = "ReverseTraffic"
+ }
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells,
+ "--tablet_types=rdonly,replica", command, ksWorkflow)
require.NoError(t, err, fmt.Sprintf("SwitchReads Error: %s: %s", err, output))
if output != "" {
fmt.Printf("SwitchReads output: %s\n", output)
@@ -816,11 +650,12 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias
ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow)
tables := "customer"
setupCustomerKeyspace(t)
+ workflowType := "MoveTables"
var moveTablesAndWait = func() {
- moveTables(t, sourceCellOrAlias, workflow, sourceKs, targetKs, tables)
- catchup(t, targetTab1, workflow, "MoveTables")
- catchup(t, targetTab2, workflow, "MoveTables")
+ moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables)
+ catchup(t, targetTab1, workflow, workflowType)
+ catchup(t, targetTab2, workflow, workflowType)
vdiff1(t, ksWorkflow, "")
}
@@ -828,71 +663,71 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias
moveTablesAndWait()
validateReadsRouteToSource(t, "replica")
- switchReadsNew(t, allCellNames, ksWorkflow, false)
+ switchReadsNew(t, workflowType, allCellNames, ksWorkflow, false)
validateReadsRouteToTarget(t, "replica")
validateWritesRouteToSource(t)
- switchWrites(t, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
validateWritesRouteToTarget(t)
- revert(t)
+ revert(t, workflowType)
}
var switchWritesFollowedBySwitchReads = func() {
moveTablesAndWait()
validateWritesRouteToSource(t)
- switchWrites(t, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
validateWritesRouteToTarget(t)
validateReadsRouteToSource(t, "replica")
- switchReadsNew(t, allCellNames, ksWorkflow, false)
+ switchReadsNew(t, workflowType, allCellNames, ksWorkflow, false)
validateReadsRouteToTarget(t, "replica")
- revert(t)
+ revert(t, workflowType)
}
var switchReadsReverseSwitchWritesSwitchReads = func() {
moveTablesAndWait()
validateReadsRouteToSource(t, "replica")
- switchReadsNew(t, allCellNames, ksWorkflow, false)
+ switchReadsNew(t, workflowType, allCellNames, ksWorkflow, false)
validateReadsRouteToTarget(t, "replica")
- switchReadsNew(t, allCellNames, ksWorkflow, true)
+ switchReadsNew(t, workflowType, allCellNames, ksWorkflow, true)
validateReadsRouteToSource(t, "replica")
- printRoutingRules(t, vc, "After reversing SwitchReads")
+ printRoutingRules(t, vc, "After reversing read traffic")
validateWritesRouteToSource(t)
- switchWrites(t, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
validateWritesRouteToTarget(t)
- printRoutingRules(t, vc, "After SwitchWrites and reversing SwitchReads")
+ printRoutingRules(t, vc, "After switching writes and reversing reads")
validateReadsRouteToSource(t, "replica")
- switchReadsNew(t, allCellNames, ksWorkflow, false)
+ switchReadsNew(t, workflowType, allCellNames, ksWorkflow, false)
validateReadsRouteToTarget(t, "replica")
- revert(t)
+ revert(t, workflowType)
}
var switchWritesReverseSwitchReadsSwitchWrites = func() {
moveTablesAndWait()
validateWritesRouteToSource(t)
- switchWrites(t, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
validateWritesRouteToTarget(t)
- switchWrites(t, reverseKsWorkflow, true)
+ switchWrites(t, workflowType, reverseKsWorkflow, true)
validateWritesRouteToSource(t)
validateReadsRouteToSource(t, "replica")
- switchReadsNew(t, allCellNames, ksWorkflow, false)
+ switchReadsNew(t, workflowType, allCellNames, ksWorkflow, false)
validateReadsRouteToTarget(t, "replica")
validateWritesRouteToSource(t)
- switchWrites(t, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
validateWritesRouteToTarget(t)
- revert(t)
+ revert(t, workflowType)
}
switchReadsFollowedBySwitchWrites()
@@ -908,15 +743,10 @@ func createAdditionalCustomerShards(t *testing.T, shards string) {
arrTargetShardNames := strings.Split(shards, ",")
for _, shardName := range arrTargetShardNames {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ksName, shardName), 1); err != nil {
- require.NoError(t, err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2); err != nil {
- require.NoError(t, err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1); err != nil {
- require.NoError(t, err)
- }
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName)
+ require.NoError(t, err)
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1, 30*time.Second))
}
custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName]
targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet
diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go
new file mode 100644
index 00000000000..56ca2d08acd
--- /dev/null
+++ b/go/test/endtoend/vreplication/sidecardb_test.go
@@ -0,0 +1,140 @@
+package vreplication
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+
+ "vitess.io/vitess/go/vt/sidecardb"
+
+ "github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
+)
+
+const GetCurrentTablesQuery = "show tables from _vt"
+
+func getSidecarDBTables(t *testing.T, tabletID string) (numTablets int, tables []string) {
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", "--", "--json", tabletID, GetCurrentTablesQuery)
+ require.NoError(t, err)
+ result := gjson.Get(output, "rows")
+ require.NotNil(t, result)
+ require.True(t, result.IsArray())
+ rows := result.Array()
+ numTablets = len(rows)
+ for _, row := range rows {
+ require.True(t, row.IsArray())
+ rows2 := row.Array()
+ require.NotNil(t, rows2)
+ require.Equal(t, len(rows2), 1)
+ table := rows2[0].String()
+ tables = append(tables, table)
+ }
+ return numTablets, tables
+}
+
+var sidecarDBTables []string
+var numSidecarDBTables int
+var ddls1, ddls2 []string
+
+func init() {
+ sidecarDBTables = []string{"copy_state", "dt_participant", "dt_state", "heartbeat", "post_copy_action", "redo_state",
+ "redo_statement", "reparent_journal", "resharding_journal", "schema_migrations", "schema_version", "schemacopy",
+ "vdiff", "vdiff_log", "vdiff_table", "views", "vreplication", "vreplication_log"}
+ numSidecarDBTables = len(sidecarDBTables)
+ ddls1 = []string{
+ "drop table _vt.vreplication_log",
+ "alter table _vt.vreplication drop column defer_secondary_keys",
+ }
+ ddls2 = []string{
+ "alter table _vt.vreplication modify column defer_secondary_keys boolean default false",
+ }
+}
+
+func prs(t *testing.T, keyspace, shard string) {
+ _, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", "--", fmt.Sprintf("%s/%s", keyspace, shard))
+ require.NoError(t, err)
+}
+
+// TestSidecarDB launches a Vitess cluster and ensures that the expected sidecar tables are created. We also drop/alter
+// tables and ensure the next tablet init will recreate the sidecar database to the desired schema.
+func TestSidecarDB(t *testing.T) {
+ cells := []string{"zone1"}
+
+ vc = NewVitessCluster(t, "TestSidecarDB", cells, mainClusterConfig)
+ require.NotNil(t, vc)
+ allCellNames = "zone1"
+ defaultCellName := "zone1"
+ defaultCell = vc.Cells[defaultCellName]
+
+ defer vc.TearDown(t)
+
+ keyspace := "product"
+ shard := "0"
+
+ cell1 := vc.Cells[defaultCellName]
+ tablet100 := fmt.Sprintf("%s-100", defaultCellName)
+ tablet101 := fmt.Sprintf("%s-101", defaultCellName)
+ vc.AddKeyspace(t, []*Cell{cell1}, keyspace, shard, initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts)
+ shard0 := vc.Cells[defaultCellName].Keyspaces[keyspace].Shards[shard]
+ tablet100Port := shard0.Tablets[tablet100].Vttablet.Port
+ tablet101Port := shard0.Tablets[tablet101].Vttablet.Port
+ currentPrimary := tablet100
+
+ var expectedChanges100, expectedChanges101 int
+
+ t.Run("validate sidecar on startup", func(t *testing.T) {
+ expectedChanges100 = len(sidecarDBTables)
+ expectedChanges101 = 0
+ validateSidecarDBTables(t, tablet100, sidecarDBTables)
+ validateSidecarDBTables(t, tablet101, sidecarDBTables)
+ require.Equal(t, expectedChanges100, getNumExecutedDDLQueries(t, tablet100Port))
+ require.Equal(t, expectedChanges101, getNumExecutedDDLQueries(t, tablet101Port))
+ })
+
+ t.Run("modify schema, prs, and self heal on primary", func(t *testing.T) {
+ numChanges := modifySidecarDBSchema(t, vc, currentPrimary, ddls1)
+ validateSidecarDBTables(t, tablet100, sidecarDBTables[0:numSidecarDBTables-1])
+ validateSidecarDBTables(t, tablet101, sidecarDBTables[0:numSidecarDBTables-1])
+
+ prs(t, keyspace, shard)
+ currentPrimary = tablet101
+ expectedChanges100 += numChanges
+ validateSidecarDBTables(t, tablet100, sidecarDBTables)
+ validateSidecarDBTables(t, tablet101, sidecarDBTables)
+ require.Equal(t, expectedChanges100, getNumExecutedDDLQueries(t, tablet100Port))
+ require.Equal(t, expectedChanges101, getNumExecutedDDLQueries(t, tablet101Port))
+ })
+
+ t.Run("modify schema, prs, and self heal on new primary", func(t *testing.T) {
+ numChanges := modifySidecarDBSchema(t, vc, currentPrimary, ddls1)
+ expectedChanges101 += numChanges
+ prs(t, keyspace, shard)
+ // nolint
+ currentPrimary = tablet100
+
+ validateSidecarDBTables(t, tablet100, sidecarDBTables)
+ validateSidecarDBTables(t, tablet101, sidecarDBTables)
+ require.Equal(t, expectedChanges100, getNumExecutedDDLQueries(t, tablet100Port))
+ require.Equal(t, expectedChanges101, getNumExecutedDDLQueries(t, tablet101Port))
+ })
+}
+func validateSidecarDBTables(t *testing.T, tabletID string, tables []string) {
+ _, tables2 := getSidecarDBTables(t, tabletID)
+ require.EqualValues(t, tables, tables2)
+}
+
+func modifySidecarDBSchema(t *testing.T, vc *VitessCluster, tabletID string, ddls []string) (numChanges int) {
+ for _, ddl := range ddls {
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", "--", tabletID, ddl)
+ require.NoErrorf(t, err, output)
+ }
+ return len(ddls)
+}
+
+func getNumExecutedDDLQueries(t *testing.T, port int) int {
+ val, err := getDebugVar(t, port, []string{sidecardb.StatsKeyQueryCount})
+ require.NoError(t, err)
+ i, err := strconv.Atoi(val)
+ require.NoError(t, err)
+ return i
+}
diff --git a/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql b/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql
new file mode 100644
index 00000000000..03df754ea21
--- /dev/null
+++ b/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql
@@ -0,0 +1,91 @@
+# This file is for testing purpose only.
+# This file is executed immediately after initializing a fresh data directory.
+# It is equivalent of init_db.sql. Given init_db.sql is for mysql which has super_read_only
+# related stuff therefore for testing purpose we avoid setting `super_read_only` during initialization.
+
+###############################################################################
+# WARNING: Any change to init_db.sql should gets reflected in this file as well.
+###############################################################################
+
+###############################################################################
+# WARNING: This sql is *NOT* safe for production use,
+# as it contains default well-known users and passwords.
+# Care should be taken to change these users and passwords
+# for production.
+###############################################################################
+
+###############################################################################
+# Equivalent of mysql_secure_installation
+###############################################################################
+# We need to ensure that read_only is disabled so that we can execute
+# these commands.
+SET GLOBAL read_only='OFF';
+
+# Changes during the init db should not make it to the binlog.
+# They could potentially create errant transactions on replicas.
+SET sql_log_bin = 0;
+# Remove anonymous users.
+DELETE FROM mysql.user WHERE User = '';
+
+# Disable remote root access (only allow UNIX socket).
+DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost';
+
+# Remove test database.
+DROP DATABASE IF EXISTS test;
+
+###############################################################################
+# Vitess defaults
+###############################################################################
+
+# Admin user with all privileges.
+CREATE USER 'vt_dba'@'localhost';
+GRANT ALL ON *.* TO 'vt_dba'@'localhost';
+GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost';
+
+# User for app traffic, with global read-write access.
+CREATE USER 'vt_app'@'localhost';
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE,
+ REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES,
+ LOCK TABLES, EXECUTE, REPLICATION CLIENT, CREATE VIEW,
+ SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER
+ ON *.* TO 'vt_app'@'localhost';
+
+# User for app debug traffic, with global read access.
+CREATE USER 'vt_appdebug'@'localhost';
+GRANT SELECT, SHOW DATABASES, PROCESS ON *.* TO 'vt_appdebug'@'localhost';
+
+# User for administrative operations that need to be executed as non-SUPER.
+# Same permissions as vt_app here.
+CREATE USER 'vt_allprivs'@'localhost';
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE,
+ REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES,
+ LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW,
+ SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER
+ ON *.* TO 'vt_allprivs'@'localhost';
+
+# User for slave replication connections.
+CREATE USER 'vt_repl'@'%';
+GRANT REPLICATION SLAVE ON *.* TO 'vt_repl'@'%';
+
+# User for Vitess VReplication (base vstreamers and vplayer).
+CREATE USER 'vt_filtered'@'localhost';
+GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE,
+ REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES,
+ LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW,
+ SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER
+ ON *.* TO 'vt_filtered'@'localhost';
+
+# User for general MySQL monitoring.
+CREATE USER 'vt_monitoring'@'localhost';
+GRANT SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD
+ ON *.* TO 'vt_monitoring'@'localhost';
+GRANT SELECT, UPDATE, DELETE, DROP
+ ON performance_schema.* TO 'vt_monitoring'@'localhost';
+
+FLUSH PRIVILEGES;
+
+RESET SLAVE ALL;
+RESET MASTER;
+
+# custom sql is used to add custom scripts like creating users/passwords. We use it in our tests
+# {{custom_sql}}
diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go
index b10cd55e048..f5d57eac9df 100644
--- a/go/test/endtoend/vreplication/time_zone_test.go
+++ b/go/test/endtoend/vreplication/time_zone_test.go
@@ -36,6 +36,7 @@ func TestMoveTablesTZ(t *testing.T) {
workflow := "tz"
sourceKs := "product"
targetKs := "customer"
+ shard := "0"
ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow)
ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow)
@@ -51,7 +52,8 @@ func TestMoveTablesTZ(t *testing.T) {
vtgate = cell1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)
+ require.NoError(t, err)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -87,9 +89,8 @@ func TestMoveTablesTZ(t *testing.T) {
if _, err := vc.AddKeyspace(t, cells, targetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "0"), 1); err != nil {
- t.Fatal(err)
- }
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)
+ require.NoError(t, err)
defaultCell := vc.Cells["zone1"]
custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs]
diff --git a/go/test/endtoend/vreplication/unsharded_init_data.sql b/go/test/endtoend/vreplication/unsharded_init_data.sql
index b12aaa9bf79..019945609db 100644
--- a/go/test/endtoend/vreplication/unsharded_init_data.sql
+++ b/go/test/endtoend/vreplication/unsharded_init_data.sql
@@ -1,6 +1,6 @@
insert into customer(cid, name, typ, sport, meta) values(1, 'Jøhn "❤️" Rizzolo',1,'football,baseball','{}');
insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4));
-insert into customer(cid, name, typ, sport) values(3, 'ringo','enterprise','');
+insert into customer(cid, name, typ, sport, meta) values(3, 'ringo','enterprise','',null);
insert into merchant(mname, category) values('Monoprice', 'eléctronics');
insert into merchant(mname, category) values('newegg', 'elec†ronics');
insert into product(pid, description) values(1, 'keyböard ⌨️');
diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go
index c20fc435b84..82e2b24f4b3 100644
--- a/go/test/endtoend/vreplication/vdiff2_test.go
+++ b/go/test/endtoend/vreplication/vdiff2_test.go
@@ -23,6 +23,9 @@ import (
"time"
"github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
)
type testCase struct {
@@ -121,7 +124,7 @@ func TestVDiff2(t *testing.T) {
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
for _, shard := range sourceShards {
- require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1))
+ require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard))
}
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
@@ -139,7 +142,7 @@ func TestVDiff2(t *testing.T) {
_, err := vc.AddKeyspace(t, cells, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts)
require.NoError(t, err)
for _, shard := range targetShards {
- require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1))
+ require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard))
}
for _, tc := range testCases {
@@ -155,7 +158,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell)
tks := vc.Cells[cells[0].Name].Keyspaces[tc.targetKs]
require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts))
for _, shard := range arrTargetShards {
- require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", tc.targetKs, shard), 1))
+ require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, tc.targetKs, shard))
}
}
ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow)
@@ -232,16 +235,47 @@ func testCLIErrors(t *testing.T, ksWorkflow, cells string) {
func testDelete(t *testing.T, ksWorkflow, cells string) {
t.Run("Delete", func(t *testing.T) {
- // test show verbose too as a side effect
+ // Let's be sure that we have at least 3 unique VDiffs.
+ // We have one record in the SHOW output per VDiff, per
+ // shard. So we want to get a count of the unique VDiffs
+ // by UUID.
+ uuidCount := func(uuids []gjson.Result) int64 {
+ seen := make(map[string]struct{})
+ for _, uuid := range uuids {
+ seen[uuid.String()] = struct{}{}
+ }
+ return int64(len(seen))
+ }
+ _, output := performVDiff2Action(t, ksWorkflow, cells, "show", "all", false)
+ initialVDiffCount := uuidCount(gjson.Get(output, "#.UUID").Array())
+ for ; initialVDiffCount < 3; initialVDiffCount++ {
+ _, _ = performVDiff2Action(t, ksWorkflow, cells, "create", "", false)
+ }
+
+ // Now let's confirm that we have at least 3 unique VDiffs.
+ _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false)
+ require.GreaterOrEqual(t, uuidCount(gjson.Get(output, "#.UUID").Array()), int64(3))
+ // And that our initial count is what we expect.
+ require.Equal(t, initialVDiffCount, uuidCount(gjson.Get(output, "#.UUID").Array()))
+
+ // Test show last with verbose too as a side effect.
uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false, "--verbose")
- // only present with --verbose
+ // The TableSummary is only present with --verbose.
require.Contains(t, output, `"TableSummary":`)
+
+ // Now let's delete one of the VDiffs.
_, output = performVDiff2Action(t, ksWorkflow, cells, "delete", uuid, false)
- require.Contains(t, output, `"Status": "completed"`)
+ require.Equal(t, "completed", gjson.Get(output, "Status").String())
+ // And confirm that our unique VDiff count has only decreased by one.
+ _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false)
+ require.Equal(t, initialVDiffCount-1, uuidCount(gjson.Get(output, "#.UUID").Array()))
+
+ // Now let's delete all of them.
_, output = performVDiff2Action(t, ksWorkflow, cells, "delete", "all", false)
- require.Contains(t, output, `"Status": "completed"`)
+ require.Equal(t, "completed", gjson.Get(output, "Status").String())
+ // And finally confirm that we have no more VDiffs.
_, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false)
- require.Equal(t, "[]\n", output)
+ require.Equal(t, int64(0), gjson.Get(output, "#").Int())
})
}
diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go
index 7824c792777..35b3b3f5d26 100644
--- a/go/test/endtoend/vreplication/vdiff_helper_test.go
+++ b/go/test/endtoend/vreplication/vdiff_helper_test.go
@@ -23,8 +23,8 @@ import (
"testing"
"time"
- "github.com/buger/jsonparser"
"github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
"vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/sqltypes"
@@ -65,7 +65,7 @@ func vdiff1(t *testing.T, ksWorkflow, cells string) {
func doVDiff1(t *testing.T, ksWorkflow, cells string) {
t.Run(fmt.Sprintf("vdiff1 %s", ksWorkflow), func(t *testing.T) {
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--tablet_types=primary", "--source_cell="+cells, "--format", "json", ksWorkflow)
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--v1", "--tablet_types=primary", "--source_cell="+cells, "--format", "json", ksWorkflow)
log.Infof("vdiff1 err: %+v, output: %+v", err, output)
require.Nil(t, err)
require.NotNil(t, output)
@@ -110,12 +110,20 @@ func waitForVDiff2ToComplete(t *testing.T, ksWorkflow, cells, uuid string, compl
// The timestamp format allows us to compare them lexicographically.
// We don't test that the ETA always increases as it can decrease based on how
// quickly we're doing work.
- if info.Progress.ETA != "" {
- // If we're operating at the second boundary then the ETA can be up
- // to 1 second in the past due to using second based precision.
- loc, _ := time.LoadLocation("UTC")
- require.GreaterOrEqual(t, info.Progress.ETA, time.Now().Add(-time.Second).In(loc).Format(vdiff2.TimestampFormat))
- }
+
+ // Commenting out this check for now as it is quite flaky in Github CI: we sometimes get a difference of
+ // more than 1s between the ETA and the current time, empirically seen 2s when it has failed,
+ // but presumably it can be higher. Keeping the code here for now in case we want to re-enable it.
+
+ /*
+ if info.Progress.ETA != "" {
+ // If we're operating at the second boundary then the ETA can be up
+ // to 1 second in the past due to using second based precision.
+ loc, _ := time.LoadLocation("UTC")
+ require.GreaterOrEqual(t, info.Progress.ETA, time.Now().Add(-time.Second).In(loc).Format(vdiff2.TimestampFormat))
+ }
+ */
+
if !first {
require.GreaterOrEqual(t, info.Progress.Percentage, previousProgress.Percentage)
}
@@ -165,7 +173,7 @@ func doVdiff2(t *testing.T, keyspace, workflow, cells string, want *expectedVDif
func performVDiff2Action(t *testing.T, ksWorkflow, cells, action, actionArg string, expectError bool, extraFlags ...string) (uuid string, output string) {
var err error
- args := []string{"VDiff", "--", "--v2", "--tablet_types=primary", "--source_cell=" + cells, "--format=json"}
+ args := []string{"VDiff", "--", "--tablet_types=primary", "--source_cell=" + cells, "--format=json"}
if len(extraFlags) > 0 {
args = append(args, extraFlags...)
}
@@ -174,7 +182,7 @@ func performVDiff2Action(t *testing.T, ksWorkflow, cells, action, actionArg stri
log.Infof("vdiff2 output: %+v (err: %+v)", output, err)
if !expectError {
require.Nil(t, err)
- uuid, err = jsonparser.GetString([]byte(output), "UUID")
+ uuid = gjson.Get(output, "UUID").String()
if action != "delete" && !(action == "show" && actionArg == "all") { // a UUID is not required
require.NoError(t, err)
require.NotEmpty(t, uuid)
@@ -193,19 +201,18 @@ type vdiffInfo struct {
Progress vdiff2.ProgressReport
}
-func getVDiffInfo(jsonStr string) *vdiffInfo {
+func getVDiffInfo(json string) *vdiffInfo {
var info vdiffInfo
- json := []byte(jsonStr)
- info.Workflow, _ = jsonparser.GetString(json, "Workflow")
- info.Keyspace, _ = jsonparser.GetString(json, "Keyspace")
- info.State, _ = jsonparser.GetString(json, "State")
- info.Shards, _ = jsonparser.GetString(json, "Shards")
- info.RowsCompared, _ = jsonparser.GetInt(json, "RowsCompared")
- info.StartedAt, _ = jsonparser.GetString(json, "StartedAt")
- info.CompletedAt, _ = jsonparser.GetString(json, "CompletedAt")
- info.HasMismatch, _ = jsonparser.GetBoolean(json, "HasMismatch")
- info.Progress.Percentage, _ = jsonparser.GetFloat(json, "Progress", "Percentage")
- info.Progress.ETA, _ = jsonparser.GetString(json, "Progress", "ETA")
+ info.Workflow = gjson.Get(json, "Workflow").String()
+ info.Keyspace = gjson.Get(json, "Keyspace").String()
+ info.State = gjson.Get(json, "State").String()
+ info.Shards = gjson.Get(json, "Shards").String()
+ info.RowsCompared = gjson.Get(json, "RowsCompared").Int()
+ info.StartedAt = gjson.Get(json, "StartedAt").String()
+ info.CompletedAt = gjson.Get(json, "CompletedAt").String()
+ info.HasMismatch = gjson.Get(json, "HasMismatch").Bool()
+ info.Progress.Percentage = gjson.Get(json, "Progress.Percentage").Float()
+ info.Progress.ETA = gjson.Get(json, "Progress.ETA").String()
return &info
}
diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go
index 40e34bbfce4..d6a411fdb3c 100644
--- a/go/test/endtoend/vreplication/vreplication_test.go
+++ b/go/test/endtoend/vreplication/vreplication_test.go
@@ -34,6 +34,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
"github.com/buger/jsonparser"
@@ -63,7 +64,7 @@ var (
)
const (
- // for some tests we keep an open transaction during a SwitchWrites and commit it afterwards, to reproduce https://github.com/vitessio/vitess/issues/9400
+ // for some tests we keep an open transaction during a Switch writes and commit it afterwards, to reproduce https://github.com/vitessio/vitess/issues/9400
// we also then delete the extra row (if) added so that the row counts for the future count comparisons stay the same
openTxQuery = "insert into customer(cid, name, typ, sport, meta) values(4, 'openTxQuery',1,'football,baseball','{}');"
deleteOpenTxQuery = "delete from customer where name = 'openTxQuery'"
@@ -82,34 +83,141 @@ func init() {
defaultReplicas = 1
}
-func throttleResponse(tablet *cluster.VttabletProcess, path string) (resp *http.Response, respBody string, err error) {
+func throttleResponse(tablet *cluster.VttabletProcess, path string) (respBody string, err error) {
apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.TabletHostname, tablet.Port, path)
- resp, err = httpClient.Get(apiURL)
+ resp, err := httpClient.Get(apiURL)
if err != nil {
- return resp, respBody, err
+ return "", err
}
+ defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
respBody = string(b)
- return resp, respBody, err
+ return respBody, err
}
-func throttleApp(tablet *cluster.VttabletProcess, app string) (*http.Response, string, error) {
+func throttleApp(tablet *cluster.VttabletProcess, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app))
}
-func unthrottleApp(tablet *cluster.VttabletProcess, app string) (*http.Response, string, error) {
+func unthrottleApp(tablet *cluster.VttabletProcess, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app))
}
-func throttlerCheckSelf(tablet *cluster.VttabletProcess, app string) (resp *http.Response, respBody string, err error) {
+func throttlerCheckSelf(tablet *cluster.VttabletProcess, app string) (respBody string, err error) {
apiURL := fmt.Sprintf("http://%s:%d/throttler/check-self?app=%s", tablet.TabletHostname, tablet.Port, app)
- resp, err = httpClient.Get(apiURL)
+ resp, err := httpClient.Get(apiURL)
if err != nil {
- return resp, respBody, err
+ return "", err
}
+ defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
respBody = string(b)
- return resp, respBody, err
+ return respBody, err
+}
+
+// TestVReplicationDDLHandling tests the DDL handling in
+// VReplication for the values of IGNORE, STOP, and EXEC.
+// NOTE: this is a manual test. It is not executed in the
+// CI.
+func TestVReplicationDDLHandling(t *testing.T) {
+ workflow := "onddl_test"
+ ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow)
+ table := "orders"
+ newColumn := "ddltest"
+ cell := "zone1"
+ shard := "0"
+ vc = NewVitessCluster(t, t.Name(), []string{cell}, mainClusterConfig)
+ defer vc.TearDown(t)
+ defaultCell = vc.Cells[cell]
+
+ if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil {
+ t.Fatal(err)
+ }
+ vtgate = defaultCell.Vtgates[0]
+ require.NotNil(t, vtgate)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)
+ require.NoError(t, err)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)
+ require.NoError(t, err)
+ verifyClusterHealth(t, vc)
+
+ vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+ defer vtgateConn.Close()
+ sourceTab = vc.getPrimaryTablet(t, sourceKs, shard)
+ targetTab := vc.getPrimaryTablet(t, targetKs, shard)
+
+ insertInitialData(t)
+
+ _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", sourceKs), 1, false)
+ require.NoError(t, err)
+
+ addColDDL := fmt.Sprintf("alter table %s add column %s varchar(64)", table, newColumn)
+ dropColDDL := fmt.Sprintf("alter table %s drop column %s", table, newColumn)
+ checkColQuerySource := fmt.Sprintf("select count(column_name) from information_schema.columns where table_schema='vt_%s' and table_name='%s' and column_name='%s'",
+ sourceKs, table, newColumn)
+ checkColQueryTarget := fmt.Sprintf("select count(column_name) from information_schema.columns where table_schema='vt_%s' and table_name='%s' and column_name='%s'",
+ targetKs, table, newColumn)
+
+ // Test IGNORE behavior
+ moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl=IGNORE")
+ // Wait until we get through the copy phase...
+ catchup(t, targetTab, workflow, "MoveTables")
+ // Add new col on source
+ _, err = vtgateConn.ExecuteFetch(addColDDL, 1, false)
+ require.NoError(t, err, "error executing %q: %v", addColDDL, err)
+ // Confirm workflow is still running fine
+ waitForWorkflowState(t, vc, ksWorkflow, "Running")
+ // Confirm new col does not exist on target
+ waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]")
+ // Confirm new col does exist on source
+ waitForQueryResult(t, vtgateConn, sourceKs, checkColQuerySource, "[[INT64(1)]]")
+ // Also test Cancel --keep_routing_rules
+ moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table, "--keep_routing_rules")
+ // Confirm that the routing rules were NOT cleared
+ rr, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules")
+ require.NoError(t, err)
+ require.Greater(t, len(gjson.Get(rr, "rules").Array()), 0)
+ // Manually clear the routing rules
+ err = vc.VtctldClient.ExecuteCommand("ApplyRoutingRules", "--rules", "{}")
+ require.NoError(t, err)
+ // Confirm that the routing rules are gone
+ rr, err = vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules")
+ require.NoError(t, err)
+ require.Equal(t, len(gjson.Get(rr, "rules").Array()), 0)
+ // Drop the column on source to start fresh again
+ _, err = vtgateConn.ExecuteFetch(dropColDDL, 1, false)
+ require.NoError(t, err, "error executing %q: %v", dropColDDL, err)
+
+ // Test STOP behavior (new col now exists nowhere)
+ moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl=STOP")
+ // Wait until we get through the copy phase...
+ catchup(t, targetTab, workflow, "MoveTables")
+ // Add new col on the source
+ _, err = vtgateConn.ExecuteFetch(addColDDL, 1, false)
+ require.NoError(t, err, "error executing %q: %v", addColDDL, err)
+ // Confirm that the worfklow stopped because of the DDL
+ waitForWorkflowState(t, vc, ksWorkflow, "Stopped", fmt.Sprintf("Message==Stopped at DDL %s", addColDDL))
+ // Confirm that the target does not have new col
+ waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]")
+ moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table)
+
+ // Test EXEC behavior (new col now exists on source)
+ moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl=EXEC")
+ // Wait until we get through the copy phase...
+ catchup(t, targetTab, workflow, "MoveTables")
+ // Confirm target has new col from copy phase
+ waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(1)]]")
+ // Drop col on source
+ _, err = vtgateConn.ExecuteFetch(dropColDDL, 1, false)
+ require.NoError(t, err, "error executing %q: %v", dropColDDL, err)
+ // Confirm workflow is still running fine
+ waitForWorkflowState(t, vc, ksWorkflow, "Running")
+ // Confirm new col was dropped on target
+ waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]")
+ moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table)
}
func TestVreplicationCopyThrottling(t *testing.T) {
@@ -127,6 +235,7 @@ func TestVreplicationCopyThrottling(t *testing.T) {
// to avoid flakiness when the CI is very slow.
fmt.Sprintf("--queryserver-config-transaction-timeout=%d", int64(defaultTimeout.Seconds())*3),
fmt.Sprintf("--vreplication_copy_phase_max_innodb_history_list_length=%d", maxSourceTrxHistory),
+ parallelInsertWorkers,
}
if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil {
@@ -137,8 +246,10 @@ func TestVreplicationCopyThrottling(t *testing.T) {
}
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)
+ require.NoError(t, err)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)
+ require.NoError(t, err)
// Confirm that the initial copy table phase does not proceed until the source tablet(s)
// have an InnoDB History List length that is less than specified in the tablet's config.
@@ -148,7 +259,9 @@ func TestVreplicationCopyThrottling(t *testing.T) {
// History should have been generated on the source primary tablet
waitForInnoDBHistoryLength(t, vc.getPrimaryTablet(t, sourceKs, shard), maxSourceTrxHistory)
// We need to force primary tablet types as the history list has been increased on the source primary
- moveTablesWithTabletTypes(t, defaultCell.Name, workflow, sourceKs, targetKs, table, "primary")
+ // We use a small timeout and ignore errors as we don't expect the MoveTables to start here
+ // because of the InnoDB History List length.
+ moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", 5*time.Second, true)
// Wait for the copy phase to start
waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), workflowStateCopying)
// The initial copy phase should be blocking on the history list
@@ -163,7 +276,20 @@ func TestBasicVreplicationWorkflow(t *testing.T) {
testBasicVreplicationWorkflow(t)
}
+func TestVreplicationCopyParallel(t *testing.T) {
+ sourceKsOpts["DBTypeVersion"] = "mysql-5.7"
+ targetKsOpts["DBTypeVersion"] = "mysql-5.7"
+ extraVTTabletArgs = []string{
+ parallelInsertWorkers,
+ }
+ testBasicVreplicationWorkflow(t)
+}
+
func testBasicVreplicationWorkflow(t *testing.T) {
+ testVreplicationWorkflows(t, false)
+}
+
+func testVreplicationWorkflows(t *testing.T, minimal bool) {
defaultCellName := "zone1"
allCells := []string{"zone1"}
allCellNames = "zone1"
@@ -181,7 +307,8 @@ func testBasicVreplicationWorkflow(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0")
+ require.NoError(t, err)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -197,6 +324,10 @@ func testBasicVreplicationWorkflow(t *testing.T) {
shardOrders(t)
shardMerchant(t)
+ if minimal {
+ return
+ }
+
materializeProduct(t)
materializeMerchantOrders(t)
@@ -213,6 +344,15 @@ func testBasicVreplicationWorkflow(t *testing.T) {
expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", "product:0", 3)
reshardCustomer3to1Merge(t)
expectNumberOfStreams(t, vtgateConn, "Customer3to1", "sales", "product:0", 1)
+
+ t.Run("Verify CopyState Is Optimized Afterwards", func(t *testing.T) {
+ tabletMap := vc.getVttabletsInKeyspace(t, defaultCell, "customer", topodatapb.TabletType_PRIMARY.String())
+ require.NotNil(t, tabletMap)
+ require.Greater(t, len(tabletMap), 0)
+ for _, tablet := range tabletMap {
+ verifyCopyStateIsOptimized(t, tablet)
+ }
+ })
}
func TestV2WorkflowsAcrossDBVersions(t *testing.T) {
@@ -221,6 +361,15 @@ func TestV2WorkflowsAcrossDBVersions(t *testing.T) {
testBasicVreplicationWorkflow(t)
}
+// TestMoveTablesMariaDBToMySQL tests that MoveTables works between a MariaDB source
+// and a MySQL target as while MariaDB is not supported in Vitess v14+ we want
+// MariaDB users to have a way to migrate into Vitess.
+func TestMoveTablesMariaDBToMySQL(t *testing.T) {
+ sourceKsOpts["DBTypeVersion"] = "mariadb-10.10"
+ targetKsOpts["DBTypeVersion"] = "mysql-8.0"
+ testVreplicationWorkflows(t, true /* only do MoveTables */)
+}
+
func TestMultiCellVreplicationWorkflow(t *testing.T) {
cells := []string{"zone1", "zone2"}
allCellNames = strings.Join(cells, ",")
@@ -229,17 +378,20 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) {
require.NotNil(t, vc)
defaultCellName := "zone1"
defaultCell = vc.Cells[defaultCellName]
+ keyspace := "product"
+ shard := "0"
defer vc.TearDown(t)
cell1 := vc.Cells["zone1"]
cell2 := vc.Cells["zone2"]
- vc.AddKeyspace(t, []*Cell{cell1, cell2}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts)
+ vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts)
vtgate = cell1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard)
+ require.NoError(t, err)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -274,8 +426,10 @@ func TestVStreamFlushBinlog(t *testing.T) {
}
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)
+ require.NoError(t, err)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)
+ require.NoError(t, err)
verifyClusterHealth(t, vc)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
@@ -285,7 +439,7 @@ func TestVStreamFlushBinlog(t *testing.T) {
insertInitialData(t)
tables := "product,customer,merchant,orders"
- moveTables(t, defaultCellName, workflow, sourceKs, targetKs, tables)
+ moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, tables)
// Wait until we get through the copy phase...
catchup(t, vc.getPrimaryTablet(t, targetKs, shard), workflow, "MoveTables")
@@ -420,12 +574,14 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) {
allCellNames = "zone1,zone2"
defaultCellName := "zone1"
defaultCell = vc.Cells[defaultCellName]
+ keyspace := "product"
+ shard := "0"
defer vc.TearDown(t)
cell1 := vc.Cells["zone1"]
cell2 := vc.Cells["zone2"]
- vc.AddKeyspace(t, []*Cell{cell1, cell2}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts)
+ vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts)
// Add cell alias containing only zone2
result, err := vc.VtctlClient.ExecuteCommandWithOutput("AddCellsAlias", "--", "--cells", "zone2", "alias")
@@ -433,8 +589,9 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) {
vtgate = cell1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard)
+ require.NoError(t, err)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -442,7 +599,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) {
insertInitialData(t)
t.Run("VStreamFrom", func(t *testing.T) {
- testVStreamFrom(t, "product", 2)
+ testVStreamFrom(t, keyspace, 2)
})
shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false)
}
@@ -567,27 +724,26 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1); err != nil {
- t.Fatal(err)
- }
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "-80")
+ require.NoError(t, err)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "80-")
+ require.NoError(t, err)
// Assume we are operating on first cell
defaultCell := cells[0]
custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
tables := "customer,Lead,Lead-1,db_order_test"
- moveTables(t, sourceCellOrAlias, workflow, sourceKs, targetKs, tables)
+ moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables)
customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet
customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet
productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet
// Wait to finish the copy phase for all tables
- catchup(t, customerTab1, workflow, "MoveTables")
- catchup(t, customerTab2, workflow, "MoveTables")
+ workflowType := "MoveTables"
+ catchup(t, customerTab1, workflow, workflowType)
+ catchup(t, customerTab2, workflow, workflowType)
// Confirm that the 0 scale decimal field, dec80, is replicated correctly
dec80Replicated := false
@@ -604,9 +760,11 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
query := "select cid from customer"
require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query))
- insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')"
- matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1, :vtg2)"
- require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1))
+ insertQuery1 := "insert into customer(cid, name, meta) values(1001, 'tempCustomer1', '{\"a\": 1629849600, \"b\": 930701976723823}')"
+
+ matchInsertQuery0 := "insert into customer(cid, `name`) values (:vtg1, :vtg2)"
+ matchInsertQuery1 := "insert into customer(cid, `name`, meta) values (:vtg1, :vtg2, :vtg3)"
+ validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)
// confirm that the backticking of table names in the routing rules works
tbls := []string{"Lead", "Lead-1"}
@@ -620,16 +778,16 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
}
vdiff1(t, ksWorkflow, "")
- switchReadsDryRun(t, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard)
- switchReads(t, allCellNames, ksWorkflow)
+ switchReadsDryRun(t, workflowType, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard)
+ switchReads(t, workflowType, allCellNames, ksWorkflow, false)
require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query))
var commit func(t *testing.T)
if withOpenTx {
commit, _ = vc.startQuery(t, openTxQuery)
}
- switchWritesDryRun(t, ksWorkflow, dryRunResultsSwitchWritesCustomerShard)
- switchWrites(t, ksWorkflow, false)
+ switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultsSwitchWritesCustomerShard)
+ switchWrites(t, workflowType, ksWorkflow, false)
checkThatVDiffFails(t, targetKs, workflow)
if withOpenTx && commit != nil {
@@ -649,19 +807,18 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1, :_cid0)"
require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2))
- insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" //ID 101, hence due to reverse_bits in shard 80-
+ insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" // ID 101, hence due to reverse_bits in shard 80-
require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2))
- insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" //ID 102, hence due to reverse_bits in shard -80
+ insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" // ID 102, hence due to reverse_bits in shard -80
require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2))
execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1")
- reverseKsWorkflow := "product.p2c_reverse"
if testReverse {
- //Reverse Replicate
- switchReads(t, allCellNames, reverseKsWorkflow)
+ // Reverse Replicate
+ switchReads(t, workflowType, allCellNames, ksWorkflow, true)
printShardPositions(vc, ksShards)
- switchWrites(t, reverseKsWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, true)
output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWorkflow, "show")
require.NoError(t, err)
@@ -669,24 +826,25 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
require.Contains(t, output, "'customer.bmd5'")
insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')"
- require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1))
+ require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery0))
// both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch
insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')"
- require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1))
+ require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery0))
insertQuery1 = "insert into customer(cid, name) values(1004, 'tempCustomer7')"
- require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1))
+ require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery0))
+
+ waitForNoWorkflowLag(t, vc, targetKs, workflow)
- //Go forward again
- switchReads(t, allCellNames, ksWorkflow)
- switchWrites(t, ksWorkflow, false)
- dropSourcesDryRun(t, ksWorkflow, false, dryRunResultsDropSourcesDropCustomerShard)
- dropSourcesDryRun(t, ksWorkflow, true, dryRunResultsDropSourcesRenameCustomerShard)
+ // Go forward again
+ switchReads(t, workflowType, allCellNames, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
var exists bool
exists, err = checkIfDenyListExists(t, vc, "product:0", "customer")
require.NoError(t, err, "Error getting denylist for customer:0")
require.True(t, exists)
- dropSources(t, ksWorkflow)
+
+ moveTablesAction(t, "Complete", allCellNames, workflow, sourceKs, targetKs, tables)
exists, err = checkIfDenyListExists(t, vc, "product:0", "customer")
require.NoError(t, err, "Error getting denylist for customer:0")
@@ -707,11 +865,11 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
assert.NoError(t, err, "Customer table not deleted from zone1-200")
require.True(t, found)
- insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" //ID 103, hence due to reverse_bits in shard 80-
+ insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" // ID 103, hence due to reverse_bits in shard 80-
require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2))
- insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" //ID 105, hence due to reverse_bits in shard -80
+ insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" // ID 105, hence due to reverse_bits in shard -80
require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2))
- insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" //ID 104, hence due to reverse_bits in shard 80-
+ insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" // ID 104, hence due to reverse_bits in shard 80-
require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2))
execVtgateQuery(t, vtgateConn, "customer", "delete from customer where name like 'tempCustomer%'")
@@ -807,7 +965,7 @@ func reshardMerchant3to1Merge(t *testing.T) {
})
}
-func reshardCustomer3to2SplitMerge(t *testing.T) { //-40,40-80,80-c0 => merge/split, c0- stays the same ending up with 3
+func reshardCustomer3to2SplitMerge(t *testing.T) { // -40,40-80,80-c0 => merge/split, c0- stays the same ending up with 3
t.Run("reshardCustomer3to2SplitMerge", func(t *testing.T) {
ksName := "customer"
counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5}
@@ -815,7 +973,7 @@ func reshardCustomer3to2SplitMerge(t *testing.T) { //-40,40-80,80-c0 => merge/sp
})
}
-func reshardCustomer3to1Merge(t *testing.T) { //to unsharded
+func reshardCustomer3to1Merge(t *testing.T) { // to unsharded
t.Run("reshardCustomer3to1Merge", func(t *testing.T) {
ksName := "customer"
counts := map[string]int{"zone1-1500": 21}
@@ -837,12 +995,13 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou
arrTargetShardNames := strings.Split(targetShards, ",")
for _, shardName := range arrTargetShardNames {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ksName, shardName), 1); err != nil {
- t.Fatal(err)
- }
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName)
+ require.NoError(t, err)
}
- if err := vc.VtctlClient.ExecuteCommand("Reshard", "--", "--v1", "--cells="+sourceCellOrAlias, "--tablet_types=replica,primary", ksWorkflow, "--", sourceShards, targetShards); err != nil {
- t.Fatalf("Reshard command failed with %+v\n", err)
+ workflowType := "Reshard"
+ if err := vc.VtctlClient.ExecuteCommand(workflowType, "--", "--source_shards="+sourceShards, "--target_shards="+targetShards,
+ "--cells="+sourceCellOrAlias, "--tablet_types=replica,primary", "Create", ksWorkflow); err != nil {
+ t.Fatalf("Reshard Create command failed with %+v\n", err)
}
tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "primary")
targetShards = "," + targetShards + ","
@@ -856,12 +1015,15 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou
}
}
vdiff1(t, ksWorkflow, "")
- switchReads(t, allCellNames, ksWorkflow)
+ switchReads(t, workflowType, allCellNames, ksWorkflow, false)
if dryRunResultSwitchWrites != nil {
- switchWritesDryRun(t, ksWorkflow, dryRunResultSwitchWrites)
+ switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultSwitchWrites)
+ }
+ switchWrites(t, workflowType, ksWorkflow, false)
+ if err := vc.VtctlClient.ExecuteCommand(workflowType, "--", "--source_shards="+sourceShards, "--target_shards="+targetShards,
+ "--cells="+sourceCellOrAlias, "--tablet_types=replica,primary", "Complete", ksWorkflow); err != nil {
+ t.Fatalf("Reshard Complete command failed with %+v\n", err)
}
- switchWrites(t, ksWorkflow, false)
- dropSources(t, ksWorkflow)
for tabletName, count := range counts {
if tablets[tabletName] == nil {
continue
@@ -880,17 +1042,18 @@ func shardOrders(t *testing.T) {
tables := "orders"
ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow)
applyVSchema(t, ordersVSchema, targetKs)
- moveTables(t, cell, workflow, sourceKs, targetKs, tables)
+ moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables)
custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet
customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet
- catchup(t, customerTab1, workflow, "MoveTables")
- catchup(t, customerTab2, workflow, "MoveTables")
+ workflowType := "MoveTables"
+ catchup(t, customerTab1, workflow, workflowType)
+ catchup(t, customerTab2, workflow, workflowType)
vdiff1(t, ksWorkflow, "")
- switchReads(t, allCellNames, ksWorkflow)
- switchWrites(t, ksWorkflow, false)
- dropSources(t, ksWorkflow)
+ switchReads(t, workflowType, allCellNames, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
+ moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables)
waitForRowCountInTablet(t, customerTab1, "customer", "orders", 1)
waitForRowCountInTablet(t, customerTab2, "customer", "orders", 2)
waitForRowCount(t, vtgateConn, "customer", "orders", 3)
@@ -900,12 +1063,12 @@ func shardOrders(t *testing.T) {
func checkThatVDiffFails(t *testing.T, keyspace, workflow string) {
ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow)
t.Run("check that vdiff1 won't run", func(t2 *testing.T) {
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", ksWorkflow)
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--v1", ksWorkflow)
require.Error(t, err)
require.Contains(t, output, "invalid VDiff run")
})
t.Run("check that vdiff2 won't run", func(t2 *testing.T) {
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--v2", ksWorkflow)
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", ksWorkflow)
require.Error(t, err)
require.Contains(t, output, "invalid VDiff run")
@@ -923,22 +1086,21 @@ func shardMerchant(t *testing.T) {
if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, targetKsOpts); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", merchantKeyspace, "-80"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", merchantKeyspace, "80-"), 1); err != nil {
- t.Fatal(err)
- }
- moveTables(t, cell, workflow, sourceKs, targetKs, tables)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "-80")
+ require.NoError(t, err)
+ err = cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "80-")
+ require.NoError(t, err)
+ moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables)
merchantKs := vc.Cells[defaultCell.Name].Keyspaces[merchantKeyspace]
merchantTab1 := merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet
merchantTab2 := merchantKs.Shards["80-"].Tablets["zone1-500"].Vttablet
- catchup(t, merchantTab1, workflow, "MoveTables")
- catchup(t, merchantTab2, workflow, "MoveTables")
+ workflowType := "MoveTables"
+ catchup(t, merchantTab1, workflow, workflowType)
+ catchup(t, merchantTab2, workflow, workflowType)
vdiff1(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "")
- switchReads(t, allCellNames, ksWorkflow)
- switchWrites(t, ksWorkflow, false)
+ switchReads(t, workflowType, allCellNames, ksWorkflow, false)
+ switchWrites(t, workflowType, ksWorkflow, false)
printRoutingRules(t, vc, "After merchant movetables")
// confirm that the backticking of keyspaces in the routing rules works
@@ -947,7 +1109,7 @@ func shardMerchant(t *testing.T) {
if err != nil {
require.FailNow(t, output)
}
- dropSources(t, ksWorkflow)
+ moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables)
waitForRowCountInTablet(t, merchantTab1, merchantKeyspace, "merchant", 1)
waitForRowCountInTablet(t, merchantTab2, merchantKeyspace, "merchant", 1)
@@ -979,7 +1141,7 @@ func materializeProduct(t *testing.T) {
t.Run("throttle-app-product", func(t *testing.T) {
// Now, throttle the streamer on source tablets, insert some rows
for _, tab := range productTablets {
- _, body, err := throttleApp(tab, sourceThrottlerAppName)
+ body, err := throttleApp(tab, sourceThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, sourceThrottlerAppName)
@@ -997,7 +1159,7 @@ func materializeProduct(t *testing.T) {
t.Run("unthrottle-app-product", func(t *testing.T) {
// unthrottle on source tablets, and expect the rows to show up
for _, tab := range productTablets {
- _, body, err := unthrottleApp(tab, sourceThrottlerAppName)
+ body, err := unthrottleApp(tab, sourceThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, sourceThrottlerAppName)
// give time for unthrottling to take effect and for target to fetch data
@@ -1012,7 +1174,7 @@ func materializeProduct(t *testing.T) {
// Now, throttle vreplication (vcopier/vapplier) on target tablets, and
// insert some more rows.
for _, tab := range customerTablets {
- _, body, err := throttleApp(tab, targetThrottlerAppName)
+ body, err := throttleApp(tab, targetThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, targetThrottlerAppName)
// Wait for throttling to take effect (caching will expire by this time):
@@ -1030,7 +1192,7 @@ func materializeProduct(t *testing.T) {
t.Run("unthrottle-app-customer", func(t *testing.T) {
// unthrottle on target tablets, and expect the rows to show up
for _, tab := range customerTablets {
- _, body, err := unthrottleApp(tab, targetThrottlerAppName)
+ body, err := unthrottleApp(tab, targetThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, targetThrottlerAppName)
}
@@ -1173,41 +1335,75 @@ func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info str
vttablet.WaitForVReplicationToCatchup(t, workflow, fmt.Sprintf("vt_%s", vttablet.Keyspace), maxWait)
}
-func moveTables(t *testing.T, cell, workflow, sourceKs, targetKs, tables string) {
- if err := vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--v1", "--cells="+cell, "--workflow="+workflow,
- "--tablet_types="+"primary,replica,rdonly", sourceKs, targetKs, tables); err != nil {
- t.Fatalf("MoveTables command failed with %+v\n", err)
+func moveTablesAction(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, extraFlags ...string) {
+ var err error
+ if len(extraFlags) > 0 {
+ err = vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables,
+ "--cells="+cell, "--tablet_types=primary,replica,rdonly", strings.Join(extraFlags, " "),
+ action, fmt.Sprintf("%s.%s", targetKs, workflow))
+ } else {
+ err = vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, "--cells="+cell,
+ "--tablet_types=primary,replica,rdonly", action, fmt.Sprintf("%s.%s", targetKs, workflow))
+ }
+ if err != nil {
+ t.Fatalf("MoveTables %s command failed with %+v\n", action, err)
}
}
-func moveTablesWithTabletTypes(t *testing.T, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string) {
- if err := vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--v1", "--cells="+cell, "--workflow="+workflow,
- "--tablet_types="+tabletTypes, sourceKs, targetKs, tables); err != nil {
- t.Fatalf("MoveTables command failed with %+v\n", err)
+func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string, timeout time.Duration, ignoreErrors bool) {
+ if err := vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, "--cells="+cell,
+ "--tablet_types="+tabletTypes, "--timeout="+timeout.String(), action, fmt.Sprintf("%s.%s", targetKs, workflow)); err != nil {
+ if !ignoreErrors {
+ t.Fatalf("MoveTables %s command failed with %+v\n", action, err)
+ }
}
}
+
func applyVSchema(t *testing.T, vschema, keyspace string) {
err := vc.VtctlClient.ExecuteCommand("ApplyVSchema", "--", "--vschema", vschema, keyspace)
require.NoError(t, err)
}
-func switchReadsDryRun(t *testing.T, cells, ksWorkflow string, dryRunResults []string) {
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("SwitchReads", "--", "--cells="+cells, "--tablet_types=replica", "--dry_run", ksWorkflow)
- require.NoError(t, err, fmt.Sprintf("SwitchReads DryRun Error: %s: %s", err, output))
+func switchReadsDryRun(t *testing.T, workflowType, cells, ksWorkflow string, dryRunResults []string) {
+ if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] &&
+ workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] {
+ require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard",
+ "workflow type specified: %s", workflowType)
+ }
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--tablet_types=rdonly,replica",
+ "--dry_run", "SwitchTraffic", ksWorkflow)
+ require.NoError(t, err, fmt.Sprintf("Switching Reads DryRun Error: %s: %s", err, output))
validateDryRunResults(t, output, dryRunResults)
}
-func switchReads(t *testing.T, cells, ksWorkflow string) {
+func switchReads(t *testing.T, workflowType, cells, ksWorkflow string, reverse bool) {
+ if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] &&
+ workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] {
+ require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard",
+ "workflow type specified: %s", workflowType)
+ }
var output string
var err error
- output, err = vc.VtctlClient.ExecuteCommandWithOutput("SwitchReads", "--", "--cells="+cells, "--tablet_types=rdonly", ksWorkflow)
- require.NoError(t, err, fmt.Sprintf("SwitchReads Error: %s: %s", err, output))
- output, err = vc.VtctlClient.ExecuteCommandWithOutput("SwitchReads", "--", "--cells="+cells, "--tablet_types=replica", ksWorkflow)
- require.NoError(t, err, fmt.Sprintf("SwitchReads Error: %s: %s", err, output))
+ command := "SwitchTraffic"
+ if reverse {
+ command = "ReverseTraffic"
+ }
+ output, err = vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--tablet_types=rdonly",
+ command, ksWorkflow)
+ require.NoError(t, err, fmt.Sprintf("%s Error: %s: %s", command, err, output))
+ output, err = vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--tablet_types=replica",
+ command, ksWorkflow)
+ require.NoError(t, err, fmt.Sprintf("%s Error: %s: %s", command, err, output))
}
-func switchWritesDryRun(t *testing.T, ksWorkflow string, dryRunResults []string) {
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("SwitchWrites", "--", "--dry_run", ksWorkflow)
- require.NoError(t, err, fmt.Sprintf("SwitchWrites DryRun Error: %s: %s", err, output))
+func switchWritesDryRun(t *testing.T, workflowType, ksWorkflow string, dryRunResults []string) {
+ if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] &&
+ workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] {
+ require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard",
+ "workflow type specified: %s", workflowType)
+ }
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--tablet_types=primary", "--dry_run",
+ "SwitchTraffic", ksWorkflow)
+ require.NoError(t, err, fmt.Sprintf("Switch writes DryRun Error: %s: %s", err, output))
validateDryRunResults(t, output, dryRunResults)
}
@@ -1215,7 +1411,7 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) {
// Temporary code: print lots of info for debugging occasional flaky failures in customer reshard in CI for multicell test
debug := true
if debug {
- log.Infof("------------------- START Extra debug info %s SwitchWrites %s", msg, ksWorkflow)
+ log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow)
ksShards := []string{"product/0", "customer/-80", "customer/80-"}
printShardPositions(vc, ksShards)
custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
@@ -1241,32 +1437,25 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) {
}
}
-func switchWrites(t *testing.T, ksWorkflow string, reverse bool) {
+func switchWrites(t *testing.T, workflowType, ksWorkflow string, reverse bool) {
+ if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] &&
+ workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] {
+ require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard",
+ "workflow type specified: %s", workflowType)
+ }
+ command := "SwitchTraffic"
+ if reverse {
+ command = "ReverseTraffic"
+ }
const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("SwitchWrites", "--",
- "--filtered_replication_wait_time="+SwitchWritesTimeout, fmt.Sprintf("--reverse=%t", reverse), ksWorkflow)
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--tablet_types=primary",
+ "--timeout="+SwitchWritesTimeout, command, ksWorkflow)
if output != "" {
- fmt.Printf("Output of SwitchWrites for %s:\n++++++\n%s\n--------\n", ksWorkflow, output)
+ fmt.Printf("Output of switching writes for %s:\n++++++\n%s\n--------\n", ksWorkflow, output)
}
- //printSwitchWritesExtraDebug is useful when debugging failures in SwitchWrites due to corner cases/races
+ // printSwitchWritesExtraDebug is useful when debugging failures in Switch writes due to corner cases/races
_ = printSwitchWritesExtraDebug
- require.NoError(t, err, fmt.Sprintf("SwitchWrites Error: %s: %s", err, output))
-}
-
-func dropSourcesDryRun(t *testing.T, ksWorkflow string, renameTables bool, dryRunResults []string) {
- args := []string{"DropSources", "--", "--dry_run"}
- if renameTables {
- args = append(args, "--rename_tables")
- }
- args = append(args, ksWorkflow)
- output, err := vc.VtctlClient.ExecuteCommandWithOutput(args...)
- require.NoError(t, err, fmt.Sprintf("DropSources Error: %s: %s", err, output))
- validateDryRunResults(t, output, dryRunResults)
-}
-
-func dropSources(t *testing.T, ksWorkflow string) {
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("DropSources", ksWorkflow)
- require.NoError(t, err, fmt.Sprintf("DropSources Error: %s: %s", err, output))
+ require.NoError(t, err, fmt.Sprintf("Switch writes Error: %s: %s", err, output))
}
// generateInnoDBRowHistory generates at least maxSourceTrxHistory rollback segment entries.
diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go
index 8329ae5e490..24f4f30e9e4 100644
--- a/go/test/endtoend/vreplication/vreplication_test_env.go
+++ b/go/test/endtoend/vreplication/vreplication_test_env.go
@@ -21,13 +21,13 @@ var dryRunResultsSwitchWritesCustomerShard = []string{
"Lock keyspace customer",
"Stop writes on keyspace product, tables [Lead,Lead-1,customer,db_order_test]:",
"/ Keyspace product, Shard 0 at Position",
- "Wait for VReplication on stopped streams to catchup for upto 30s",
+ "Wait for VReplication on stopped streams to catchup for up to 30s",
"Create reverse replication workflow p2c_reverse",
"Create journal entries on source databases",
"Enable writes on keyspace customer tables [Lead,Lead-1,customer,db_order_test]",
"Switch routing from keyspace product to keyspace customer",
"Routing rules for tables [Lead,Lead-1,customer,db_order_test] will be updated",
- "SwitchWrites completed, freeze and delete vreplication streams on:",
+ "Switch writes completed, freeze and delete vreplication streams on:",
" tablet 200 ",
" tablet 300 ",
"Start reverse replication streams on:",
@@ -41,7 +41,7 @@ var dryRunResultsSwitchWritesCustomerShard = []string{
var dryRunResultsReadCustomerShard = []string{
"Lock keyspace product",
- "Switch reads for tables [Lead,Lead-1,customer,db_order_test] to keyspace customer for tablet types [REPLICA,RDONLY]",
+ "Switch reads for tables [Lead,Lead-1,customer,db_order_test] to keyspace customer for tablet types [RDONLY,REPLICA]",
"Routing rules for tables [Lead,Lead-1,customer,db_order_test] will be updated",
"Unlock keyspace product",
}
@@ -60,7 +60,7 @@ var dryRunResultsSwitchWritesM2m3 = []string{
"Stop writes on keyspace merchant-type, tables [/.*]:",
"/ Keyspace merchant-type, Shard -80 at Position",
"/ Keyspace merchant-type, Shard 80- at Position",
- "Wait for VReplication on stopped streams to catchup for upto 30s",
+ "Wait for VReplication on stopped streams to catchup for up to 30s",
"Create reverse replication workflow m2m3_reverse",
"Create journal entries on source databases",
"Enable writes on keyspace merchant-type tables [/.*]",
@@ -72,7 +72,7 @@ var dryRunResultsSwitchWritesM2m3 = []string{
" Shard -40, Tablet 1600 ",
" Shard 40-c0, Tablet 1700 ",
" Shard c0-, Tablet 1800 ",
- "SwitchWrites completed, freeze and delete vreplication streams on:",
+ "Switch writes completed, freeze and delete vreplication streams on:",
" tablet 1600 ",
" tablet 1700 ",
" tablet 1800 ",
@@ -85,43 +85,3 @@ var dryRunResultsSwitchWritesM2m3 = []string{
" Keyspace merchant-type, Shard c0-, Tablet 1800, Workflow m2m3, DbName vt_merchant-type",
"Unlock keyspace merchant-type",
}
-
-var dryRunResultsDropSourcesDropCustomerShard = []string{
- "Lock keyspace product",
- "Lock keyspace customer",
- "Dropping these tables from the database and removing them from the vschema for keyspace product:",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table Lead",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table Lead-1",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table customer",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table db_order_test",
- "Denied tables [Lead,Lead-1,customer,db_order_test] will be removed from:",
- " Keyspace product Shard 0 Tablet 100",
- "Delete reverse vreplication streams on source:",
- " Keyspace product Shard 0 Workflow p2c_reverse DbName vt_product Tablet 100",
- "Delete vreplication streams on target:",
- " Keyspace customer Shard -80 Workflow p2c DbName vt_customer Tablet 200",
- " Keyspace customer Shard 80- Workflow p2c DbName vt_customer Tablet 300",
- "Routing rules for participating tables will be deleted",
- "Unlock keyspace customer",
- "Unlock keyspace product",
-}
-
-var dryRunResultsDropSourcesRenameCustomerShard = []string{
- "Lock keyspace product",
- "Lock keyspace customer",
- "Renaming these tables from the database and removing them from the vschema for keyspace product:",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table Lead",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table Lead-1",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table customer",
- " Keyspace product Shard 0 DbName vt_product Tablet 100 Table db_order_test",
- "Denied tables [Lead,Lead-1,customer,db_order_test] will be removed from:",
- " Keyspace product Shard 0 Tablet 100",
- "Delete reverse vreplication streams on source:",
- " Keyspace product Shard 0 Workflow p2c_reverse DbName vt_product Tablet 100",
- "Delete vreplication streams on target:",
- " Keyspace customer Shard -80 Workflow p2c DbName vt_customer Tablet 200",
- " Keyspace customer Shard 80- Workflow p2c DbName vt_customer Tablet 300",
- "Routing rules for participating tables will be deleted",
- "Unlock keyspace customer",
- "Unlock keyspace product",
-}
diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go
index 731679e1eba..a5cac4c68f8 100644
--- a/go/test/endtoend/vreplication/vschema_load_test.go
+++ b/go/test/endtoend/vreplication/vschema_load_test.go
@@ -26,6 +26,7 @@ import (
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/log"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -52,8 +53,9 @@ func TestVSchemaChangesUnderLoad(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0")
+ require.NoError(t, err)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go
index c596ed084f7..3f3044b0294 100644
--- a/go/test/endtoend/vreplication/vstream_test.go
+++ b/go/test/endtoend/vreplication/vstream_test.go
@@ -27,6 +27,7 @@ import (
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/log"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -57,7 +58,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) {
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -188,7 +189,7 @@ const vschemaUnsharded = `
}
`
const schemaSharded = `
-create table customer(cid int, name varbinary(128), primary key(cid)) CHARSET=utf8mb4;
+create table customer(cid int, name varbinary(128), primary key(cid)) TABLESPACE innodb_system CHARSET=utf8mb4;
`
const vschemaSharded = `
{
@@ -245,7 +246,8 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID
vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1)
+ err := cluster.WaitForHealthyShard(vc.VtctldClient, "unsharded", "0")
+ require.NoError(t, err)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
diff --git a/go/test/endtoend/vtcombo/vttest_sample_test.go b/go/test/endtoend/vtcombo/vttest_sample_test.go
index df9e11a98f2..91db0f8a2c0 100644
--- a/go/test/endtoend/vtcombo/vttest_sample_test.go
+++ b/go/test/endtoend/vtcombo/vttest_sample_test.go
@@ -30,7 +30,7 @@ import (
"strings"
"testing"
- mysql "github.com/go-sql-driver/mysql"
+ "github.com/go-sql-driver/mysql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -123,12 +123,13 @@ func TestMain(m *testing.M) {
func TestStandalone(t *testing.T) {
// validate debug vars
resp, err := http.Get(fmt.Sprintf("http://%s/debug/vars", vtctldAddr))
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
require.Equal(t, 200, resp.StatusCode)
resultMap := make(map[string]any)
respByte, _ := io.ReadAll(resp.Body)
err = json.Unmarshal(respByte, &resultMap)
- require.Nil(t, err)
+ require.NoError(t, err)
cmd := resultMap["cmdline"]
require.NotNil(t, cmd, "cmdline is not available in debug vars")
tmp, _ := cmd.([]any)
@@ -136,7 +137,7 @@ func TestStandalone(t *testing.T) {
ctx := context.Background()
conn, err := vtgateconn.Dial(ctx, grpcAddress)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
cfg := mysql.NewConfig()
@@ -155,9 +156,9 @@ func TestStandalone(t *testing.T) {
assertTabletsPresent(t)
err = localCluster.TearDown()
- require.Nil(t, err)
+ require.NoError(t, err)
err = localCluster.Setup()
- require.Nil(t, err)
+ require.NoError(t, err)
assertInsertedRowsExist(ctx, t, conn, idStart, rowCount)
assertTabletsPresent(t)
@@ -170,7 +171,7 @@ func assertInsertedRowsExist(ctx context.Context, t *testing.T, conn *vtgateconn
"id_start": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(idStart), 10))},
}
res, err := cur.Execute(ctx, "select * from test_table where id >= :id_start", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
assert.Equal(t, rowCount, len(res.Rows))
@@ -179,7 +180,7 @@ func assertInsertedRowsExist(ctx context.Context, t *testing.T, conn *vtgateconn
"id_start": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(idStart), 10))},
}
res, err = cur.Execute(ctx, "select * from test_table where id = :id_start", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, 1, len(res.Rows))
assert.Equal(t, "VARCHAR(\"test1000\")", res.Rows[0][1].String())
}
@@ -200,7 +201,7 @@ func assertRouting(ctx context.Context, t *testing.T, db *sql.DB) {
func assertCanInsertRow(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn) {
cur := conn.Session(ks1+":80-@primary", nil)
_, err := cur.Execute(ctx, "begin", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
i := 0x810000000000000
bindVariables := map[string]*querypb.BindVariable{
@@ -210,10 +211,10 @@ func assertCanInsertRow(ctx context.Context, t *testing.T, conn *vtgateconn.VTGa
}
query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)"
_, err = cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
_, err = cur.Execute(ctx, "commit", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
}
func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn, idStart, rowCount int) {
@@ -221,7 +222,7 @@ func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateCo
query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)"
_, err := cur.Execute(ctx, "begin", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
for i := idStart; i < idStart+rowCount; i++ {
bindVariables := map[string]*querypb.BindVariable{
@@ -230,11 +231,11 @@ func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateCo
"keyspace_id": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))},
}
_, err = cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
}
_, err = cur.Execute(ctx, "commit", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
}
func assertTabletsPresent(t *testing.T) {
@@ -243,7 +244,7 @@ func assertTabletsPresent(t *testing.T) {
log.Infof("Running vtctlclient with command: %v", tmpCmd.Args)
output, err := tmpCmd.CombinedOutput()
- require.Nil(t, err)
+ require.NoError(t, err)
numPrimary, numReplica, numRdonly, numDash80, num80Dash, numRouted := 0, 0, 0, 0, 0, 0
lines := strings.Split(string(output), "\n")
@@ -302,17 +303,17 @@ func assertTransactionalityAndRollbackObeyed(ctx context.Context, t *testing.T,
}
query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)"
_, err := cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
bindVariables = map[string]*querypb.BindVariable{
"msg": {Type: querypb.Type_VARCHAR, Value: []byte(msg)},
}
res, err := cur.Execute(ctx, "select * from test_table where msg = :msg", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, 1, len(res.Rows))
_, err = cur.Execute(ctx, "begin", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
msg2 := msg + "2"
bindVariables = map[string]*querypb.BindVariable{
@@ -321,15 +322,15 @@ func assertTransactionalityAndRollbackObeyed(ctx context.Context, t *testing.T,
}
query = "update test_table set msg = :msg where id = :id"
_, err = cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
_, err = cur.Execute(ctx, "rollback", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
bindVariables = map[string]*querypb.BindVariable{
"msg": {Type: querypb.Type_VARCHAR, Value: []byte(msg2)},
}
res, err = cur.Execute(ctx, "select * from test_table where msg = :msg", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, 0, len(res.Rows))
}
diff --git a/go/test/endtoend/vtctldclient/cli_test.go b/go/test/endtoend/vtctldclient/cli_test.go
new file mode 100644
index 00000000000..82dbc6658a2
--- /dev/null
+++ b/go/test/endtoend/vtctldclient/cli_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtctldclient
+
+import (
+ "context"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/proto"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command"
+ "vitess.io/vitess/go/protoutil"
+ "vitess.io/vitess/go/vt/vtctl/localvtctldclient"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
+)
+
+type fakeServer struct {
+ vtctlservicepb.UnimplementedVtctldServer
+ t testing.TB
+
+ applySchemaRequests []*vtctldatapb.ApplySchemaRequest
+}
+
+func (s *fakeServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySchemaRequest) (*vtctldatapb.ApplySchemaResponse, error) {
+ s.applySchemaRequests = append(s.applySchemaRequests, req)
+ return &vtctldatapb.ApplySchemaResponse{}, nil
+}
+
+func TestApplySchema(t *testing.T) {
+ server := &fakeServer{t: t}
+
+ command.VtctldClientProtocol = "local"
+ localvtctldclient.SetServer(server)
+
+ defer func(argv []string) {
+ os.Args = argv
+ }(append([]string{}, os.Args...))
+
+ os.Args = []string{
+ "vtctldclient",
+ "--server='doesnotmatter'",
+ "ApplySchema",
+ "--sql",
+ `"CREATE TABLE foo(id int not null primary key, name varchar(255)); CREATE TABLE bar (id int not null primary key, foo_id int not null);`,
+ "test",
+ }
+
+ require.NoError(t, command.Root.Execute())
+ expected := &vtctldatapb.ApplySchemaRequest{
+ Keyspace: "test",
+ Sql: []string{
+ `"CREATE TABLE foo(id int not null primary key, name varchar(255)); CREATE TABLE bar (id int not null primary key, foo_id int not null);`,
+ },
+ DdlStrategy: "direct",
+ WaitReplicasTimeout: protoutil.DurationToProto(10 * time.Second),
+ }
+ actual := server.applySchemaRequests[0]
+ assert.True(t, proto.Equal(actual, expected), "ApplySchema received unexpected request (got %v want %v)", actual, expected)
+}
diff --git a/go/test/endtoend/vtctldweb/vtctld_web_main_test.go b/go/test/endtoend/vtctldweb/vtctld_web_main_test.go
deleted file mode 100644
index 40b0bbd7c7a..00000000000
--- a/go/test/endtoend/vtctldweb/vtctld_web_main_test.go
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vtctldweb
-
-import (
- "flag"
- "fmt"
- "math/rand"
- "os"
- "os/exec"
- "strings"
- "syscall"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "github.com/tebeka/selenium"
- "github.com/tebeka/selenium/chrome"
-
- "vitess.io/vitess/go/test/endtoend/cluster"
- vttestpb "vitess.io/vitess/go/vt/proto/vttest"
- "vitess.io/vitess/go/vt/vttest"
-)
-
-// nolint
-var (
- localCluster *vttest.LocalCluster
- hostname = "localhost" //nolint
- wd selenium.WebDriver
- seleniumService *selenium.Service
- vtctldAddr string
- ks1 = "test_keyspace"
- ks2 = "test_keyspace2"
- sqlSchema = "CREATE TABLE test_table (\n" +
- " `id` BIGINT(20) UNSIGNED NOT NULL,\n" +
- " `msg` VARCHAR(64),\n" +
- " `keyspace_id` BIGINT(20) UNSIGNED NOT NULL,\n" +
- " PRIMARY KEY (id)\n" +
- ") ENGINE=InnoDB"
-)
-
-func TestMain(m *testing.M) {
- defer cluster.PanicHandler(nil)
- flag.Parse()
-
- exitcode, err := func() (int, error) {
-
- // runs Xvfb in background
- tearDownXvfb, err := RunXvfb()
- if err != nil {
- return 1, err
- }
- defer tearDownXvfb()
-
- // cluster setup using vtcombo
- topology := new(vttestpb.VTTestTopology)
- topology.Cells = []string{"test", "test2"}
- topology.Keyspaces = []*vttestpb.Keyspace{
- {
- Name: ks1,
- Shards: []*vttestpb.Shard{
- {Name: "-80"},
- {Name: "80-"},
- },
- RdonlyCount: 2,
- ReplicaCount: 2,
- },
- {
- Name: ks2,
- Shards: []*vttestpb.Shard{
- {Name: "0"},
- },
- RdonlyCount: 2,
- ReplicaCount: 1,
- },
- }
-
- // create driver here
- err = CreateWebDriver(getPort())
- if err != nil {
- return 1, err
- }
- defer TeardownWebDriver()
-
- var cfg vttest.Config
- cfg.Topology = topology
- cfg.SchemaDir = os.Getenv("VTROOT") + "/test/vttest_schema"
- cfg.DefaultSchemaDir = os.Getenv("VTROOT") + "/test/vttest_schema/default"
-
- localCluster = &vttest.LocalCluster{
- Config: cfg,
- }
-
- err = localCluster.Setup()
- defer localCluster.TearDown()
-
- vtctldAddr = fmt.Sprintf("http://localhost:%d", localCluster.Env.PortForProtocol("vtcombo", "port"))
- if err != nil {
- return 1, err
- }
-
- return m.Run(), nil
- }()
- if err != nil {
- fmt.Printf("%v\n", err)
- os.Exit(1)
- } else {
- os.Exit(exitcode)
- }
-}
-
-// RunXvfb runs Xvfb command in background and returns the teardown function.
-func RunXvfb() (func() error, error) {
-
- tmpProcess := exec.Command("Xvfb", ":15", "-ac")
-
- err := tmpProcess.Start()
- if err != nil {
- return nil, err
- }
-
- exit := make(chan error)
- go func() {
- exit <- tmpProcess.Wait()
- }()
-
- teardownFunc := func() error {
- tmpProcess.Process.Signal(syscall.SIGTERM)
- select {
- case <-exit:
- return nil
- case <-time.After(10 * time.Second):
- tmpProcess.Process.Kill()
- return <-exit
- }
- }
-
- os.Setenv("DISPLAY", ":15")
-
- return teardownFunc, nil
-}
-
-// CreateWebDriver Creates a webdriver object (local or remote for Travis).
-func CreateWebDriver(port int) error {
- // selenium.SetDebug(true)
-
- // Set common Options
- options := selenium.ChromeDriver(os.Getenv("VTROOT") + "/dist")
-
- if os.Getenv("CI") == "true" && os.Getenv("TRAVIS") == "true" {
-
- capabilities := selenium.Capabilities{}
- capabilities["tunnel-identifier"] = os.Getenv("TRAVIS_JOB_NUMBER")
- capabilities["build"] = os.Getenv("TRAVIS_BUILD_NUMBER")
- capabilities["platform"] = "Linux"
- capabilities["browserName"] = "chrome"
- capabilities["chromeOptions"] = options
-
- var err error
- wd, err = selenium.NewRemote(capabilities, fmt.Sprintf("%s:%s@localhost:4445/wd/hub", os.Getenv("SAUCE_USERNAME"), os.Getenv("SAUCE_ACCESS_KEY")))
- if err != nil {
- return err
- }
-
- name, _ := wd.CurrentWindowHandle() //nolint
- return wd.ResizeWindow(name, 1280, 1024)
- }
-
- // Only testing against Chrome for now
- cc := selenium.Capabilities{"browserName": "chrome"}
- cc.AddChrome(chrome.Capabilities{
- Args: []string{
- "--disable-gpu",
- "--no-sandbox",
- "--headless",
- },
- })
-
- os.Setenv("webdriver.chrome.driver", os.Getenv("VTROOT")+"/dist")
-
- var err error
- seleniumService, err = selenium.NewChromeDriverService(os.Getenv("VTROOT")+"/dist/chromedriver/chromedriver", port, options)
- if err != nil {
- return err
- }
-
- wd, err = selenium.NewRemote(cc, fmt.Sprintf("http://localhost:%d/wd/hub", port))
- if err != nil {
- return err
- }
- name, _ := wd.CurrentWindowHandle() //nolint
- return wd.ResizeWindow(name, 1280, 1024)
-}
-
-func TeardownWebDriver() {
- wd.Quit()
- if seleniumService != nil {
- seleniumService.Stop()
-
- }
-}
-
-func checkNewView(t *testing.T, keyspaces, cells, types, metrics []string, selectedKs, selectedCell, selectedType, selectedMetric string) {
- checkDropdowns(t, keyspaces, cells, types, metrics, selectedKs, selectedCell, selectedType, selectedMetric)
- checkHeatMaps(t, selectedKs)
-}
-
-func checkHeatMaps(t *testing.T, selectedKs string) {
- elem, err := wd.FindElement(selenium.ByTagName, "vt-status")
- require.Nil(t, err)
-
- elems, err := elem.FindElements(selenium.ByTagName, "vt-heatmap")
- require.Nil(t, err)
-
- if selectedKs == "all" {
- availableKs := getDropdownOptions(t, "keyspace")
- assert.Equal(t, len(elems), len(availableKs)-1)
- for _, elem := range elems {
- heading, err := elem.FindElement(selenium.ByID, "keyspaceName")
- require.Nil(t, err)
-
- headingTxt := text(t, heading)
-
- _, err = elem.FindElement(selenium.ByID, headingTxt)
- require.Nil(t, err)
-
- assert.Contains(t, availableKs, headingTxt)
- }
- return
- }
-
- assert.Equal(t, 1, len(elems))
- heading, err := elems[0].FindElement(selenium.ByID, "keyspaceName")
- require.Nil(t, err)
-
- headingTxt := text(t, heading)
-
- _, err = elem.FindElement(selenium.ByID, headingTxt)
- require.Nil(t, err)
-
- assert.Equal(t, selectedKs, headingTxt)
-}
-
-// changeDropdownOptions changes the selected value of dropdown.
-func changeDropdownOptions(t *testing.T, dropdownID, dropdownValue string) {
- statusContent, err := wd.FindElement(selenium.ByTagName, "vt-status")
- require.Nil(t, err)
-
- dropdown, err := statusContent.FindElement(selenium.ByID, dropdownID)
- require.Nil(t, err)
-
- click(t, dropdown)
- options, err := dropdown.FindElements(selenium.ByTagName, "li")
- require.Nil(t, err)
-
- triedOption := []string{}
- for _, op := range options {
- opTxt := text(t, op)
- if opTxt == dropdownValue {
- click(t, op)
- return
- }
-
- triedOption = append(triedOption, opTxt)
- }
- ss(t, "option_check")
- t.Log("dropdown options change failed", strings.Join(triedOption, ","), dropdownValue)
-}
-
-// checkDropdowns validates the dropdown values and selected value.
-func checkDropdowns(t *testing.T, keyspaces, cells, types, metrics []string, selectedKs, selectedCell, selectedType, selectedMetric string) {
-
- Options := getDropdownOptions(t, "keyspace")
- Selected := getDropdownSelection(t, "keyspace")
-
- assert.Equal(t, keyspaces, Options)
- assert.Equal(t, selectedKs, Selected)
-
- Options = getDropdownOptions(t, "cell")
- Selected = getDropdownSelection(t, "cell")
-
- assert.Equal(t, cells, Options)
- assert.Equal(t, selectedCell, Selected)
-
- Options = getDropdownOptions(t, "type")
- Selected = getDropdownSelection(t, "type")
-
- assert.Equal(t, types, Options)
- assert.Equal(t, selectedType, Selected)
-
- Options = getDropdownOptions(t, "metric")
- Selected = getDropdownSelection(t, "metric")
-
- assert.Equal(t, metrics, Options)
- assert.Equal(t, selectedMetric, Selected)
-
-}
-
-// get element functions
-// getDropdownSelection fetchs selected value for corresponding group.
-func getDropdownSelection(t *testing.T, group string) string {
- elem, err := wd.FindElement(selenium.ByTagName, "vt-status")
- require.Nil(t, err)
- elem, err = elem.FindElement(selenium.ByID, group)
- require.Nil(t, err)
- elem, err = elem.FindElement(selenium.ByTagName, "label")
- require.Nil(t, err)
-
- return text(t, elem)
-}
-
-// getDropdownOptions fetchs list of option available for corresponding group.
-func getDropdownOptions(t *testing.T, group string) []string {
- elem, err := wd.FindElement(selenium.ByTagName, "vt-status")
- require.Nil(t, err)
- elem, err = elem.FindElement(selenium.ByID, group)
- require.Nil(t, err)
- elems, err := elem.FindElements(selenium.ByTagName, "option")
- require.Nil(t, err)
-
- var out []string
- for _, elem = range elems {
- out = append(out, text(t, elem))
- }
-
- return out
-}
-
-// getDashboardKeyspaces fetches keyspaces from the dashboard.
-func getDashboardKeyspaces(t *testing.T) []string {
- wait(t, selenium.ByTagName, "vt-dashboard")
-
- dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard")
- require.Nil(t, err)
-
- ksCards, _ := dashboardContent.FindElements(selenium.ByClassName, "vt-keyspace-card") //nolint
- var out []string
- for _, ks := range ksCards {
- out = append(out, text(t, ks))
- }
- return out
-}
-
-// getDashboardShards fetches shards from the dashboard.
-func getDashboardShards(t *testing.T) []string {
- wait(t, selenium.ByTagName, "vt-dashboard")
-
- dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard") //nolint
- require.Nil(t, err)
-
- ksCards, _ := dashboardContent.FindElements(selenium.ByClassName, "vt-shard-stats") //nolint
- var out []string
- for _, ks := range ksCards {
- out = append(out, text(t, ks))
- }
- return out
-}
-
-func getKeyspaceShard(t *testing.T) []string {
- wait(t, selenium.ByTagName, "vt-keyspace-view")
-
- ksContent, err := wd.FindElement(selenium.ByTagName, "vt-keyspace-view")
- require.Nil(t, err)
-
- shards, err := ksContent.FindElements(selenium.ByClassName, "vt-serving-shard")
- require.Nil(t, err)
- var out []string
- for _, s := range shards {
- out = append(out, text(t, s))
- }
- return out
-}
-
-// getShardTablets gives list of tablet type and uid.
-func getShardTablets(t *testing.T) ([]string, []string) {
- wait(t, selenium.ByTagName, "vt-shard-view")
- shardContent, err := wd.FindElement(selenium.ByTagName, "vt-shard-view")
- require.Nil(t, err)
-
- tableRows, _ := shardContent.FindElements(selenium.ByTagName, "tr") //nolint
- tableRows = tableRows[1:]
-
- var tabletTypes, tabletUIDs []string
- for _, row := range tableRows {
- columns, err := row.FindElements(selenium.ByTagName, "td")
- require.Nil(t, err)
-
- typ, err := columns[1].FindElement(selenium.ByClassName, "ui-cell-data")
- require.Nil(t, err)
-
- typTxt := text(t, typ)
-
- tabletTypes = append(tabletTypes, typTxt)
-
- uid, err := columns[3].FindElement(selenium.ByClassName, "ui-cell-data")
- require.Nil(t, err)
-
- uidTxt := text(t, uid)
- tabletUIDs = append(tabletUIDs, uidTxt)
- }
-
- return tabletTypes, tabletUIDs
-}
-
-// navigation functions
-// navigateToDashBoard navigates chrome screen to dashboard of vitess.
-func navigateToDashBoard(t *testing.T) {
- err := wd.Get(vtctldAddr + "/app2")
- require.Nil(t, err)
-
- wait(t, selenium.ByID, "test_keyspace")
-}
-
-// navigateToKeyspaceView navigates chrome screen to first keyspace.
-func navigateToKeyspaceView(t *testing.T) {
- navigateToDashBoard(t)
- dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard")
- require.Nil(t, err)
- ksCard, err := dashboardContent.FindElements(selenium.ByClassName, "vt-card")
- require.Nil(t, err)
- require.Equal(t, 2, len(ksCard))
-
- shardStarts, err := ksCard[0].FindElement(selenium.ByTagName, "md-list")
- require.Nil(t, err)
-
- click(t, shardStarts)
-
- wait(t, selenium.ByClassName, "vt-card")
-}
-
-// navigateToShardView navigates chrome screen to the first shard of first keyspace.
-func navigateToShardView(t *testing.T) {
- navigateToKeyspaceView(t)
- ksContent, err := wd.FindElement(selenium.ByTagName, "vt-keyspace-view")
- require.Nil(t, err)
-
- shardCards, err := ksContent.FindElements(selenium.ByClassName, "vt-serving-shard")
- require.Nil(t, err)
- require.Equal(t, 2, len(shardCards))
-
- click(t, shardCards[0])
-
- wait(t, selenium.ByID, "1")
-}
-
-// other utility
-// wait waits for the given element to be discoverable.
-func wait(t *testing.T, by, val string) {
- err := wd.WaitWithTimeout(func(xwd selenium.WebDriver) (bool, error) {
- _, err := xwd.FindElement(by, val)
- return err == nil, nil
- }, selenium.DefaultWaitTimeout)
- require.Nil(t, err)
-}
-
-// assertDialogCommand validates the command in dialog.
-func assertDialogCommand(t *testing.T, dialog selenium.WebElement, cmds []string) {
- elms, err := dialog.FindElements(selenium.ByClassName, "vt-sheet")
- require.Nil(t, err)
-
- var tmpCmd []string
- for _, elm := range elms {
- tmpCmd = append(tmpCmd, text(t, elm))
- }
-
- assert.ElementsMatch(t, cmds, tmpCmd)
-}
-
-func text(t *testing.T, elem selenium.WebElement) string {
- for i := 0; i < 5; i++ {
- opTxt, err := elem.Text()
- require.Nil(t, err)
- if opTxt != "" {
- return opTxt
- }
- }
-
- return ""
-}
-
-func click(t *testing.T, elem selenium.WebElement) {
- require.Nil(t, elem.Click())
-}
-
-// ss takes screenshot of chrome, for debugging only.
-func ss(t *testing.T, name string) {
- b, err := wd.Screenshot()
- require.Nil(t, err)
- f, err := os.Create("./" + name)
- require.Nil(t, err)
- _, err = f.Write(b)
- require.Nil(t, err)
-}
-
-func getPort() int {
- return 20000 + rand.Intn(10000)
-}
diff --git a/go/test/endtoend/vtctldweb/vtctld_web_test.go b/go/test/endtoend/vtctldweb/vtctld_web_test.go
deleted file mode 100644
index fed8a98320f..00000000000
--- a/go/test/endtoend/vtctldweb/vtctld_web_test.go
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vtctldweb
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "github.com/tebeka/selenium"
-
- "vitess.io/vitess/go/test/endtoend/cluster"
-)
-
-// TestRealtimeStats checks the status by changing dropdown values.
-func TestRealtimeStats(t *testing.T) {
- defer cluster.PanicHandler(t)
- err := wd.Get(vtctldAddr + "/app2")
- require.Nil(t, err)
-
- statusBtn, err := wd.FindElement(selenium.ByPartialLinkText, "Status")
- require.Nil(t, err)
-
- click(t, statusBtn)
-
- wait(t, selenium.ByTagName, "vt-status")
-
- testCases := [8][5]string{
- {"", "", "all", "all", "all"},
- {"type", "REPLICA", "all", "all", "REPLICA"},
- {"cell", "test2", "all", "test2", "REPLICA"},
- {"keyspace", "test_keyspace", "test_keyspace", "test2", "REPLICA"},
- {"cell", "all", "test_keyspace", "all", "REPLICA"},
- {"type", "all", "test_keyspace", "all", "all"},
- {"cell", "test2", "test_keyspace", "test2", "all"},
- {"keyspace", "all", "all", "test2", "all"},
- }
- for _, k := range testCases {
- if k[0] != "" && k[1] != "" {
- changeDropdownOptions(t, k[0], k[1])
- }
-
- tabletOption := []string{"all", "PRIMARY", "REPLICA", "RDONLY"}
- if k[3] == "test2" {
- tabletOption = []string{"all", "REPLICA", "RDONLY"}
- }
-
- checkNewView(t, []string{"all", ks1, ks2}, []string{"all", "test", "test2"}, tabletOption, []string{"lag", "qps", "health"}, k[2], k[3], k[4], "health")
- }
-}
-
-// TestShardView validates tablet type and uids.
-func TestShardView(t *testing.T) {
- defer cluster.PanicHandler(t)
- navigateToShardView(t)
-
- tabletTypes, tabletUIDs := getShardTablets(t)
- // TODO: update vtctld web ui to change master to primary
- assert.ElementsMatch(t, []string{"master", "replica", "rdonly", "rdonly", "replica", "replica", "rdonly", "rdonly"}, tabletTypes)
- assert.ElementsMatch(t, []string{"1", "2", "3", "4", "5", "6", "7", "8"}, tabletUIDs)
-}
-
-// TestKsView validates the shard names for keyspace.
-func TestKsView(t *testing.T) {
- defer cluster.PanicHandler(t)
- navigateToKeyspaceView(t)
- shards := getKeyspaceShard(t)
- assert.ElementsMatch(t, []string{"-80", "80-"}, shards)
-}
-
-// TestCreateKs validates the keyspace creation using ui.
-func TestCreateKs(t *testing.T) {
- defer cluster.PanicHandler(t)
- navigateToDashBoard(t)
-
- dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard")
- require.Nil(t, err)
-
- dialog, err := dashboardContent.FindElement(selenium.ByTagName, "vt-dialog")
- require.Nil(t, err)
-
- dashboardMenu, err := dashboardContent.FindElement(selenium.ByClassName, "vt-menu")
- require.Nil(t, err)
-
- click(t, dashboardMenu)
-
- dashboardOptions, err := dashboardContent.FindElements(selenium.ByClassName, "ui-menuitem-text")
- require.Nil(t, err)
-
- for _, v := range dashboardOptions {
- if text(t, v) == "New" {
- click(t, v)
- break
- }
- }
-
- inputFields, err := dialog.FindElements(selenium.ByTagName, "md-input")
- require.Nil(t, err)
-
- for i, input := range inputFields {
- ele, err := input.FindElement(selenium.ByTagName, "input")
- require.Nil(t, err)
- switch i {
- case 0:
- err := ele.SendKeys("test_keyspace3")
- require.Nil(t, err)
- assertDialogCommand(t, dialog, []string{"CreateKeyspace", "--force=false", "test_keyspace3"})
- }
- }
-
- assertDialogCommand(t, dialog, []string{"CreateKeyspace", "--force=false", "test_keyspace3"})
-
- create, err := dialog.FindElement(selenium.ByID, "vt-action")
- require.Nil(t, err)
- click(t, create)
-
- dismiss, err := dialog.FindElement(selenium.ByID, "vt-dismiss")
- require.Nil(t, err)
- click(t, dismiss)
- time.Sleep(5 * time.Microsecond)
-
- ksNames := getDashboardKeyspaces(t)
- assert.ElementsMatch(t, []string{"test_keyspace", "test_keyspace2", "test_keyspace3"}, ksNames)
-
- testKs, err := dashboardContent.FindElements(selenium.ByClassName, "vt-card")
- require.Nil(t, err)
- menu, err := testKs[2].FindElement(selenium.ByClassName, "vt-menu")
- require.Nil(t, err)
- click(t, menu)
-
- options, err := testKs[2].FindElements(selenium.ByTagName, "li")
- require.Nil(t, err)
- for _, v := range options {
- if text(t, v) == "Delete" {
- click(t, v)
- break
- }
- }
-
- delete, err := dialog.FindElement(selenium.ByID, "vt-action")
- require.Nil(t, err)
- click(t, delete)
-
- dismiss, err = dialog.FindElement(selenium.ByID, "vt-dismiss")
- require.Nil(t, err)
- click(t, dismiss)
-
- ksNames = getDashboardKeyspaces(t)
- assert.ElementsMatch(t, []string{"test_keyspace", "test_keyspace2"}, ksNames)
-}
-
-// TestDashboard validate the keyspaces and shard in dashboard.
-func TestDashboard(t *testing.T) {
- defer cluster.PanicHandler(t)
- navigateToDashBoard(t)
- ksNames := getDashboardKeyspaces(t)
- assert.ElementsMatch(t, []string{"test_keyspace", "test_keyspace2"}, ksNames)
- shardNames := getDashboardShards(t)
- assert.ElementsMatch(t, []string{"2 Shards", "1 Shards"}, shardNames)
-}
-
-// TestDashboardValidate validates the validate command from the ui.
-func TestDashboardValidate(t *testing.T) {
- defer cluster.PanicHandler(t)
- navigateToDashBoard(t)
- dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard")
- require.Nil(t, err)
-
- menu, err := dashboardContent.FindElement(selenium.ByClassName, "vt-menu")
- require.Nil(t, err)
- click(t, menu)
-
- firstOption, err := dashboardContent.FindElement(selenium.ByClassName, "ui-menuitem-text")
- require.Nil(t, err)
- assert.Equal(t, "Validate", text(t, firstOption))
-
- click(t, firstOption)
-
- dialog, err := dashboardContent.FindElement(selenium.ByTagName, "vt-dialog")
- require.Nil(t, err)
-
- assertDialogCommand(t, dialog, []string{"Validate", "--ping-tablets=false"})
-
- checkBoxes, err := dialog.FindElements(selenium.ByClassName, "md-checkbox-inner-container")
- require.Nil(t, err)
-
- click(t, checkBoxes[0])
-
- assertDialogCommand(t, dialog, []string{"Validate", "--ping-tablets"})
-
- validate, err := dialog.FindElement(selenium.ByID, "vt-action")
- require.Nil(t, err)
- click(t, validate)
- validateResp, err := dialog.FindElement(selenium.ByClassName, "vt-resp")
- require.Nil(t, err)
-
- fmt.Printf("Validate command response: %s\n", text(t, validateResp))
-
- dismiss, err := dialog.FindElement(selenium.ByID, "vt-dismiss")
- require.Nil(t, err)
- click(t, dismiss)
-}
diff --git a/go/test/endtoend/vtgate/consolidator/main_test.go b/go/test/endtoend/vtgate/consolidator/main_test.go
new file mode 100644
index 00000000000..021db7e513e
--- /dev/null
+++ b/go/test/endtoend/vtgate/consolidator/main_test.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtgate
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/utils"
+)
+
+type consolidatorTestCase struct {
+ tabletType string
+ tabletProcess *cluster.VttabletProcess
+ query string
+ expectConsolidations bool
+}
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ KeyspaceName = "ks"
+ Cell = "test"
+ SchemaSQL = `create table t1(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;`
+
+ VSchema = `
+{
+ "sharded": false,
+ "vindexes": {
+ "hash": {
+ "type": "hash"
+ }
+ },
+ "tables": {
+ "t1": {}
+ }
+}`
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(Cell, "localhost")
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: KeyspaceName,
+ SchemaSQL: SchemaSQL,
+ VSchema: VSchema,
+ }
+ if err := clusterInstance.StartKeyspace(
+ *keyspace,
+ []string{"-"},
+ 1, /*creates 1 replica tablet in addition to primary*/
+ false,
+ ); err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ if err := clusterInstance.StartVtgate(); err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+
+ conn, err := mysql.Connect(context.Background(), &vtParams)
+ if err != nil {
+ return 1
+ }
+ defer conn.Close()
+
+ // Insert some test data.
+ _, err = conn.ExecuteFetch(`insert into t1(id1, id2) values (1, 1)`, 1000, true)
+ if err != nil {
+ return 1
+ }
+ defer func() {
+ conn.ExecuteFetch(`use @primary`, 1000, true)
+ conn.ExecuteFetch(`delete from t1`, 1000, true)
+ }()
+
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+func TestConsolidatorEnabledByDefault(t *testing.T) {
+ testConsolidator(t, []consolidatorTestCase{
+ {
+ "@primary",
+ clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet().VttabletProcess,
+ `select id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ true,
+ },
+ {
+ "@replica",
+ clusterInstance.Keyspaces[0].Shards[0].Replica().VttabletProcess,
+ `select id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ true,
+ },
+ })
+}
+
+func TestConsolidatorEnabledWithDirective(t *testing.T) {
+ testConsolidator(t, []consolidatorTestCase{
+ {
+ "@primary",
+ clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet().VttabletProcess,
+ `select /*vt+ CONSOLIDATOR=enabled */ id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ true,
+ },
+ {
+ "@replica",
+ clusterInstance.Keyspaces[0].Shards[0].Replica().VttabletProcess,
+ `select /*vt+ CONSOLIDATOR=enabled */ id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ true,
+ },
+ })
+}
+
+func TestConsolidatorDisabledWithDirective(t *testing.T) {
+ testConsolidator(t, []consolidatorTestCase{
+ {
+ "@primary",
+ clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet().VttabletProcess,
+ `select /*vt+ CONSOLIDATOR=disabled */ id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ false,
+ },
+ {
+ "@replica",
+ clusterInstance.Keyspaces[0].Shards[0].Replica().VttabletProcess,
+ `select /*vt+ CONSOLIDATOR=disabled */ id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ false,
+ },
+ })
+}
+
+func TestConsolidatorEnabledReplicasWithDirective(t *testing.T) {
+ testConsolidator(t, []consolidatorTestCase{
+ {
+ "@primary",
+ clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet().VttabletProcess,
+ `select /*vt+ CONSOLIDATOR=enabled_replicas */ id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ false,
+ },
+ {
+ "@replica",
+ clusterInstance.Keyspaces[0].Shards[0].Replica().VttabletProcess,
+ `select /*vt+ CONSOLIDATOR=enabled_replicas */ id2 from t1 where sleep(2) = 0 order by id1 asc limit 1`,
+ true,
+ },
+ })
+}
+
+func testConsolidator(t *testing.T, testCases []consolidatorTestCase) {
+ for _, testCase := range testCases {
+ t.Run(fmt.Sprintf("%s%s", testCase.query, testCase.tabletType), func(t *testing.T) {
+ // Create a connection.
+ conn1, err := mysql.Connect(context.Background(), &vtParams)
+ require.NoError(t, err)
+ utils.Exec(t, conn1, fmt.Sprintf("use %s", testCase.tabletType))
+ defer conn1.Close()
+
+ // Create another connection.
+ conn2, err := mysql.Connect(context.Background(), &vtParams)
+ require.NoError(t, err)
+ utils.Exec(t, conn2, fmt.Sprintf("use %s", testCase.tabletType))
+ defer conn2.Close()
+
+ // Create a channel for query results.
+ qrCh := make(chan *sqltypes.Result, 2)
+ defer close(qrCh)
+
+ execAsync := func(conn *mysql.Conn, query string, qrCh chan *sqltypes.Result) {
+ go func() {
+ qrCh <- utils.Exec(t, conn, query)
+ }()
+ }
+
+ // Check initial consolidations.
+ consolidations, err := testCase.tabletProcess.GetConsolidations()
+ require.NoError(t, err, "Failed to get consolidations.")
+ count := consolidations[testCase.query]
+
+ // Send two identical async queries in quick succession.
+ execAsync(conn1, testCase.query, qrCh)
+ execAsync(conn2, testCase.query, qrCh)
+
+ // Wait for results, verify they are the same.
+ qr1 := <-qrCh
+ qr2 := <-qrCh
+ diff := cmp.Diff(fmt.Sprintf("%v", qr1.Rows), fmt.Sprintf("%v", qr2.Rows))
+ require.Empty(t, diff, "Expected query results to be equal but they are different.")
+
+ // Verify the query was (or was not) consolidated.
+ consolidations, err = testCase.tabletProcess.GetConsolidations()
+ require.NoError(t, err, "Failed to get consolidations.")
+ if testCase.expectConsolidations {
+ require.Greater(
+ t,
+ consolidations[testCase.query],
+ count,
+ "Expected query `%s` to be consolidated on %s tablet.",
+ testCase.query,
+ testCase.tabletType,
+ )
+ } else {
+ require.Equal(
+ t,
+ count,
+ consolidations[testCase.query],
+ "Did not expect query `%s` to be consolidated on %s tablet.",
+ testCase.query,
+ testCase.tabletType,
+ )
+ }
+ })
+ }
+}
diff --git a/go/test/endtoend/vtgate/gen4/gen4_test.go b/go/test/endtoend/vtgate/gen4/gen4_test.go
index fc1e53c9a37..c1521012909 100644
--- a/go/test/endtoend/vtgate/gen4/gen4_test.go
+++ b/go/test/endtoend/vtgate/gen4/gen4_test.go
@@ -495,3 +495,32 @@ func TestFilterOnLeftOuterJoin(t *testing.T) {
mcmp.AssertMatches(query, "[[INT32(22)] [INT32(33)]]")
}
+
+func TestPercentageAndUnderscore(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ // insert some data.
+ mcmp.Exec(`insert into t2(id, tcol1, tcol2) values (1, 'A%B', 'A%B'),(2, 'C_D', 'E'),(3, 'AB', 'C1D'),(4, 'E', 'A%B'),(5, 'A%B', 'AB'),(6, 'C1D', 'E'),(7, 'C_D', 'A%B'),(8, 'E', 'C_D')`)
+
+ // Verify that %, _ and their escaped counter-parts work in Vitess in the like clause as well as equality clause
+ mcmp.Exec(`select * from t2 where tcol1 like "A%B"`)
+ mcmp.Exec(`select * from t2 where tcol1 like "A\%B"`)
+ mcmp.Exec(`select * from t2 where tcol1 like "C_D"`)
+ mcmp.Exec(`select * from t2 where tcol1 like "C\_D"`)
+
+ mcmp.Exec(`select * from t2 where tcol1 = "A%B"`)
+ mcmp.Exec(`select * from t2 where tcol1 = "A\%B"`)
+ mcmp.Exec(`select * from t2 where tcol1 = "C_D"`)
+ mcmp.Exec(`select * from t2 where tcol1 = "C\_D"`)
+
+ // Verify that %, _ and their escaped counter-parts work with filtering on VTGate level
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like "A\%B" order by a.tcol1`)
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like "A%B" order by a.tcol1`)
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) = "A\%B" order by a.tcol1`)
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) = "A%B" order by a.tcol1`)
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like "C_D%" order by a.tcol1`)
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like "C\_D%" order by a.tcol1`)
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) = "C_DC_D" order by a.tcol1`)
+ mcmp.Exec(`select a.tcol1 from t2 a join t2 b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) = "C\_DC\_D" order by a.tcol1`)
+}
diff --git a/go/test/endtoend/vtgate/gen4/system_schema_test.go b/go/test/endtoend/vtgate/gen4/system_schema_test.go
index 5f9bec3287f..c075479bb11 100644
--- a/go/test/endtoend/vtgate/gen4/system_schema_test.go
+++ b/go/test/endtoend/vtgate/gen4/system_schema_test.go
@@ -214,3 +214,15 @@ func TestMultipleSchemaPredicates(t *testing.T) {
require.Error(t, err)
require.Contains(t, err.Error(), "specifying two different database in the query is not supported")
}
+
+func TestQuerySystemTables(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ utils.Exec(t, conn, `select * from sys.sys_config`)
+ utils.Exec(t, conn, "select * from mysql.`db`")
+ utils.Exec(t, conn, "select * from performance_schema.error_log")
+}
diff --git a/go/test/endtoend/vtgate/grpc_server_acls/acls_test.go b/go/test/endtoend/vtgate/grpc_server_acls/acls_test.go
new file mode 100644
index 00000000000..a63ca12a201
--- /dev/null
+++ b/go/test/endtoend/vtgate/grpc_server_acls/acls_test.go
@@ -0,0 +1,204 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package grpc_server_acls
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "path"
+ "testing"
+
+ "vitess.io/vitess/go/vt/callerid"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/vt/grpcclient"
+ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn"
+ "vitess.io/vitess/go/vt/vtgate/vtgateconn"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtgateGrpcAddress string
+ hostname = "localhost"
+ keyspaceName = "ks"
+ cell = "zone1"
+ sqlSchema = `
+ create table test_table (
+ id bigint,
+ val varchar(128),
+ primary key(id)
+ ) Engine=InnoDB;
+`
+ grpcServerAuthStaticJSON = `
+ [
+ {
+ "Username": "some_other_user",
+ "Password": "test_password"
+ },
+ {
+ "Username": "another_unrelated_user",
+ "Password": "test_password"
+ }
+ ]
+`
+ tableACLJSON = `
+ {
+ "table_groups": [
+ {
+ "name": "default",
+ "table_names_or_prefixes": ["%"],
+ "readers": ["user_with_access"],
+ "writers": ["user_with_access"],
+ "admins": ["user_with_access"]
+ }
+ ]
+ }
+`
+)
+
+func TestMain(m *testing.M) {
+
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitcode := func() int {
+ clusterInstance = cluster.NewCluster(cell, hostname)
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ if err := clusterInstance.StartTopo(); err != nil {
+ return 1
+ }
+
+ // Directory for authn / authz config files
+ authDirectory := path.Join(clusterInstance.TmpDirectory, "auth")
+ if err := os.Mkdir(authDirectory, 0700); err != nil {
+ return 1
+ }
+
+ // Create grpc_server_auth_static.json file
+ grpcServerAuthStaticPath := path.Join(authDirectory, "grpc_server_auth_static.json")
+ if err := createFile(grpcServerAuthStaticPath, grpcServerAuthStaticJSON); err != nil {
+ return 1
+ }
+
+ // Create table_acl.json file
+ tableACLPath := path.Join(authDirectory, "table_acl.json")
+ if err := createFile(tableACLPath, tableACLJSON); err != nil {
+ return 1
+ }
+
+ // Configure vtgate to use static auth
+ clusterInstance.VtGateExtraArgs = []string{
+ "--grpc_auth_mode", "static",
+ "--grpc_auth_static_password_file", grpcServerAuthStaticPath,
+ "--grpc_use_effective_callerid",
+ "--grpc-use-static-authentication-callerid",
+ }
+
+ // Configure vttablet to use table ACL
+ clusterInstance.VtTabletExtraArgs = []string{
+ "--enforce-tableacl-config",
+ "--queryserver-config-strict-table-acl",
+ "--table-acl-config", tableACLPath,
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: sqlSchema,
+ }
+ if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ if err := clusterInstance.StartVtgate(); err != nil {
+ clusterInstance.VtgateProcess = cluster.VtgateProcess{}
+ return 1
+ }
+ vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort)
+
+ return m.Run()
+ }()
+ os.Exit(exitcode)
+}
+
+// TestEffectiveCallerIDWithAccess verifies that an authenticated gRPC static user with an effectiveCallerID that has ACL access can execute queries
+func TestEffectiveCallerIDWithAccess(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ vtgateConn, err := dialVTGate(ctx, t, "some_other_user", "test_password")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer vtgateConn.Close()
+
+ session := vtgateConn.Session(keyspaceName+"@primary", nil)
+ query := "SELECT id FROM test_table"
+ ctx = callerid.NewContext(ctx, callerid.NewEffectiveCallerID("user_with_access", "", ""), nil)
+ _, err = session.Execute(ctx, query, nil)
+ assert.NoError(t, err)
+}
+
+// TestEffectiveCallerIDWithNoAccess verifies that an authenticated gRPC static user without an effectiveCallerID that has ACL access cannot execute queries
+func TestEffectiveCallerIDWithNoAccess(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ vtgateConn, err := dialVTGate(ctx, t, "another_unrelated_user", "test_password")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer vtgateConn.Close()
+
+ session := vtgateConn.Session(keyspaceName+"@primary", nil)
+ query := "SELECT id FROM test_table"
+ ctx = callerid.NewContext(ctx, callerid.NewEffectiveCallerID("user_no_access", "", ""), nil)
+ _, err = session.Execute(ctx, query, nil)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "Select command denied to user")
+ assert.Contains(t, err.Error(), "for table 'test_table' (ACL check error)")
+}
+
+func dialVTGate(ctx context.Context, t *testing.T, username string, password string) (*vtgateconn.VTGateConn, error) {
+ clientCreds := &grpcclient.StaticAuthClientCreds{Username: username, Password: password}
+ creds := grpc.WithPerRPCCredentials(clientCreds)
+ dialerFunc := grpcvtgateconn.DialWithOpts(ctx, creds)
+ dialerName := t.Name()
+ vtgateconn.RegisterDialer(dialerName, dialerFunc)
+ return vtgateconn.DialProtocol(ctx, dialerName, vtgateGrpcAddress)
+}
+
+func createFile(path string, contents string) error {
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ _, err = f.WriteString(contents)
+ if err != nil {
+ return err
+ }
+ return f.Close()
+}
diff --git a/go/test/endtoend/vtgate/grpc_server_auth_static/main_test.go b/go/test/endtoend/vtgate/grpc_server_auth_static/main_test.go
new file mode 100644
index 00000000000..c00f4b3f2c1
--- /dev/null
+++ b/go/test/endtoend/vtgate/grpc_server_auth_static/main_test.go
@@ -0,0 +1,216 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package grpcserverauthstatic
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "path"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/vt/grpcclient"
+ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn"
+ "vitess.io/vitess/go/vt/vtgate/vtgateconn"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtgateGrpcAddress string
+ hostname = "localhost"
+ keyspaceName = "ks"
+ cell = "zone1"
+ sqlSchema = `
+ create table test_table (
+ id bigint,
+ val varchar(128),
+ primary key(id)
+ ) Engine=InnoDB;
+`
+ grpcServerAuthStaticJSON = `
+ [
+ {
+ "Username": "user_with_access",
+ "Password": "test_password"
+ },
+ {
+ "Username": "user_no_access",
+ "Password": "test_password"
+ }
+ ]
+`
+ tableACLJSON = `
+ {
+ "table_groups": [
+ {
+ "name": "default",
+ "table_names_or_prefixes": ["%"],
+ "readers": ["user_with_access"],
+ "writers": ["user_with_access"],
+ "admins": ["user_with_access"]
+ }
+ ]
+ }
+`
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitcode := func() int {
+ clusterInstance = cluster.NewCluster(cell, hostname)
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ if err := clusterInstance.StartTopo(); err != nil {
+ return 1
+ }
+
+ // Directory for authn / authz config files
+ authDirectory := path.Join(clusterInstance.TmpDirectory, "auth")
+ if err := os.Mkdir(authDirectory, 0700); err != nil {
+ return 1
+ }
+
+ // Create grpc_server_auth_static.json file
+ grpcServerAuthStaticPath := path.Join(authDirectory, "grpc_server_auth_static.json")
+ if err := createFile(grpcServerAuthStaticPath, grpcServerAuthStaticJSON); err != nil {
+ return 1
+ }
+
+ // Create table_acl.json file
+ tableACLPath := path.Join(authDirectory, "table_acl.json")
+ if err := createFile(tableACLPath, tableACLJSON); err != nil {
+ return 1
+ }
+
+ // Configure vtgate to use static auth
+ clusterInstance.VtGateExtraArgs = []string{
+ "--grpc_auth_mode", "static",
+ "--grpc_auth_static_password_file", grpcServerAuthStaticPath,
+ "--grpc-use-static-authentication-callerid",
+ }
+
+ // Configure vttablet to use table ACL
+ clusterInstance.VtTabletExtraArgs = []string{
+ "--enforce-tableacl-config",
+ "--queryserver-config-strict-table-acl",
+ "--table-acl-config", tableACLPath,
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: sqlSchema,
+ }
+ if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ if err := clusterInstance.StartVtgate(); err != nil {
+ clusterInstance.VtgateProcess = cluster.VtgateProcess{}
+ return 1
+ }
+ vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort)
+
+ return m.Run()
+ }()
+ os.Exit(exitcode)
+}
+
+// TestAuthenticatedUserWithAccess verifies that an authenticated gRPC static user with ACL access can execute queries
+func TestAuthenticatedUserWithAccess(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ vtgateConn, err := dialVTGate(ctx, t, "user_with_access", "test_password")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer vtgateConn.Close()
+
+ session := vtgateConn.Session(keyspaceName+"@primary", nil)
+ query := "SELECT id FROM test_table"
+ _, err = session.Execute(ctx, query, nil)
+ assert.NoError(t, err)
+}
+
+// TestAuthenticatedUserNoAccess verifies that an authenticated gRPC static user with no ACL access cannot execute queries
+func TestAuthenticatedUserNoAccess(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ vtgateConn, err := dialVTGate(ctx, t, "user_no_access", "test_password")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer vtgateConn.Close()
+
+ session := vtgateConn.Session(keyspaceName+"@primary", nil)
+ query := "SELECT id FROM test_table"
+ _, err = session.Execute(ctx, query, nil)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "Select command denied to user")
+ assert.Contains(t, err.Error(), "for table 'test_table' (ACL check error)")
+}
+
+// TestUnauthenticatedUser verifies that an unauthenticated gRPC user cannot execute queries
+func TestUnauthenticatedUser(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ vtgateConn, err := dialVTGate(ctx, t, "", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer vtgateConn.Close()
+
+ session := vtgateConn.Session(keyspaceName+"@primary", nil)
+ query := "SELECT id FROM test_table"
+ _, err = session.Execute(ctx, query, nil)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid credentials")
+}
+
+func dialVTGate(ctx context.Context, t *testing.T, username string, password string) (*vtgateconn.VTGateConn, error) {
+ clientCreds := &grpcclient.StaticAuthClientCreds{Username: username, Password: password}
+ creds := grpc.WithPerRPCCredentials(clientCreds)
+ dialerFunc := grpcvtgateconn.DialWithOpts(ctx, creds)
+ dialerName := t.Name()
+ vtgateconn.RegisterDialer(dialerName, dialerFunc)
+ return vtgateconn.DialProtocol(ctx, dialerName, vtgateGrpcAddress)
+}
+
+func createFile(path string, contents string) error {
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ _, err = f.WriteString(contents)
+ if err != nil {
+ return err
+ }
+ return f.Close()
+}
diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
index e941b78c2cd..ab844a8ffd1 100644
--- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
+++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
@@ -48,7 +48,7 @@ var (
PRIMARY KEY (id)
) Engine=InnoDB;`
vschemaDDL = "alter vschema create vindex test_vdx using hash"
- vschemaDDLError = fmt.Sprintf("Error 1105: cannot perform Update on keyspaces/%s/VSchema as the topology server connection is read-only",
+ vschemaDDLError = fmt.Sprintf("Error 1105 (HY000): cannot perform Update on keyspaces/%s/VSchema as the topology server connection is read-only",
keyspaceUnshardedName)
)
diff --git a/go/test/endtoend/vtgate/lookup_test.go b/go/test/endtoend/vtgate/lookup_test.go
index c984459ab97..3294c1898d6 100644
--- a/go/test/endtoend/vtgate/lookup_test.go
+++ b/go/test/endtoend/vtgate/lookup_test.go
@@ -42,6 +42,45 @@ func TestUnownedLookupInsertNull(t *testing.T) {
utils.Exec(t, conn, "insert into t8(id, parent_id, t9_id) VALUES (3, 2, 2)")
}
+func TestLookupUniqueWithAutocommit(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
+
+ // conn2 is to check entries in the lookup table
+ conn2, err := mysql.Connect(context.Background(), &vtParams)
+ require.Nil(t, err)
+ defer conn2.Close()
+
+ // Test that all vindex writes are autocommitted outside of any ongoing transactions.
+ //
+ // Also test that autocommited vindex entries are visible inside transactions, as lookups
+ // should also use the autocommit connection.
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (1, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 1", "[[INT64(1)]]")
+
+ utils.Exec(t, conn, "begin")
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (2, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)] [INT64(2)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 2", "[[INT64(2)]]")
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (3, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)] [INT64(2)] [INT64(3)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 3", "[[INT64(3)]]")
+
+ utils.Exec(t, conn, "savepoint sp_foobar")
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (4, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 4", "[[INT64(4)]]")
+}
+
func TestUnownedLookupInsertChecksKeyspaceIdsAreMatching(t *testing.T) {
conn, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go
index f683be5cef1..1d2bc59b50a 100644
--- a/go/test/endtoend/vtgate/main_test.go
+++ b/go/test/endtoend/vtgate/main_test.go
@@ -110,7 +110,7 @@ func start(t *testing.T) (*mysql.Conn, func()) {
deleteAll := func() {
utils.Exec(t, conn, "use ks")
- tables := []string{"t1", "t2", "vstream_test", "t3", "t4", "t6", "t7_xxhash", "t7_xxhash_idx", "t7_fk", "t8", "t9", "t9_id_to_keyspace_id_idx", "t1_id2_idx", "t2_id4_idx", "t3_id7_idx", "t4_id2_idx", "t5_null_vindex", "t6_id2_idx"}
+ tables := []string{"t1", "t2", "vstream_test", "t3", "t4", "t6", "t7_xxhash", "t7_xxhash_idx", "t7_fk", "t8", "t9", "t9_id_to_keyspace_id_idx", "t10", "t10_id_to_keyspace_id_idx", "t1_id2_idx", "t2_id4_idx", "t3_id7_idx", "t4_id2_idx", "t5_null_vindex", "t6_id2_idx"}
for _, table := range tables {
_, _ = utils.ExecAllowError(t, conn, "delete from "+table)
}
diff --git a/go/test/endtoend/vtgate/partialfailure/main_test.go b/go/test/endtoend/vtgate/partialfailure/main_test.go
index cf47ad6a70f..b74947d63e9 100644
--- a/go/test/endtoend/vtgate/partialfailure/main_test.go
+++ b/go/test/endtoend/vtgate/partialfailure/main_test.go
@@ -22,12 +22,11 @@ import (
"os"
"testing"
- "vitess.io/vitess/go/test/endtoend/utils"
-
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/utils"
)
var (
@@ -99,39 +98,54 @@ CREATE TABLE test_vdx (
`
)
+var enableSettingsPool bool
+
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
- exitCode := func() int {
- clusterInstance = cluster.NewCluster(cell, hostname)
- defer clusterInstance.Teardown()
+ code := runAllTests(m)
+ if code != 0 {
+ os.Exit(code)
+ }
- // Start topo server
- if err := clusterInstance.StartTopo(); err != nil {
- return 1
- }
+ println("running with settings pool enabled")
+ // run again with settings pool enabled.
+ enableSettingsPool = true
+ code = runAllTests(m)
+ os.Exit(code)
+}
- // Start keyspace
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- SchemaSQL: sqlSchema,
- VSchema: vSchema,
- }
- if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 0, false); err != nil {
+func runAllTests(m *testing.M) int {
+ clusterInstance = cluster.NewCluster(cell, hostname)
+ defer clusterInstance.Teardown()
- return 1
- }
+ // Start topo server
+ if err := clusterInstance.StartTopo(); err != nil {
+ return 1
+ }
- // Start vtgate
- clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--planner-version", "Gen4Fallback")
- if err := clusterInstance.StartVtgate(); err != nil {
- return 1
- }
- vtParams = clusterInstance.GetVTParams(keyspaceName)
- return m.Run()
- }()
- os.Exit(exitCode)
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: sqlSchema,
+ VSchema: vSchema,
+ }
+ if enableSettingsPool {
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool")
+ }
+ if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 0, false); err != nil {
+
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--planner-version", "Gen4Fallback")
+ if err := clusterInstance.StartVtgate(); err != nil {
+ return 1
+ }
+ vtParams = clusterInstance.GetVTParams(keyspaceName)
+ return m.Run()
}
func testAllModes(t *testing.T, stmts func(conn *mysql.Conn)) {
diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
index dfcfcc0c426..13a5d628725 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
@@ -33,7 +33,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
deleteAll := func() {
_, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp")
- tables := []string{"aggr_test", "t3", "t7_xxhash", "aggr_test_dates", "t7_xxhash_idx", "t1", "t2"}
+ tables := []string{"t9", "aggr_test", "t3", "t7_xxhash", "aggr_test_dates", "t7_xxhash_idx", "t1", "t2", "t10"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -352,7 +352,7 @@ func TestEmptyTableAggr(t *testing.T) {
})
}
- mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (3,'a1','foo',100), (3,'b1','bar',200)")
+ mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'b1','bar',200)")
for _, workload := range []string{"oltp", "olap"} {
t.Run(workload, func(t *testing.T) {
@@ -365,3 +365,88 @@ func TestEmptyTableAggr(t *testing.T) {
}
}
+
+func TestOrderByCount(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t9(id1, id2, id3) values(1, '1', '1'), (2, '2', '2'), (3, '2', '2'), (4, '3', '3'), (5, '3', '3'), (6, '3', '3')")
+
+ mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC", `[[VARCHAR("3")] [VARCHAR("2")] [VARCHAR("1")]]`)
+}
+
+func TestAggregateRandom(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(t1_id, name, value, shardKey) values (1, 'name 1', 'value 1', 1), (2, 'name 2', 'value 2', 2)")
+ mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)")
+
+ mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t1.shardKey, t1.name, count(t2.id) FROM t1 JOIN t2 ON t1.value != t2.shardKey GROUP BY t1.t1_id", `[[INT64(1) VARCHAR("name 1") INT64(2)] [INT64(2) VARCHAR("name 2") INT64(2)]]`)
+
+ mcmp.Exec("set sql_mode=''")
+ mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ tbl0.comm, count(*) from emp as tbl0, emp as tbl1 where tbl0.empno = tbl1.deptno", `[[NULL INT64(0)]]`)
+}
+
+// TestAggregateLeftJoin tests that aggregates work with left joins and does not ignore the count when column value does not match the right side table.
+func TestAggregateLeftJoin(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(t1_id, name, value, shardKey) values (11, 'r', 'r', 1), (3, 'r', 'r', 0)")
+ mcmp.Exec("insert into t2(id, shardKey) values (11, 1)")
+
+ mcmp.AssertMatchesNoOrder("SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(1)] [INT64(0)]]`)
+ mcmp.AssertMatches("SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(2)]]`)
+ mcmp.AssertMatches("SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(2)]]`)
+ mcmp.AssertMatches("SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`)
+}
+
+// TestScalarAggregate tests validates that only count is returned and no additional field is returned.gst
+func TestScalarAggregate(t *testing.T) {
+ // disable schema tracking to have weight_string column added to query send down to mysql.
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal=false")
+ require.NoError(t,
+ clusterInstance.RestartVtgate())
+
+ // update vtgate params
+ vtParams = clusterInstance.GetVTParams(keyspaceName)
+
+ defer func() {
+ // roll it back
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal")
+ require.NoError(t,
+ clusterInstance.RestartVtgate())
+ // update vtgate params
+ vtParams = clusterInstance.GetVTParams(keyspaceName)
+
+ }()
+
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)")
+ mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(distinct val1) from aggr_test", `[[INT64(3)]]`)
+}
+
+func TestAggregationRandomOnAnAggregatedValue(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t10(k, a, b) values (0, 100, 10), (10, 200, 20);")
+
+ mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from t10 where a = 100) A;",
+ `[[DECIMAL(100) DECIMAL(10) DECIMAL(10.0000)]]`)
+}
+
+func TestBuggyQueries(t *testing.T) {
+ // These queries have been found to be producing the wrong results by the query fuzzer
+ // Adding them as end2end tests to make sure we never get them wrong again
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t10(k, a, b) values (0, 100, 10), (10, 200, 20), (20, null, null)")
+
+ mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ sum(t1.a) from t10 as t1, t10 as t2",
+ `[[DECIMAL(900)]]`)
+}
diff --git a/go/test/endtoend/vtgate/queries/aggregation/main_test.go b/go/test/endtoend/vtgate/queries/aggregation/main_test.go
index 65cf3a0343b..a859002f44a 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/main_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/main_test.go
@@ -33,8 +33,8 @@ var (
clusterInstance *cluster.LocalProcessCluster
vtParams mysql.ConnParams
mysqlParams mysql.ConnParams
- keyspaceName = "ks_union"
- cell = "test_union"
+ keyspaceName = "ks_aggr"
+ cell = "test_aggr"
//go:embed schema.sql
schemaSQL string
diff --git a/go/test/endtoend/vtgate/queries/aggregation/schema.sql b/go/test/endtoend/vtgate/queries/aggregation/schema.sql
index 944c3783048..e1489b4bd21 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/schema.sql
+++ b/go/test/endtoend/vtgate/queries/aggregation/schema.sql
@@ -60,6 +60,7 @@ CREATE TABLE t1 (
`name` varchar(20) NOT NULL,
`value` varchar(50),
shardKey bigint,
+ PRIMARY KEY (t1_id),
UNIQUE KEY `t1id_name` (t1_id, `name`),
KEY `IDX_TA_ValueName` (`value`(20), `name`(10))
) ENGINE InnoDB;
@@ -69,3 +70,30 @@ CREATE TABLE t2 (
shardKey bigint,
PRIMARY KEY (id)
) ENGINE InnoDB;
+
+CREATE TABLE t10 (
+ k BIGINT PRIMARY KEY,
+ a INT,
+ b INT
+);
+
+CREATE TABLE emp (
+ empno bigint NOT NULL,
+ ename VARCHAR(10),
+ job VARCHAR(9),
+ mgr bigint,
+ hiredate DATE,
+ sal bigint,
+ comm bigint,
+ deptno bigint,
+ PRIMARY KEY (empno)
+) Engine = InnoDB
+ COLLATE = utf8mb4_general_ci;
+
+CREATE TABLE dept (
+ deptno bigint,
+ dname VARCHAR(14),
+ loc VARCHAR(13),
+ PRIMARY KEY (deptno)
+) Engine = InnoDB
+ COLLATE = utf8mb4_general_ci;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/aggregation/vschema.json b/go/test/endtoend/vtgate/queries/aggregation/vschema.json
index c2d3f133a35..050202aed81 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/vschema.json
+++ b/go/test/endtoend/vtgate/queries/aggregation/vschema.json
@@ -123,6 +123,30 @@
"name": "hash"
}
]
+ },
+ "t10": {
+ "column_vindexes": [
+ {
+ "column": "k",
+ "name": "hash"
+ }
+ ]
+ },
+ "emp": {
+ "column_vindexes": [
+ {
+ "column": "deptno",
+ "name": "hash"
+ }
+ ]
+ },
+ "dept": {
+ "column_vindexes": [
+ {
+ "column": "deptno",
+ "name": "hash"
+ }
+ ]
}
}
}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go
index 5da8d8bac9b..62601ed528d 100644
--- a/go/test/endtoend/vtgate/queries/derived/derived_test.go
+++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go
@@ -30,7 +30,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
require.NoError(t, err)
deleteAll := func() {
- tables := []string{"music"}
+ tables := []string{"music", "user"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -56,6 +56,7 @@ func TestDerivedTableWithOrderByLimit(t *testing.T) {
}
func TestDerivedAggregationOnRHS(t *testing.T) {
+ t.Skip("skipped for now, issue: https://github.com/vitessio/vitess/issues/11703")
mcmp, closer := start(t)
defer closer()
@@ -84,10 +85,7 @@ func TestDerivedTableWithHaving(t *testing.T) {
mcmp.Exec("insert into user(id, name) values(1,'toto'), (2,'tata'), (3,'titi'), (4,'tete'), (5,'foo')")
mcmp.Exec("set sql_mode = ''")
-
- // this is probably flaky? the id returned from the derived table could be any of the ids from user.
- // works on my machine (TM)
- mcmp.Exec("select /*vt+ PLANNER=Gen4 */ * from (select id from user having count(*) >= 1) s")
+ mcmp.AssertMatchesAnyNoCompare("select /*vt+ PLANNER=Gen4 */ * from (select id from user having count(*) >= 1) s", "[[INT64(1)]]", "[[INT64(4)]]")
}
func TestDerivedTableColumns(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go
index 22ca01d32b0..a6b5d1a1fc4 100644
--- a/go/test/endtoend/vtgate/queries/dml/insert_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go
@@ -71,8 +71,8 @@ func TestFailureInsertSelect(t *testing.T) {
// lookup key same (does not fail on MySQL as there is no lookup, and we have not put unique contrains on num column)
utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `lookup.Create: Code: ALREADY_EXISTS`)
// mismatch column count
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `Column count doesn't match value count at row 1`)
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `Column count doesn't match value count at row 1`)
+ mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count at row 1`)
+ mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count at row 1`)
})
}
}
@@ -323,8 +323,8 @@ func TestIgnoreInsertSelect(t *testing.T) {
utils.AssertMatches(t, mcmp.VtConn, "select oid, cust_no from order_tbl order by oid", `[[INT64(1) INT64(100)] [INT64(2) INT64(200)] [INT64(3) INT64(300)] [INT64(4) INT64(401)]]`)
// inserting on dup trying to update vindex throws error.
- utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update region_id = region_id + 1", `unsupported: DML cannot change vindex column`)
- utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update oid = oid + 100", `unsupported: DML cannot change vindex column`)
+ utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update region_id = region_id + 1", `unsupported: DML cannot update vindex column`)
+ utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update oid = oid + 100", `unsupported: DML cannot update vindex column`)
}
func TestIgnoreInsertSelectOlapMode(t *testing.T) {
@@ -361,8 +361,8 @@ func TestIgnoreInsertSelectOlapMode(t *testing.T) {
utils.AssertMatches(t, mcmp.VtConn, "select oid, cust_no from order_tbl order by oid", `[[INT64(1) INT64(100)] [INT64(2) INT64(200)] [INT64(3) INT64(300)] [INT64(4) INT64(401)]]`)
// inserting on dup trying to update vindex throws error.
- utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update region_id = region_id + 1", `unsupported: DML cannot change vindex column`)
- utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update oid = oid + 100", `unsupported: DML cannot change vindex column`)
+ utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update region_id = region_id + 1", `unsupported: DML cannot update vindex column`)
+ utils.AssertContainsError(t, mcmp.VtConn, "insert into order_tbl(region_id, oid, cust_no) select 1, 10, 1000 on duplicate key update oid = oid + 100", `unsupported: DML cannot update vindex column`)
}
func TestInsertSelectUnshardedUsingSharded(t *testing.T) {
@@ -384,3 +384,13 @@ func TestInsertSelectUnshardedUsingSharded(t *testing.T) {
})
}
}
+
+func TestRedactDupError(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,100),(1,2,200),(1,3,300)")
+
+ // inserting same rows, throws error.
+ mcmp.AssertContainsError("insert into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl", `BindVars: {REDACTED}`)
+}
diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go
index de3c7897ae0..7fb361837f8 100644
--- a/go/test/endtoend/vtgate/queries/dml/main_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/main_test.go
@@ -98,6 +98,8 @@ func TestMain(m *testing.M) {
return 1
}
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--vtgate-config-terse-errors")
+
// Start vtgate
clusterInstance.VtGatePlannerVersion = planbuilder.Gen4
err = clusterInstance.StartVtgate()
diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
index be902e5bf0e..0b7b72a4f25 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
+++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
@@ -38,10 +38,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
deleteAll := func() {
_, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp")
- tables := []string{
- "t1", "t1_id2_idx", "vstream_test", "t2", "t2_id4_idx", "t3", "t3_id7_idx", "t4",
- "t4_id2_idx", "t5_null_vindex", "t6", "t6_id2_idx", "t7_xxhash", "t7_xxhash_idx", "t7_fk", "t8",
- }
+ tables := []string{"t1", "t1_id2_idx", "t7_xxhash", "t7_xxhash_idx", "t7_fk"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -206,3 +203,24 @@ func TestMultipleSchemaPredicates(t *testing.T) {
require.Error(t, err)
require.Contains(t, err.Error(), "specifying two different database in the query is not supported")
}
+
+func TestInfrSchemaAndUnionAll(t *testing.T) {
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--planner-version=gen4")
+ require.NoError(t,
+ clusterInstance.RestartVtgate())
+
+ vtConnParams := clusterInstance.GetVTParams(keyspaceName)
+ vtConnParams.DbName = keyspaceName
+ conn, err := mysql.Connect(context.Background(), &vtConnParams)
+ require.NoError(t, err)
+
+ for _, workload := range []string{"oltp", "olap"} {
+ t.Run(workload, func(t *testing.T) {
+ utils.Exec(t, conn, fmt.Sprintf("set workload = %s", workload))
+ utils.Exec(t, conn, "start transaction")
+ utils.Exec(t, conn, `select connection_id()`)
+ utils.Exec(t, conn, `(select 'corder' from t1 limit 1) union all (select 'customer' from t7_xxhash limit 1)`)
+ utils.Exec(t, conn, "rollback")
+ })
+ }
+}
diff --git a/go/test/endtoend/vtgate/queries/informationschema/schema.sql b/go/test/endtoend/vtgate/queries/informationschema/schema.sql
index f34e4e8c5bd..1fc9949406b 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/schema.sql
+++ b/go/test/endtoend/vtgate/queries/informationschema/schema.sql
@@ -12,87 +12,6 @@ create table t1_id2_idx
primary key (id2)
) Engine = InnoDB;
-create table vstream_test
-(
- id bigint,
- val bigint,
- primary key (id)
-) Engine = InnoDB;
-
-create table t2
-(
- id3 bigint,
- id4 bigint,
- primary key (id3)
-) Engine = InnoDB;
-
-create table t2_id4_idx
-(
- id bigint not null auto_increment,
- id4 bigint,
- id3 bigint,
- primary key (id),
- key idx_id4 (id4)
-) Engine = InnoDB;
-
-create table t3
-(
- id5 bigint,
- id6 bigint,
- id7 bigint,
- primary key (id5)
-) Engine = InnoDB;
-
-create table t3_id7_idx
-(
- id bigint not null auto_increment,
- id7 bigint,
- id6 bigint,
- primary key (id)
-) Engine = InnoDB;
-
-create table t4
-(
- id1 bigint,
- id2 varchar(10),
- primary key (id1)
-) ENGINE = InnoDB
- DEFAULT charset = utf8mb4
- COLLATE = utf8mb4_general_ci;
-
-create table t4_id2_idx
-(
- id2 varchar(10),
- id1 bigint,
- keyspace_id varbinary(50),
- primary key (id2, id1)
-) Engine = InnoDB
- DEFAULT charset = utf8mb4
- COLLATE = utf8mb4_general_ci;
-
-create table t5_null_vindex
-(
- id bigint not null,
- idx varchar(50),
- primary key (id)
-) Engine = InnoDB;
-
-create table t6
-(
- id1 bigint,
- id2 varchar(10),
- primary key (id1)
-) Engine = InnoDB;
-
-create table t6_id2_idx
-(
- id2 varchar(10),
- id1 bigint,
- keyspace_id varbinary(50),
- primary key (id1),
- key (id2)
-) Engine = InnoDB;
-
create table t7_xxhash
(
uid varchar(50),
@@ -116,10 +35,3 @@ create table t7_fk
CONSTRAINT t7_fk_ibfk_1 foreign key (t7_uid) references t7_xxhash (uid)
on delete set null on update cascade
) Engine = InnoDB;
-
-create table t8
-(
- id8 bigint,
- testId bigint,
- primary key (id8)
-) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/queries/informationschema/vschema.json b/go/test/endtoend/vtgate/queries/informationschema/vschema.json
index b440e3905dc..eec57e9970d 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/vschema.json
+++ b/go/test/endtoend/vtgate/queries/informationschema/vschema.json
@@ -7,12 +7,12 @@
"unicode_loose_xxhash" : {
"type": "unicode_loose_xxhash"
},
- "t3_id7_vdx": {
- "type": "lookup_hash",
+ "t1_id2_idx": {
+ "type": "lookup_unique",
"params": {
- "table": "t3_id7_idx",
- "from": "id7",
- "to": "id6"
+ "table": "t1_id2_idx",
+ "from": "id2",
+ "to": "keyspace_id"
},
"owner": "t3"
},
@@ -28,15 +28,15 @@
}
},
"tables": {
- "t3": {
+ "t1": {
"column_vindexes": [
{
- "column": "id6",
+ "column": "id1",
"name": "hash"
},
{
- "column": "id7",
- "name": "t3_id7_vdx"
+ "column": "id2",
+ "name": "t1_id2_idx"
}
]
},
@@ -48,46 +48,6 @@
}
]
},
- "t9": {
- "column_vindexes": [
- {
- "column": "id1",
- "name": "hash"
- }
- ]
- },
- "aggr_test": {
- "column_vindexes": [
- {
- "column": "id",
- "name": "hash"
- }
- ],
- "columns": [
- {
- "name": "val1",
- "type": "VARCHAR"
- }
- ]
- },
- "aggr_test_dates": {
- "column_vindexes": [
- {
- "column": "id",
- "name": "hash"
- }
- ],
- "columns": [
- {
- "name": "val1",
- "type": "DATETIME"
- },
- {
- "name": "val2",
- "type": "DATETIME"
- }
- ]
- },
"t7_xxhash": {
"column_vindexes": [
{
diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go
index 1dcb377e130..d71dc55ef46 100644
--- a/go/test/endtoend/vtgate/queries/misc/main_test.go
+++ b/go/test/endtoend/vtgate/queries/misc/main_test.go
@@ -34,8 +34,12 @@ var (
vtParams mysql.ConnParams
mysqlParams mysql.ConnParams
keyspaceName = "ks_misc"
+ uks = "uks"
cell = "test_misc"
+ //go:embed uschema.sql
+ uschemaSQL string
+
//go:embed schema.sql
schemaSQL string
@@ -57,6 +61,20 @@ func TestMain(m *testing.M) {
return 1
}
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
+ "--queryserver-config-max-result-size", "1000000",
+ "--queryserver-config-query-timeout", "200",
+ "--queryserver-config-query-pool-timeout", "200")
+ // Start Unsharded keyspace
+ ukeyspace := &cluster.Keyspace{
+ Name: uks,
+ SchemaSQL: uschemaSQL,
+ }
+ err = clusterInstance.StartUnshardedKeyspace(*ukeyspace, 0, false)
+ if err != nil {
+ return 1
+ }
+
// Start keyspace
keyspace := &cluster.Keyspace{
Name: keyspaceName,
@@ -68,7 +86,8 @@ func TestMain(m *testing.M) {
return 1
}
- clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable_system_settings=true")
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
+ "--query-timeout", "100")
// Start vtgate
err = clusterInstance.StartVtgate()
if err != nil {
diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go
index fb66f395f9e..14448989956 100644
--- a/go/test/endtoend/vtgate/queries/misc/misc_test.go
+++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go
@@ -17,8 +17,15 @@ limitations under the License.
package misc
import (
+ "database/sql"
+ "fmt"
+ "strconv"
+ "strings"
"testing"
+ _ "github.com/go-sql-driver/mysql"
+
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/endtoend/cluster"
@@ -89,3 +96,147 @@ func TestInvalidDateTimeTimestampVals(t *testing.T) {
_, err = mcmp.ExecAllowAndCompareError(`select timestamp'2022'`)
require.Error(t, err)
}
+
+func TestQueryTimeoutWithDual(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ _, err := utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.04) from dual")
+ assert.NoError(t, err)
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.24) from dual")
+ assert.Error(t, err)
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "set @@session.query_timeout=20")
+ require.NoError(t, err)
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.04) from dual")
+ assert.Error(t, err)
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.01) from dual")
+ assert.NoError(t, err)
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=500 */ sleep(0.24) from dual")
+ assert.NoError(t, err)
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=10 */ sleep(0.04) from dual")
+ assert.Error(t, err)
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=15 */ sleep(0.001) from dual")
+ assert.NoError(t, err)
+}
+
+func TestQueryTimeoutWithTables(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ // unsharded
+ utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into uks.unsharded(id1) values (1),(2),(3),(4),(5)")
+ for i := 0; i < 12; i++ {
+ utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into uks.unsharded(id1) select id1+5 from uks.unsharded")
+ }
+
+ utils.Exec(t, mcmp.VtConn, "select count(*) from uks.unsharded where id1 > 31")
+ utils.Exec(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=100 */ count(*) from uks.unsharded where id1 > 31")
+
+ // the query usually takes more than 5ms to return. So this should fail.
+ _, err := utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=1 */ count(*) from uks.unsharded where id1 > 31")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context deadline exceeded")
+ assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)")
+
+ // sharded
+ utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into ks_misc.t1(id1, id2) values (1,2),(2,4),(3,6),(4,8),(5,10)")
+
+ // sleep take in seconds, so 0.1 is 100ms
+ utils.Exec(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=500 */ sleep(0.1) from t1 where id1 = 1")
+ _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=20 */ sleep(0.1) from t1 where id1 = 1")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "context deadline exceeded")
+ assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)")
+}
+
+// TestIntervalWithMathFunctions tests that the Interval keyword can be used with math functions.
+func TestIntervalWithMathFunctions(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ // Set the time zone explicitly to UTC, otherwise the output of FROM_UNIXTIME is going to be dependent
+ // on the time zone of the system.
+ mcmp.Exec("SET time_zone = '+00:00'")
+ mcmp.AssertMatches("select '2020-01-01' + interval month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month))-1 month", `[[CHAR("2020-12-01")]]`)
+ mcmp.AssertMatches("select DATE_ADD(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)", `[[DATETIME("2023-01-08 13:48:42")]]`)
+}
+
+// TestCast tests the queries that contain the cast function.
+func TestCast(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.AssertMatches("select cast('2023-01-07 12:34:56' as date) limit 1", `[[DATE("2023-01-07")]]`)
+ mcmp.AssertMatches("select cast('2023-01-07 12:34:56' as date)", `[[DATE("2023-01-07")]]`)
+ mcmp.AssertMatches("select cast('3.2' as float)", `[[FLOAT32(3.2)]]`)
+ mcmp.AssertMatches("select cast('3.2' as double)", `[[FLOAT64(3.2)]]`)
+ mcmp.AssertMatches("select cast('3.2' as unsigned)", `[[UINT64(3)]]`)
+}
+
+func TestOuterJoinWithPredicate(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ // This test uses a predicate on the outer side.
+ // These can't be pushed down to MySQL and have
+ // to be evaluated on the vtgate, so we are checking
+ // that evalengine handles the predicate correctly
+
+ mcmp.Exec("insert into t1(id1, id2) values (0,0), (1,10), (2,20), (3,30), (4,40)")
+
+ mcmp.AssertMatchesNoOrder("select A.id1, B.id2 from t1 as A left join t1 as B on A.id1*10 = B.id2 WHERE B.id2 BETWEEN 20 AND 30",
+ `[[INT64(2) INT64(20)] [INT64(3) INT64(30)]]`)
+ mcmp.AssertMatchesNoOrder("select A.id1, B.id2 from t1 as A left join t1 as B on A.id1*10 = B.id2 WHERE B.id2 NOT BETWEEN 20 AND 30",
+ `[[INT64(0) INT64(0)] [INT64(1) INT64(10)] [INT64(4) INT64(40)]]`)
+}
+
+// This test ensures that we support PREPARE statement with 65530 parameters.
+// It opens a MySQL connection using the go-mysql driver and execute a select query
+// it then checks the result contains the proper rows and that it's not failing.
+func TestHighNumberOfParams(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(id1) values (0), (1), (2), (3), (4)")
+
+ paramCount := 65530
+
+ // create the value and argument slices used to build the prepare stmt
+ var vals []any
+ var params []string
+ for i := 0; i < paramCount; i++ {
+ vals = append(vals, strconv.Itoa(i))
+ params = append(params, "?")
+ }
+
+ // connect to the vitess cluster
+ db, err := sql.Open("mysql", fmt.Sprintf("@tcp(%s:%v)/%s", vtParams.Host, vtParams.Port, vtParams.DbName))
+ require.NoError(t, err)
+
+ // run the query
+ r, err := db.Query(fmt.Sprintf("SELECT /*vt+ QUERY_TIMEOUT_MS=10000 */ id1 FROM t1 WHERE id1 in (%s) ORDER BY id1 ASC", strings.Join(params, ", ")), vals...)
+ require.NoError(t, err)
+
+ // check the results we got, we should get 5 rows with each: 0, 1, 2, 3, 4
+ // count is the row number we are currently visiting, also correspond to the
+ // column value we expect.
+ count := 0
+ for r.Next() {
+ j := -1
+ err := r.Scan(&j)
+ require.NoError(t, err)
+ require.Equal(t, j, count)
+ count++
+ }
+ require.Equal(t, 5, count)
+}
+
+func TestBuggyOuterJoin(t *testing.T) {
+ // We found a couple of inconsistencies around outer joins, adding these tests to stop regressions
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(id1, id2) values (1,2), (42,5), (5, 42)")
+
+ mcmp.Exec("select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2")
+}
diff --git a/go/test/endtoend/vtgate/queries/misc/schema.sql b/go/test/endtoend/vtgate/queries/misc/schema.sql
index 3e78cab09d6..ceac0c07e6d 100644
--- a/go/test/endtoend/vtgate/queries/misc/schema.sql
+++ b/go/test/endtoend/vtgate/queries/misc/schema.sql
@@ -1,4 +1,4 @@
-create table t1(
+create table if not exists t1(
id1 bigint,
id2 bigint,
primary key(id1)
diff --git a/go/test/endtoend/vtgate/queries/misc/uschema.sql b/go/test/endtoend/vtgate/queries/misc/uschema.sql
new file mode 100644
index 00000000000..6ba158b134e
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/misc/uschema.sql
@@ -0,0 +1,5 @@
+create table unsharded(
+ id1 bigint,
+ id2 bigint,
+ key(id1)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/main_test.go b/go/test/endtoend/vtgate/queries/reference/main_test.go
new file mode 100644
index 00000000000..4c9440ca4ff
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/main_test.go
@@ -0,0 +1,283 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reference
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "vitess.io/vitess/go/mysql"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ "vitess.io/vitess/go/vt/vtgate/vtgateconn"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ cell = "zone1"
+ hostname = "localhost"
+ vtParams mysql.ConnParams
+
+ unshardedKeyspaceName = "uks"
+ unshardedSQLSchema = `
+ CREATE TABLE IF NOT EXISTS zip(
+ id BIGINT NOT NULL AUTO_INCREMENT,
+ code5 INT(5) NOT NULL,
+ PRIMARY KEY(id)
+ ) ENGINE=InnoDB;
+
+ INSERT INTO zip(id, code5)
+ VALUES (1, 47107),
+ (2, 82845),
+ (3, 11237);
+
+ CREATE TABLE IF NOT EXISTS zip_detail(
+ id BIGINT NOT NULL AUTO_INCREMENT,
+ zip_id BIGINT NOT NULL,
+ discontinued_at DATE,
+ PRIMARY KEY(id)
+ ) ENGINE=InnoDB;
+
+ `
+ unshardedVSchema = `
+ {
+ "sharded":false,
+ "tables": {
+ "zip": {},
+ "zip_detail": {}
+ }
+ }
+ `
+ shardedKeyspaceName = "sks"
+ shardedSQLSchema = `
+ CREATE TABLE IF NOT EXISTS delivery_failure (
+ id BIGINT NOT NULL,
+ zip_detail_id BIGINT NOT NULL,
+ reason VARCHAR(255),
+ PRIMARY KEY(id)
+ ) ENGINE=InnoDB;
+ `
+ shardedVSchema = `
+ {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash"
+ }
+ },
+ "tables": {
+ "delivery_failure": {
+ "columnVindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "zip_detail": {
+ "type": "reference",
+ "source": "` + unshardedKeyspaceName + `.zip_detail"
+ }
+ }
+ }
+ `
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, hostname)
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ if err := clusterInstance.StartTopo(); err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ uKeyspace := &cluster.Keyspace{
+ Name: unshardedKeyspaceName,
+ SchemaSQL: unshardedSQLSchema,
+ VSchema: unshardedVSchema,
+ }
+ if err := clusterInstance.StartUnshardedKeyspace(*uKeyspace, 0, false); err != nil {
+ return 1
+ }
+
+ sKeyspace := &cluster.Keyspace{
+ Name: shardedKeyspaceName,
+ SchemaSQL: shardedSQLSchema,
+ VSchema: shardedVSchema,
+ }
+ if err := clusterInstance.StartKeyspace(*sKeyspace, []string{"-80", "80-"}, 0, false); err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ if err := clusterInstance.StartVtgate(); err != nil {
+ return 1
+ }
+
+ if err := clusterInstance.WaitForTabletsToHealthyInVtgate(); err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: "localhost",
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+
+ // TODO(maxeng) remove when we have a proper way to check
+ // materialization lag and cutover.
+ done := make(chan bool, 1)
+ expectRows := 2
+ go func() {
+ ctx := context.Background()
+ vtgateAddr := fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateProcess.GrpcPort)
+ vtgateConn, err := vtgateconn.Dial(ctx, vtgateAddr)
+ if err != nil {
+ done <- false
+ return
+ }
+ defer vtgateConn.Close()
+
+ maxWait := time.After(300 * time.Second)
+ for _, ks := range clusterInstance.Keyspaces {
+ if ks.Name != shardedKeyspaceName {
+ continue
+ }
+ for _, s := range ks.Shards {
+ var ok bool
+ for !ok {
+ select {
+ case <-maxWait:
+ fmt.Println("Waited too long for materialization, cancelling.")
+ done <- false
+ return
+ default:
+ }
+ shard := fmt.Sprintf("%s/%s@primary", ks.Name, s.Name)
+ session := vtgateConn.Session(shard, nil)
+ _, err := session.Execute(ctx, "SHOW CREATE TABLE zip_detail", map[string]*querypb.BindVariable{})
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to SHOW CREATE TABLE zip_detail; might not exist yet: %v\n", err)
+ time.Sleep(1 * time.Second)
+ continue
+ }
+ qr, err := session.Execute(ctx, "SELECT * FROM zip_detail", map[string]*querypb.BindVariable{})
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to query sharded keyspace for zip_detail rows: %v\n", err)
+ done <- false
+ return
+ }
+ if len(qr.Rows) != expectRows {
+ fmt.Fprintf(os.Stderr, "Shard %s doesn't yet have expected number of zip_detail rows\n", shard)
+ time.Sleep(10 * time.Second)
+ continue
+ }
+ fmt.Fprintf(os.Stdout, "Shard %s has expected number of zip_detail rows.\n", shard)
+ ok = true
+ }
+ }
+ fmt.Println("All shards have expected number of zip_detail rows.")
+ done <- true
+ }
+ }()
+
+ // Materialize zip_detail to sharded keyspace.
+ output, err := clusterInstance.VtctlProcess.ExecuteCommandWithOutput(
+ "Materialize",
+ "--",
+ "--tablet_types",
+ "PRIMARY",
+ `{
+ "workflow": "copy_zip_detail",
+ "source_keyspace": "`+unshardedKeyspaceName+`",
+ "target_keyspace": "`+shardedKeyspaceName+`",
+ "tablet_types": "PRIMARY",
+ "table_settings": [
+ {
+ "target_table": "zip_detail",
+ "source_expression": "select * from zip_detail",
+ "create_ddl": "copy"
+ }
+ ]
+ }`,
+ )
+ fmt.Fprintf(os.Stderr, "Output from materialize: %s\n", output)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Got error trying to start materialize zip_detail: %v\n", err)
+ return 1
+ }
+
+ ctx := context.Background()
+ vtgateAddr := fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateProcess.GrpcPort)
+ vtgateConn, err := vtgateconn.Dial(ctx, vtgateAddr)
+ if err != nil {
+ return 1
+ }
+ defer vtgateConn.Close()
+
+ session := vtgateConn.Session("@primary", nil)
+ // INSERT some zip_detail rows.
+ if _, err := session.Execute(ctx, `
+ INSERT INTO zip_detail(id, zip_id, discontinued_at)
+ VALUES (1, 1, '2022-05-13'),
+ (2, 2, '2022-08-15')
+ `, map[string]*querypb.BindVariable{}); err != nil {
+ return 1
+ }
+
+ // INSERT some delivery_failure rows.
+ if _, err := session.Execute(ctx, `
+ INSERT INTO delivery_failure(id, zip_detail_id, reason)
+ VALUES (1, 1, 'Failed delivery due to discontinued zipcode.'),
+ (2, 2, 'Failed delivery due to discontinued zipcode.'),
+ (3, 3, 'Failed delivery due to unknown reason.');
+ `, map[string]*querypb.BindVariable{}); err != nil {
+ return 1
+ }
+
+ if ok := <-done; !ok {
+ fmt.Fprintf(os.Stderr, "Materialize did not succeed.\n")
+ return 1
+ }
+
+ // Stop materialize zip_detail to sharded keyspace.
+ err = clusterInstance.VtctlProcess.ExecuteCommand(
+ "Workflow",
+ "--",
+ shardedKeyspaceName+".copy_zip_detail",
+ "delete",
+ )
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to stop materialization workflow: %v", err)
+ return 1
+ }
+
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go
new file mode 100644
index 00000000000..75efc840880
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reference
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/utils"
+
+ "vitess.io/vitess/go/test/endtoend/cluster"
+)
+
+func start(t *testing.T) (*mysql.Conn, func()) {
+ ctx := context.Background()
+ vtConn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+
+ return vtConn, func() {
+ vtConn.Close()
+ cluster.PanicHandler(t)
+ }
+}
+
+// TestGlobalReferenceRouting tests that unqualified queries for reference
+// tables go to the right place.
+//
+// Given:
+// - Unsharded keyspace `uks` and sharded keyspace `sks`.
+// - Source table `uks.zip_detail` and a reference table `sks.zip_detail`,
+// initially with the same rows.
+// - Unsharded table `uks.zip` and sharded table `sks.delivery_failure`.
+//
+// When: we execute `INSERT INTO zip_detail ...`,
+// Then: `zip_detail` should be routed to `uks`.
+//
+// When: we execute `UPDATE zip_detail ...`,
+// Then: `zip_detail` should be routed to `uks`.
+//
+// When: we execute `SELECT ... FROM zip JOIN zip_detail ...`,
+// Then: `zip_detail` should be routed to `uks`.
+//
+// When: we execute `SELECT ... FROM delivery_failure JOIN zip_detail ...`,
+// Then: `zip_detail` should be routed to `sks`.
+//
+// When: we execute `DELETE FROM zip_detail ...`,
+// Then: `zip_detail` should be routed to `uks`.
+func TestReferenceRouting(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
+
+ // INSERT should route an unqualified zip_detail to unsharded keyspace.
+ utils.Exec(t, conn, "INSERT INTO zip_detail(id, zip_id, discontinued_at) VALUES(3, 1, DATE('2022-12-03'))")
+ // Verify with qualified zip_detail queries to each keyspace. The unsharded
+ // keyspace should have an extra row.
+ utils.AssertMatches(
+ t,
+ conn,
+ "SELECT COUNT(zd.id) FROM "+unshardedKeyspaceName+".zip_detail zd WHERE id = 3",
+ `[[INT64(1)]]`,
+ )
+ utils.AssertMatches(
+ t,
+ conn,
+ "SELECT COUNT(zd.id) FROM "+shardedKeyspaceName+".zip_detail zd WHERE id = 3",
+ `[[INT64(0)]]`,
+ )
+
+ // UPDATE should route an unqualified zip_detail to unsharded keyspace.
+ utils.Exec(t, conn,
+ "UPDATE zip_detail SET discontinued_at = NULL WHERE id = 2")
+ // Verify with qualified zip_detail queries to each keyspace. The unsharded
+ // keyspace should have a matching row, but not the sharded keyspace.
+ utils.AssertMatches(
+ t,
+ conn,
+ "SELECT COUNT(id) FROM "+unshardedKeyspaceName+".zip_detail WHERE discontinued_at IS NULL",
+ `[[INT64(1)]]`,
+ )
+ utils.AssertMatches(
+ t,
+ conn,
+ "SELECT COUNT(id) FROM "+shardedKeyspaceName+".zip_detail WHERE discontinued_at IS NULL",
+ `[[INT64(0)]]`,
+ )
+
+ // SELECT a table in unsharded keyspace and JOIN unqualified zip_detail.
+ utils.AssertMatches(
+ t,
+ conn,
+ "SELECT COUNT(zd.id) FROM zip z JOIN zip_detail zd ON z.id = zd.zip_id WHERE zd.id = 3",
+ `[[INT64(1)]]`,
+ )
+
+ // SELECT a table in sharded keyspace and JOIN unqualified zip_detail.
+ // Use gen4 planner to avoid errors from gen3 planner.
+ utils.AssertMatches(
+ t,
+ conn,
+ `SELECT /*vt+ PLANNER=gen4 */ COUNT(zd.id)
+ FROM delivery_failure df
+ JOIN zip_detail zd ON zd.id = df.zip_detail_id WHERE zd.id = 3`,
+ `[[INT64(0)]]`,
+ )
+
+ // DELETE should route an unqualified zip_detail to unsharded keyspace.
+ utils.Exec(t, conn, "DELETE FROM zip_detail")
+ // Verify with qualified zip_detail queries to each keyspace. The unsharded
+ // keyspace should not have any rows; the sharded keyspace should.
+ utils.AssertMatches(
+ t,
+ conn,
+ "SELECT COUNT(id) FROM "+unshardedKeyspaceName+".zip_detail",
+ `[[INT64(0)]]`,
+ )
+ utils.AssertMatches(
+ t,
+ conn,
+ "SELECT COUNT(id) FROM "+shardedKeyspaceName+".zip_detail",
+ `[[INT64(2)]]`,
+ )
+}
diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
index d955c4b2d06..01cc7b2ee54 100644
--- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
@@ -58,14 +58,15 @@ func TestSubqueriesHasValues(t *testing.T) {
mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`)
}
-// Test only supported in >= v14.0.0
+// Test only supported in >= v16.0.0
func TestSubqueriesExists(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 14, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
mcmp, closer := start(t)
defer closer()
mcmp.Exec("insert into t1(id1, id2) values (0,1),(1,2),(2,3),(3,4),(4,5),(5,6)")
mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE EXISTS (SELECT id1 FROM t1 WHERE id1 > 0) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`)
+ mcmp.AssertMatches(`select * from (select 1) as tmp where exists(select 1 from t1 where id1 = 1)`, `[[INT32(1)]]`)
}
func TestQueryAndSubQWithLimit(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/queries/vtexplain/main_test.go b/go/test/endtoend/vtgate/queries/vexplain/main_test.go
similarity index 99%
rename from go/test/endtoend/vtgate/queries/vtexplain/main_test.go
rename to go/test/endtoend/vtgate/queries/vexplain/main_test.go
index 8a4fb66db4f..c1c401bc573 100644
--- a/go/test/endtoend/vtgate/queries/vtexplain/main_test.go
+++ b/go/test/endtoend/vtgate/queries/vexplain/main_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package vtexplain
+package vexplain
import (
_ "embed"
diff --git a/go/test/endtoend/vtgate/queries/vtexplain/schema.sql b/go/test/endtoend/vtgate/queries/vexplain/schema.sql
similarity index 100%
rename from go/test/endtoend/vtgate/queries/vtexplain/schema.sql
rename to go/test/endtoend/vtgate/queries/vexplain/schema.sql
diff --git a/go/test/endtoend/vtgate/queries/vtexplain/vtexplain_test.go b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go
similarity index 66%
rename from go/test/endtoend/vtgate/queries/vtexplain/vtexplain_test.go
rename to go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go
index 75a7668d936..a06db1e8789 100644
--- a/go/test/endtoend/vtgate/queries/vtexplain/vtexplain_test.go
+++ b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package vtexplain
+package vexplain
import (
"context"
@@ -22,22 +22,45 @@ import (
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/utils"
)
-func TestVtGateVtExplain(t *testing.T) {
- vtParams := clusterInstance.GetVTParams(shardedKs)
- conn, err := mysql.Connect(context.Background(), &vtParams)
+func start(t *testing.T) (*mysql.Conn, func()) {
+ ctx := context.Background()
+ vtConn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
- defer conn.Close()
+
+ deleteAll := func() {
+ _, _ = utils.ExecAllowError(t, vtConn, "set workload = oltp")
+
+ tables := []string{"user", "lookup", "lookup_unique"}
+ for _, table := range tables {
+ _, _ = utils.ExecAllowError(t, vtConn, "delete from "+table)
+ }
+ }
+
+ deleteAll()
+
+ return vtConn, func() {
+ deleteAll()
+ vtConn.Close()
+ cluster.PanicHandler(t)
+ }
+}
+
+func TestVtGateVExplain(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
utils.AssertContainsError(t, conn,
- `explain format=vtexplain insert into user (id,lookup,lookup_unique) values (4,'apa','foo'),(5,'apa','bar'),(6,'monkey','nobar')`,
- "vtexplain will actually run queries")
+ `vexplain queries insert into user (id,lookup,lookup_unique) values (4,'apa','foo'),(5,'apa','bar'),(6,'monkey','nobar')`,
+ "vexplain queries/all will actually run queries")
expected := `[[INT32(0) VARCHAR("ks") VARCHAR("-40") VARCHAR("begin")]` +
- ` [INT32(0) VARCHAR("ks") VARCHAR("-40") VARCHAR("insert into lookup(lookup, id, keyspace_id) values ('apa', 1, '\x16k@\xb4J\xbaK\xd6'), ('apa', 2, '\x06\xe7\xea\\\"Βp\x8f') on duplicate key update lookup = values(lookup), id = values(id), keyspace_id = values(keyspace_id)")]` +
+ ` [INT32(0) VARCHAR("ks") VARCHAR("-40") VARCHAR("insert into lookup(lookup, id, keyspace_id) values ('apa', 1, '\x16k@\xb4J\xbaK\xd6') on duplicate key update lookup = values(lookup), id = values(id), keyspace_id = values(keyspace_id)")]` +
` [INT32(1) VARCHAR("ks") VARCHAR("40-80") VARCHAR("begin")]` +
` [INT32(1) VARCHAR("ks") VARCHAR("40-80") VARCHAR("insert into lookup(lookup, id, keyspace_id) values ('monkey', 3, 'N\xb1\x90ɢ\xfa\x16\x9c') on duplicate key update lookup = values(lookup), id = values(id), keyspace_id = values(keyspace_id)")]` +
` [INT32(2) VARCHAR("ks") VARCHAR("-40") VARCHAR("commit")]` +
@@ -45,21 +68,24 @@ func TestVtGateVtExplain(t *testing.T) {
` [INT32(4) VARCHAR("ks") VARCHAR("40-80") VARCHAR("begin")]` +
` [INT32(4) VARCHAR("ks") VARCHAR("40-80") VARCHAR("insert into lookup_unique(lookup_unique, keyspace_id) values ('monkey', 'N\xb1\x90ɢ\xfa\x16\x9c')")]` +
` [INT32(5) VARCHAR("ks") VARCHAR("-40") VARCHAR("begin")]` +
- ` [INT32(5) VARCHAR("ks") VARCHAR("-40") VARCHAR("insert into lookup_unique(lookup_unique, keyspace_id) values ('apa', '\x16k@\xb4J\xbaK\xd6'), ('bandar', '\x06\xe7\xea\\\"Βp\x8f')")]` +
+ ` [INT32(5) VARCHAR("ks") VARCHAR("-40") VARCHAR("insert into lookup_unique(lookup_unique, keyspace_id) values ('apa', '\x16k@\xb4J\xbaK\xd6')")]` +
` [INT32(6) VARCHAR("ks") VARCHAR("40-80") VARCHAR("commit")]` +
` [INT32(7) VARCHAR("ks") VARCHAR("-40") VARCHAR("commit")]` +
` [INT32(8) VARCHAR("ks") VARCHAR("40-80") VARCHAR("begin")]` +
` [INT32(8) VARCHAR("ks") VARCHAR("40-80") VARCHAR("insert into ` + "`user`" + `(id, lookup, lookup_unique) values (3, 'monkey', 'monkey')")]` +
` [INT32(9) VARCHAR("ks") VARCHAR("-40") VARCHAR("begin")]` +
- ` [INT32(9) VARCHAR("ks") VARCHAR("-40") VARCHAR("insert into ` + "`user`" + `(id, lookup, lookup_unique) values (1, 'apa', 'apa'), (2, 'apa', 'bandar')")]]`
- utils.AssertMatchesNoOrder(t, conn, `explain /*vt+ EXECUTE_DML_QUERIES */ format=vtexplain insert into user (id,lookup,lookup_unique) values (1,'apa','apa'),(2,'apa','bandar'),(3,'monkey','monkey')`, expected)
+ ` [INT32(9) VARCHAR("ks") VARCHAR("-40") VARCHAR("insert into ` + "`user`" + `(id, lookup, lookup_unique) values (1, 'apa', 'apa')")]]`
+ utils.AssertMatchesNoOrder(t, conn, `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries insert into user (id,lookup,lookup_unique) values (1,'apa','apa'),(3,'monkey','monkey')`, expected)
+
+ // Assert that the output of vexplain all doesn't have begin queries because they aren't explainable
+ utils.AssertMatchesNotContains(t, conn, `vexplain /*vt+ EXECUTE_DML_QUERIES */ all insert into user (id,lookup,lookup_unique) values (2,'apa','bandar')`, `begin`)
expected = `[[INT32(0) VARCHAR("ks") VARCHAR("-40") VARCHAR("select lookup, keyspace_id from lookup where lookup in ('apa')")]` +
` [INT32(1) VARCHAR("ks") VARCHAR("-40") VARCHAR("select id from ` + "`user`" + ` where lookup = 'apa'")]]`
for _, mode := range []string{"oltp", "olap"} {
t.Run(mode, func(t *testing.T) {
utils.Exec(t, conn, "set workload = "+mode)
- utils.AssertMatches(t, conn, `explain format=vtexplain select id from user where lookup = "apa"`, expected)
+ utils.AssertMatches(t, conn, `vexplain queries select id from user where lookup = "apa"`, expected)
})
}
@@ -86,8 +112,25 @@ func TestVtGateVtExplain(t *testing.T) {
` [INT32(11) VARCHAR("ks") VARCHAR("c0-") VARCHAR("insert into ` + "`user`" + `(id, lookup, lookup_unique) values (4, 'apa', 'foo'), (6, 'monkey', 'nobar')")]]`
utils.AssertMatchesNoOrder(t, conn,
- `explain /*vt+ EXECUTE_DML_QUERIES */ format=vtexplain insert into user (id,lookup,lookup_unique) values (4,'apa','foo'),(5,'apa','bar'),(6,'monkey','nobar')`,
+ `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries insert into user (id,lookup,lookup_unique) values (4,'apa','foo'),(5,'apa','bar'),(6,'monkey','nobar')`,
expected)
utils.Exec(t, conn, "rollback")
}
+
+func TestVExplainPlan(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
+
+ // the test infra is adding \ to the test output
+ utils.AssertMatchesContains(t, conn, `vexplain plan select id from user where lookup = "apa"`, `\"OperatorType\": \"VindexLookup\"`)
+ utils.AssertMatchesContains(t, conn, `vexplain plan insert into user (id,lookup,lookup_unique) values (4,'apa','foo'),(5,'apa','bar'),(6,'monkey','nobar')`, "Insert")
+}
+
+func TestVExplainAll(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
+
+ utils.AssertMatchesContains(t, conn, `vexplain /*vt+ EXECUTE_DML_QUERIES */ all insert into user (id,lookup,lookup_unique) values (4,'apa','foo'),(5,'apa','bar'),(6,'monkey','nobar')`, "Insert", "mysql_explain_json")
+ utils.AssertMatchesContains(t, conn, `vexplain all select id from user where lookup = "apa"`, "mysql_explain_json", "ByDestination")
+}
diff --git a/go/test/endtoend/vtgate/queries/vtexplain/vschema.json b/go/test/endtoend/vtgate/queries/vexplain/vschema.json
similarity index 100%
rename from go/test/endtoend/vtgate/queries/vtexplain/vschema.json
rename to go/test/endtoend/vtgate/queries/vexplain/vschema.json
diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go
index 73044fe4573..cc76e7a3b46 100644
--- a/go/test/endtoend/vtgate/reservedconn/main_test.go
+++ b/go/test/endtoend/vtgate/reservedconn/main_test.go
@@ -23,6 +23,7 @@ import (
"testing"
"vitess.io/vitess/go/test/endtoend/utils"
+ querypb "vitess.io/vitess/go/vt/proto/query"
"github.com/stretchr/testify/assert"
@@ -144,6 +145,7 @@ func runAllTests(m *testing.M) int {
// This test requires setting the mysql_server_version vtgate flag
// to 5.7 regardless of the actual MySQL version used for the tests.
clusterInstance.VtGateExtraArgs = []string{"--lock_heartbeat_time", "2s", "--mysql_server_version", "5.7.0"}
+ clusterInstance.VtGatePlannerVersion = querypb.ExecuteOptions_Gen4
if err := clusterInstance.StartVtgate(); err != nil {
return 1
}
diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go
index 1f660306049..0dc2261c7ba 100644
--- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go
+++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go
@@ -348,7 +348,7 @@ func TestSysvarSocket(t *testing.T) {
require.True(t, ok, "not a mysql error: %T", err)
assert.Equal(t, mysql.ERIncorrectGlobalLocalVar, sqlErr.Number())
assert.Equal(t, mysql.SSUnknownSQLState, sqlErr.SQLState())
- assert.Equal(t, "variable 'socket' is a read only variable (errno 1238) (sqlstate HY000) during query: set socket = '/any/path'", sqlErr.Error())
+ assert.Equal(t, "VT03010: variable 'socket' is a read only variable (errno 1238) (sqlstate HY000) during query: set socket = '/any/path'", sqlErr.Error())
}
func TestReservedConnInStreaming(t *testing.T) {
@@ -421,3 +421,42 @@ func checkOltpAndOlapInterchangingTx(t *testing.T, conn *mysql.Conn) {
utils.Exec(t, conn, "set workload = oltp")
utils.AssertMatches(t, conn, "select id, val1 from test where id = 80", "[[INT64(80) NULL]]")
}
+
+func TestSysVarTxIsolation(t *testing.T) {
+ conn, err := mysql.Connect(context.Background(), &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // will run every check twice to see that the isolation level is set for all the queries in the session and
+
+ // default from mysql
+ utils.AssertMatches(t, conn, "select @@transaction_isolation", `[[VARCHAR("REPEATABLE-READ")]]`)
+ // ensuring it goes to mysql
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `REPEATABLE-READ`)
+ // second run, ensuring it has the same value.
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `REPEATABLE-READ`)
+
+ // setting to different value.
+ utils.Exec(t, conn, "set @@transaction_isolation = 'read-committed'")
+ utils.AssertMatches(t, conn, "select @@transaction_isolation", `[[VARCHAR("READ-COMMITTED")]]`)
+ // ensuring it goes to mysql
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `READ-COMMITTED`)
+ // second run, to ensuring the setting is applied on the session and not just on next query after settings.
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `READ-COMMITTED`)
+
+ // changing setting to different value.
+ utils.Exec(t, conn, "set session transaction isolation level read uncommitted")
+ utils.AssertMatches(t, conn, "select @@transaction_isolation", `[[VARCHAR("READ-UNCOMMITTED")]]`)
+ // ensuring it goes to mysql
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `READ-UNCOMMITTED`)
+ // second run, to ensuring the setting is applied on the session and not just on next query after settings.
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `READ-UNCOMMITTED`)
+
+ // changing setting to different value.
+ utils.Exec(t, conn, "set transaction isolation level serializable")
+ utils.AssertMatches(t, conn, "select @@transaction_isolation", `[[VARCHAR("SERIALIZABLE")]]`)
+ // ensuring it goes to mysql
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `SERIALIZABLE`)
+ // second run, to ensuring the setting is applied on the session and not just on next query after settings.
+ utils.AssertContains(t, conn, "select @@transaction_isolation, connection_id()", `SERIALIZABLE`)
+}
diff --git a/go/test/endtoend/vtgate/schema.sql b/go/test/endtoend/vtgate/schema.sql
index c597bd7e53e..536bec397ec 100644
--- a/go/test/endtoend/vtgate/schema.sql
+++ b/go/test/endtoend/vtgate/schema.sql
@@ -138,3 +138,17 @@ create table t9_id_to_keyspace_id_idx
keyspace_id varbinary(10),
primary key (id)
) Engine = InnoDB;
+
+create table t10
+(
+ id bigint,
+ sharding_key bigint,
+ primary key (id)
+) Engine = InnoDB;
+
+create table t10_id_to_keyspace_id_idx
+(
+ id bigint,
+ keyspace_id varbinary(10),
+ primary key (id)
+) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go
index a603bc1c89b..0b00b571393 100644
--- a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go
+++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go
@@ -19,11 +19,14 @@ package loadkeyspace
import (
"os"
"path"
+ "strings"
"testing"
"time"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/test/endtoend/utils"
+
"vitess.io/vitess/go/test/endtoend/cluster"
)
@@ -53,44 +56,6 @@ var (
`
)
-func TestBlockedLoadKeyspace(t *testing.T) {
- defer cluster.PanicHandler(t)
- var err error
-
- clusterInstance = cluster.NewCluster(cell, hostname)
- defer clusterInstance.Teardown()
-
- // Start topo server
- err = clusterInstance.StartTopo()
- require.NoError(t, err)
-
- // Start keyspace without the --queryserver-config-schema-change-signal flag
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- SchemaSQL: sqlSchema,
- }
- clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal=false"}
- err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false)
- require.NoError(t, err)
-
- // Start vtgate with the schema_change_signal flag
- clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"}
- err = clusterInstance.StartVtgate()
- require.NoError(t, err)
-
- // wait for addKeyspaceToTracker to timeout
- time.Sleep(30 * time.Second)
-
- // check warning logs
- logDir := clusterInstance.VtgateProcess.LogDir
- all, err := os.ReadFile(path.Join(logDir, "vtgate-stderr.txt"))
- require.NoError(t, err)
- require.Contains(t, string(all), "Unable to get initial schema reload")
-
- // This error should not be logged as the initial load itself failed.
- require.NotContains(t, string(all), "Unable to add keyspace to tracker")
-}
-
func TestLoadKeyspaceWithNoTablet(t *testing.T) {
defer cluster.PanicHandler(t)
var err error
@@ -115,6 +80,9 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) {
for _, vttablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
err = vttablet.VttabletProcess.TearDown()
require.NoError(t, err)
+ utils.TimeoutAction(t, 1*time.Minute, "timeout - teardown of VTTablet", func() bool {
+ return vttablet.VttabletProcess.GetStatus() == ""
+ })
}
// Start vtgate with the schema_change_signal flag
@@ -122,11 +90,13 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) {
err = clusterInstance.StartVtgate()
require.NoError(t, err)
- // check warning logs
- logDir := clusterInstance.VtgateProcess.LogDir
- all, err := os.ReadFile(path.Join(logDir, "vtgate-stderr.txt"))
- require.NoError(t, err)
- require.Contains(t, string(all), "Unable to get initial schema reload")
+ // After starting VTGate we need to leave enough time for resolveAndLoadKeyspace to reach
+ // the schema tracking timeout (5 seconds).
+ utils.TimeoutAction(t, 5*time.Minute, "timeout - could not find 'Unable to get initial schema reload' in 'vtgate-stderr.txt'", func() bool {
+ logDir := clusterInstance.VtgateProcess.LogDir
+ all, _ := os.ReadFile(path.Join(logDir, "vtgate-stderr.txt"))
+ return strings.Contains(string(all), "Unable to get initial schema reload")
+ })
}
func TestNoInitialKeyspace(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go
index 9d3de02f846..fa0fa2e4672 100644
--- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go
+++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go
@@ -147,18 +147,20 @@ func TestVSchemaTrackerKeyspaceReInit(t *testing.T) {
require.NoError(t, err)
err = clusterInstance.WaitForTabletsToHealthyInVtgate()
require.NoError(t, err)
- time.Sleep(time.Duration(signalInterval*2) * time.Second)
- var newResults any
- readVSchema(t, &clusterInstance.VtgateProcess, &newResults)
- assert.Equal(t, originalResults, newResults)
- newResults = nil
+
+ utils.TimeoutAction(t, 1*time.Minute, "timeout - could not find the updated vschema in VTGate", func() bool {
+ var newResults any
+ readVSchema(t, &clusterInstance.VtgateProcess, &newResults)
+ return assert.ObjectsAreEqual(originalResults, newResults)
+ })
}
}
func readVSchema(t *testing.T, vtgate *cluster.VtgateProcess, results *any) {
httpClient := &http.Client{Timeout: 5 * time.Second}
resp, err := httpClient.Get(vtgate.VSchemaURL)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
json.NewDecoder(resp.Body).Decode(results)
}
diff --git a/go/test/endtoend/vtgate/schematracker/sharded/schema.sql b/go/test/endtoend/vtgate/schematracker/sharded/schema.sql
new file mode 100644
index 00000000000..e4cd2a07965
--- /dev/null
+++ b/go/test/endtoend/vtgate/schematracker/sharded/schema.sql
@@ -0,0 +1,19 @@
+create table t2(
+ id3 bigint,
+ id4 bigint,
+ primary key(id3)
+) Engine=InnoDB;
+
+create table t2_id4_idx(
+ id bigint not null auto_increment,
+ id4 bigint,
+ id3 bigint,
+ primary key(id),
+ key idx_id4(id4)
+) Engine=InnoDB;
+
+create table t8(
+ id8 bigint,
+ testId bigint,
+ primary key(id8)
+) Engine=InnoDB;
diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
index ee5b10483f1..532e5edae90 100644
--- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
+++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
@@ -18,12 +18,17 @@ package sharded
import (
"context"
+ _ "embed"
"flag"
+ "fmt"
"os"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+
"vitess.io/vitess/go/test/endtoend/utils"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder"
"github.com/stretchr/testify/require"
@@ -36,86 +41,11 @@ var (
vtParams mysql.ConnParams
KeyspaceName = "ks"
Cell = "test"
- SchemaSQL = `
-create table t2(
- id3 bigint,
- id4 bigint,
- primary key(id3)
-) Engine=InnoDB;
-
-create table t2_id4_idx(
- id bigint not null auto_increment,
- id4 bigint,
- id3 bigint,
- primary key(id),
- key idx_id4(id4)
-) Engine=InnoDB;
-
-create table t8(
- id8 bigint,
- testId bigint,
- primary key(id8)
-) Engine=InnoDB;
-`
-
- VSchema = `
-{
- "sharded": true,
- "vindexes": {
- "unicode_loose_xxhash" : {
- "type": "unicode_loose_xxhash"
- },
- "unicode_loose_md5" : {
- "type": "unicode_loose_md5"
- },
- "hash": {
- "type": "hash"
- },
- "xxhash": {
- "type": "xxhash"
- },
- "t2_id4_idx": {
- "type": "lookup_hash",
- "params": {
- "table": "t2_id4_idx",
- "from": "id4",
- "to": "id3",
- "autocommit": "true"
- },
- "owner": "t2"
- }
- },
- "tables": {
- "t2": {
- "column_vindexes": [
- {
- "column": "id3",
- "name": "hash"
- },
- {
- "column": "id4",
- "name": "t2_id4_idx"
- }
- ]
- },
- "t2_id4_idx": {
- "column_vindexes": [
- {
- "column": "id4",
- "name": "hash"
- }
- ]
- },
- "t8": {
- "column_vindexes": [
- {
- "column": "id8",
- "name": "hash"
- }
- ]
- }
- }
-}`
+ //go:embed schema.sql
+ SchemaSQL string
+
+ //go:embed vschema.json
+ VSchema string
)
func TestMain(m *testing.M) {
@@ -138,8 +68,29 @@ func TestMain(m *testing.M) {
SchemaSQL: SchemaSQL,
VSchema: VSchema,
}
- clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal", "--vschema_ddl_authorized_users", "%", "--schema_change_signal_user", "userData1"}
- clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1", "--queryserver-config-strict-table-acl", "--queryserver-config-acl-exempt-acl", "userData1", "--table-acl-config", "dummy.json"}
+ clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal",
+ "--vschema_ddl_authorized_users", "%",
+ "--schema_change_signal_user", "userData1"}
+ clusterInstance.VtGatePlannerVersion = planbuilder.Gen4
+ clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal",
+ "--queryserver-config-schema-change-signal-interval", "0.1",
+ "--queryserver-config-strict-table-acl",
+ "--queryserver-config-acl-exempt-acl", "userData1",
+ "--table-acl-config", "dummy.json"}
+
+ vtgateVer, err := cluster.GetMajorVersion("vtgate")
+ if err != nil {
+ return 1
+ }
+ vttabletVer, err := cluster.GetMajorVersion("vttablet")
+ if err != nil {
+ return 1
+ }
+ if vtgateVer >= 16 && vttabletVer >= 16 {
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable-views")
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-views")
+ }
+
err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false)
if err != nil {
return 1
@@ -209,35 +160,54 @@ func TestInitAndUpdate(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
+ vtgateVersion, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
+
+ expected := `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
+ }
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"initial table list not complete")
// Init
_ = utils.Exec(t, conn, "create table test_sc (id bigint primary key)")
+ expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
+ }
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"test_sc not in vschema tables")
// Tables Update via health check.
_ = utils.Exec(t, conn, "create table test_sc1 (id bigint primary key)")
+ expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
+ }
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"test_sc1 not in vschema tables")
_ = utils.Exec(t, conn, "drop table test_sc, test_sc1")
+ expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
+ }
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"test_sc and test_sc_1 should not be in vschema tables")
@@ -253,10 +223,16 @@ func TestDMLOnNewTable(t *testing.T) {
// create a new table which is not part of the VSchema
utils.Exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`)
+ vtgateVersion, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
+ expected := `[[VARCHAR("dual")] [VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
+ }
// wait for vttablet's schema reload interval to pass
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"test_sc not in vschema tables")
@@ -280,3 +256,64 @@ func TestDMLOnNewTable(t *testing.T) {
defer utils.Exec(t, conn, `delete from t8`)
utils.AssertMatchesNoOrder(t, conn, `select id from new_table_tracked join t8`, `[[INT64(0)] [INT64(1)]]`)
}
+
+// TestNewView validates that view tracking works as expected.
+func TestNewView(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 16, "vttablet")
+
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // insert some data
+ _ = utils.Exec(t, conn, "insert into t2 (id3, id4) values (1, 10), (2, 20), (3, 30)")
+ defer utils.Exec(t, conn, "delete from t2")
+
+ selQuery := "select sum(id4) from t2 where id4 > 10"
+
+ // create a view
+ _ = utils.Exec(t, conn, "create view test_view as "+selQuery)
+
+ // executing the query directly
+ qr := utils.Exec(t, conn, selQuery)
+ // selecting it through the view.
+ utils.AssertMatchesWithTimeout(t, conn, "select * from test_view", fmt.Sprintf("%v", qr.Rows), 100*time.Millisecond, 10*time.Second, "test_view not in vschema tables")
+}
+
+// TestViewAndTable validates that new column added in table is present in the view definition
+func TestViewAndTable(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 16, "vttablet")
+
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // add a new column to the table t8
+ _ = utils.Exec(t, conn, "alter table t8 add column new_col varchar(50)")
+ err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, KeyspaceName, "t8", "new_col")
+ require.NoError(t, err)
+
+ // insert some data
+ _ = utils.Exec(t, conn, "insert into t8(id8, new_col) values (1, 'V')")
+ defer utils.Exec(t, conn, "delete from t8")
+
+ // create a view with t8, having the new column.
+ _ = utils.Exec(t, conn, "create view t8_view as select * from t8")
+
+ // executing the view query, with the new column in the select field.
+ utils.AssertMatchesWithTimeout(t, conn, "select new_col from t8_view", `[[VARCHAR("V")]]`, 100*time.Millisecond, 5*time.Second, "t8_view not in vschema tables")
+
+ // add another column to the table t8
+ _ = utils.Exec(t, conn, "alter table t8 add column additional_col bigint")
+ err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, KeyspaceName, "t8", "additional_col")
+ require.NoError(t, err)
+
+ // executing the query on view
+ qr := utils.Exec(t, conn, "select * from t8_view")
+ // validate that field name should not have additional_col
+ assert.NotContains(t, fmt.Sprintf("%v", qr.Fields), "additional_col")
+}
diff --git a/go/test/endtoend/vtgate/schematracker/sharded/vschema.json b/go/test/endtoend/vtgate/schematracker/sharded/vschema.json
new file mode 100644
index 00000000000..72543aa368e
--- /dev/null
+++ b/go/test/endtoend/vtgate/schematracker/sharded/vschema.json
@@ -0,0 +1,57 @@
+{
+ "sharded": true,
+ "vindexes": {
+ "unicode_loose_xxhash" : {
+ "type": "unicode_loose_xxhash"
+ },
+ "unicode_loose_md5" : {
+ "type": "unicode_loose_md5"
+ },
+ "hash": {
+ "type": "hash"
+ },
+ "xxhash": {
+ "type": "xxhash"
+ },
+ "t2_id4_idx": {
+ "type": "lookup_hash",
+ "params": {
+ "table": "t2_id4_idx",
+ "from": "id4",
+ "to": "id3",
+ "autocommit": "true"
+ },
+ "owner": "t2"
+ }
+ },
+ "tables": {
+ "t2": {
+ "column_vindexes": [
+ {
+ "column": "id3",
+ "name": "hash"
+ },
+ {
+ "column": "id4",
+ "name": "t2_id4_idx"
+ }
+ ]
+ },
+ "t2_id4_idx": {
+ "column_vindexes": [
+ {
+ "column": "id4",
+ "name": "hash"
+ }
+ ]
+ },
+ "t8": {
+ "column_vindexes": [
+ {
+ "column": "id8",
+ "name": "hash"
+ }
+ ]
+ }
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go
index f77a320bbc7..a441a1a2826 100644
--- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go
+++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go
@@ -173,6 +173,10 @@ func TestMain(m *testing.M) {
}
}
+ if err := clusterInstance.StartVTOrc(KeyspaceName); err != nil {
+ return 1
+ }
+
err = waitForVTGateAndVTTablet()
if err != nil {
fmt.Println(err)
diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go b/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go
index 276664c74fd..d2fcf07810c 100644
--- a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go
+++ b/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go
@@ -102,9 +102,11 @@ func TestSchemaTrackingError(t *testing.T) {
case <-timeout:
t.Error("timeout waiting for schema tracking error")
case <-time.After(1 * time.Second):
- // check info logs
+ // check info logs, continue if the file could not be read correctly.
all, err := os.ReadFile(path.Join(logDir, "vtgate.WARNING"))
- require.NoError(t, err)
+ if err != nil {
+ continue
+ }
if strings.Contains(string(all), "Table ACL might be enabled, --schema_change_signal_user needs to be passed to VTGate for schema tracking to work. Check 'schema tracking' docs on vitess.io") {
present = true
}
diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go
index 930f288b831..d858d1d4c66 100644
--- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go
+++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go
@@ -95,10 +95,17 @@ func TestNewUnshardedTable(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
+ vtgateVersion, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
+ expected := `[[VARCHAR("dual")] [VARCHAR("main")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("main")]]`
+ }
+
// ensuring our initial table "main" is in the schema
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("main")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"initial table list not complete")
@@ -106,10 +113,15 @@ func TestNewUnshardedTable(t *testing.T) {
// create a new table which is not part of the VSchema
utils.Exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`)
+ expected = `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("new_table_tracked")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("main")] [VARCHAR("new_table_tracked")]]`
+ }
+
// waiting for the vttablet's schema_reload interval to kick in
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("new_table_tracked")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"new_table_tracked not in vschema tables")
@@ -126,9 +138,13 @@ func TestNewUnshardedTable(t *testing.T) {
utils.Exec(t, conn, `drop table new_table_tracked`)
// waiting for the vttablet's schema_reload interval to kick in
+ expected = `[[VARCHAR("dual")] [VARCHAR("main")]]`
+ if vtgateVersion >= 17 {
+ expected = `[[VARCHAR("main")]]`
+ }
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("main")]]`,
+ expected,
100*time.Millisecond,
3*time.Second,
"new_table_tracked not in vschema tables")
diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go
index 0d72b870955..e655b7d7034 100644
--- a/go/test/endtoend/vtgate/sequence/seq_test.go
+++ b/go/test/endtoend/vtgate/sequence/seq_test.go
@@ -47,9 +47,9 @@ var (
)Engine=InnoDB;
create table sequence_test_seq (
- id int default 0,
- next_id bigint default null,
- cache bigint default null,
+ id int default 0,
+ next_id bigint default null,
+ cache bigint default null,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB;
@@ -60,13 +60,13 @@ INSERT INTO id_seq (id, next_id, cache) values (0, 1, 1000);
`
unshardedVSchema = `
- {
+ {
"sharded":false,
"vindexes": {
"hash_index": {
"type": "hash"
}
- },
+ },
"tables": {
"sequence_test":{
"auto_increment":{
@@ -147,7 +147,7 @@ CREATE TABLE allDefaults (
"column": "id",
"sequence": "id_seq"
}
- },
+ },
"allDefaults": {
"columnVindexes": [
{
@@ -264,6 +264,12 @@ func TestSeq(t *testing.T) {
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("wrong insert: %v, must contain %s", err, want)
}
+
+ utils.Exec(t, conn, "DELETE FROM sequence_test_seq")
+ qr = utils.Exec(t, conn, "select * from sequence_test_seq")
+ if got, want := fmt.Sprintf("%v", qr.Rows), `[]`; got != want {
+ t.Errorf("select:\n%v want\n%v", got, want)
+ }
}
func TestDotTableSeq(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go
index bcfdbd51a8f..a080fe968ad 100644
--- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go
+++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go
@@ -201,7 +201,6 @@ func addTablet(t *testing.T, tabletUID int, tabletType string) *cluster.Vttablet
clusterInstance.Hostname,
clusterInstance.TmpDirectory,
clusterInstance.VtTabletExtraArgs,
- clusterInstance.EnableSemiSync,
clusterInstance.DefaultCharset)
// wait for mysqld to be ready
@@ -215,7 +214,7 @@ func addTablet(t *testing.T, tabletUID int, tabletType string) *cluster.Vttablet
serving := tablet.VttabletProcess.WaitForStatus("SERVING", time.Duration(60*time.Second))
assert.Equal(t, serving, true, "Tablet did not become ready within a reasonable time")
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.%s",
- tablet.VttabletProcess.Keyspace, tablet.VttabletProcess.Shard, tablet.Type), 1)
+ tablet.VttabletProcess.Keyspace, tablet.VttabletProcess.Shard, tablet.Type), 1, 30*time.Second)
require.Nil(t, err)
t.Logf("Added tablet: %s", tablet.Alias)
diff --git a/go/test/endtoend/vtgate/transaction/restart/main_test.go b/go/test/endtoend/vtgate/transaction/restart/main_test.go
new file mode 100644
index 00000000000..3c7ac710e9d
--- /dev/null
+++ b/go/test/endtoend/vtgate/transaction/restart/main_test.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package misc
+
+import (
+ "context"
+ _ "embed"
+ "flag"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/utils"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ keyspaceName = "ks"
+ cell = "test"
+
+ //go:embed schema.sql
+ schemaSQL string
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, "localhost")
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: schemaSQL,
+ }
+ err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false)
+ if err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
+ "--planner-version=gen4",
+ "--mysql_default_workload=olap")
+ err = clusterInstance.StartVtgate()
+ if err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+/*
+TestStreamTxRestart tests that when a connection is killed my mysql (may be due to restart),
+then the transaction should not continue to serve the query via reconnect.
+*/
+func TestStreamTxRestart(t *testing.T) {
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ utils.Exec(t, conn, "begin")
+ // BeginStreamExecute
+ _ = utils.Exec(t, conn, "select connection_id()")
+
+ // StreamExecute
+ _ = utils.Exec(t, conn, "select connection_id()")
+
+ // restart the mysql to terminate all the existing connections.
+ primTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
+ err = primTablet.MysqlctlProcess.Stop()
+ require.NoError(t, err)
+ err = primTablet.MysqlctlProcess.StartProvideInit(false)
+ require.NoError(t, err)
+
+ // query should return connection error
+ _, err = utils.ExecAllowError(t, conn, "select connection_id()")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "broken pipe (errno 2006) (sqlstate HY000)")
+}
diff --git a/go/test/endtoend/vtgate/transaction/restart/schema.sql b/go/test/endtoend/vtgate/transaction/restart/schema.sql
new file mode 100644
index 00000000000..3e78cab09d6
--- /dev/null
+++ b/go/test/endtoend/vtgate/transaction/restart/schema.sql
@@ -0,0 +1,5 @@
+create table t1(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/transaction/schema.sql b/go/test/endtoend/vtgate/transaction/schema.sql
new file mode 100644
index 00000000000..84afdfa5815
--- /dev/null
+++ b/go/test/endtoend/vtgate/transaction/schema.sql
@@ -0,0 +1,17 @@
+create table twopc_user (
+ user_id bigint,
+ name varchar(128),
+ primary key (user_id)
+) Engine=InnoDB;
+
+create table twopc_lookup (
+ name varchar(128),
+ id bigint,
+ primary key (id)
+) Engine=InnoDB;
+
+create table test (
+ id bigint,
+ msg varchar(25),
+ primary key (id)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/transaction/trxn_mode_test.go b/go/test/endtoend/vtgate/transaction/trxn_mode_test.go
deleted file mode 100644
index 76839702f65..00000000000
--- a/go/test/endtoend/vtgate/transaction/trxn_mode_test.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transaction
-
-import (
- "context"
- "flag"
- "fmt"
- "os"
- "testing"
-
- "vitess.io/vitess/go/test/endtoend/utils"
-
- "github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/test/endtoend/cluster"
-)
-
-var (
- clusterInstance *cluster.LocalProcessCluster
- vtParams mysql.ConnParams
- keyspaceName = "ks"
- cell = "zone1"
- hostname = "localhost"
- sqlSchema = `
- create table twopc_user (
- user_id bigint,
- name varchar(128),
- primary key (user_id)
- ) Engine=InnoDB;
-
- create table twopc_lookup (
- name varchar(128),
- id bigint,
- primary key (id)
- ) Engine=InnoDB;`
-
- vSchema = `
- {
- "sharded":true,
- "vindexes": {
- "hash_index": {
- "type": "hash"
- },
- "twopc_lookup_vdx": {
- "type": "lookup_hash_unique",
- "params": {
- "table": "twopc_lookup",
- "from": "name",
- "to": "id",
- "autocommit": "true"
- },
- "owner": "twopc_user"
- }
- },
- "tables": {
- "twopc_user":{
- "column_vindexes": [
- {
- "column": "user_id",
- "name": "hash_index"
- },
- {
- "column": "name",
- "name": "twopc_lookup_vdx"
- }
- ]
- },
- "twopc_lookup": {
- "column_vindexes": [
- {
- "column": "id",
- "name": "hash_index"
- }
- ]
- }
- }
- }
- `
-)
-
-func TestMain(m *testing.M) {
- defer cluster.PanicHandler(nil)
- flag.Parse()
-
- exitcode, err := func() (int, error) {
- clusterInstance = cluster.NewCluster(cell, hostname)
- defer clusterInstance.Teardown()
-
- // Reserve vtGate port in order to pass it to vtTablet
- clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort()
- // Set extra tablet args for twopc
- clusterInstance.VtTabletExtraArgs = []string{
- "--twopc_enable",
- "--twopc_coordinator_address", fmt.Sprintf("localhost:%d", clusterInstance.VtgateGrpcPort),
- "--twopc_abandon_age", "3600",
- }
-
- // Start topo server
- if err := clusterInstance.StartTopo(); err != nil {
- return 1, err
- }
-
- // Start keyspace
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- SchemaSQL: sqlSchema,
- VSchema: vSchema,
- }
- if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil {
- return 1, err
- }
-
- // Starting Vtgate in SINGLE transaction mode
- clusterInstance.VtGateExtraArgs = []string{"--transaction_mode", "SINGLE"}
- if err := clusterInstance.StartVtgate(); err != nil {
- return 1, err
- }
- vtParams = clusterInstance.GetVTParams(keyspaceName)
-
- return m.Run(), nil
- }()
- if err != nil {
- fmt.Printf("%v\n", err)
- os.Exit(1)
- } else {
- os.Exit(exitcode)
- }
-}
-
-// TestTransactionModes tests transactions using twopc mode
-func TestTransactionModes(t *testing.T) {
- defer cluster.PanicHandler(t)
- ctx := context.Background()
- conn, err := mysql.Connect(ctx, &vtParams)
- require.NoError(t, err)
- defer conn.Close()
-
- // Insert targeted to multiple tables should fail as Transaction mode is SINGLE
- utils.Exec(t, conn, "begin")
- utils.Exec(t, conn, "insert into twopc_user(user_id, name) values(1,'john')")
- _, err = conn.ExecuteFetch("insert into twopc_user(user_id, name) values(6,'vick')", 1000, false)
- utils.Exec(t, conn, "rollback")
- want := "multi-db transaction attempted"
- require.Error(t, err)
- require.Contains(t, err.Error(), want)
-
- // Enable TWOPC transaction mode
- clusterInstance.VtGateExtraArgs = []string{"--transaction_mode", "TWOPC"}
-
- // Restart VtGate
- require.NoError(t, clusterInstance.RestartVtgate())
-
- // Make a new mysql connection to vtGate
- vtParams = clusterInstance.GetVTParams(keyspaceName)
- conn2, err := mysql.Connect(ctx, &vtParams)
- require.NoError(t, err)
- defer conn2.Close()
-
- // Insert targeted to multiple db should PASS with TWOPC trx mode
- utils.Exec(t, conn2, "begin")
- utils.Exec(t, conn2, "insert into twopc_user(user_id, name) values(3,'mark')")
- utils.Exec(t, conn2, "insert into twopc_user(user_id, name) values(4,'doug')")
- utils.Exec(t, conn2, "insert into twopc_lookup(name, id) values('Tim',7)")
- utils.Exec(t, conn2, "commit")
-
- // Verify the values are present
- utils.AssertMatches(t, conn2, "select user_id from twopc_user where name='mark'", `[[INT64(3)]]`)
- utils.AssertMatches(t, conn2, "select name from twopc_lookup where id=3", `[[VARCHAR("mark")]]`)
-
- // DELETE from multiple tables using TWOPC transaction mode
- utils.Exec(t, conn2, "begin")
- utils.Exec(t, conn2, "delete from twopc_user where user_id = 3")
- utils.Exec(t, conn2, "delete from twopc_lookup where id = 3")
- utils.Exec(t, conn2, "commit")
-
- // VERIFY that values are deleted
- utils.AssertMatches(t, conn2, "select user_id from twopc_user where user_id=3", `[]`)
- utils.AssertMatches(t, conn2, "select name from twopc_lookup where id=3", `[]`)
-}
diff --git a/go/test/endtoend/vtgate/transaction/tx_test.go b/go/test/endtoend/vtgate/transaction/tx_test.go
new file mode 100644
index 00000000000..8a004277b89
--- /dev/null
+++ b/go/test/endtoend/vtgate/transaction/tx_test.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2019 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transaction
+
+import (
+ "context"
+ _ "embed"
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+
+ "vitess.io/vitess/go/test/endtoend/utils"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ keyspaceName = "ks"
+ cell = "zone1"
+ hostname = "localhost"
+
+ //go:embed schema.sql
+ SchemaSQL string
+
+ //go:embed vschema.json
+ VSchema string
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitcode, err := func() (int, error) {
+ clusterInstance = cluster.NewCluster(cell, hostname)
+ defer clusterInstance.Teardown()
+
+ // Reserve vtGate port in order to pass it to vtTablet
+ clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort()
+ // Set extra tablet args for twopc
+ clusterInstance.VtTabletExtraArgs = []string{
+ "--twopc_enable",
+ "--twopc_coordinator_address", fmt.Sprintf("localhost:%d", clusterInstance.VtgateGrpcPort),
+ "--twopc_abandon_age", "3600",
+ }
+
+ // Start topo server
+ if err := clusterInstance.StartTopo(); err != nil {
+ return 1, err
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: SchemaSQL,
+ VSchema: VSchema,
+ }
+ if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil {
+ return 1, err
+ }
+
+ // Starting Vtgate in default MULTI transaction mode
+ if err := clusterInstance.StartVtgate(); err != nil {
+ return 1, err
+ }
+ vtParams = clusterInstance.GetVTParams(keyspaceName)
+
+ return m.Run(), nil
+ }()
+ if err != nil {
+ fmt.Printf("%v\n", err)
+ os.Exit(1)
+ } else {
+ os.Exit(exitcode)
+ }
+}
+
+// TestTransactionModes tests transactions using twopc mode
+func TestTransactionModes(t *testing.T) {
+ defer cluster.PanicHandler(t)
+
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // set transaction mode to SINGLE.
+ utils.Exec(t, conn, "set transaction_mode = 'single'")
+
+ // Insert targeted to multiple tables should fail as Transaction mode is SINGLE
+ utils.Exec(t, conn, "begin")
+ utils.Exec(t, conn, "insert into twopc_user(user_id, name) values(1,'john')")
+ _, err = conn.ExecuteFetch("insert into twopc_user(user_id, name) values(6,'vick')", 1000, false)
+ want := "multi-db transaction attempted"
+ require.Error(t, err)
+ require.Contains(t, err.Error(), want)
+ utils.Exec(t, conn, "rollback")
+
+ // set transaction mode to TWOPC.
+ utils.Exec(t, conn, "set transaction_mode = 'twopc'")
+
+ // Insert targeted to multiple db should PASS with TWOPC trx mode
+ utils.Exec(t, conn, "begin")
+ utils.Exec(t, conn, "insert into twopc_user(user_id, name) values(3,'mark')")
+ utils.Exec(t, conn, "insert into twopc_user(user_id, name) values(4,'doug')")
+ utils.Exec(t, conn, "insert into twopc_lookup(name, id) values('Tim',7)")
+ utils.Exec(t, conn, "commit")
+
+ // Verify the values are present
+ utils.AssertMatches(t, conn, "select user_id from twopc_user where name='mark'", `[[INT64(3)]]`)
+ utils.AssertMatches(t, conn, "select name from twopc_lookup where id=3", `[[VARCHAR("mark")]]`)
+
+ // DELETE from multiple tables using TWOPC transaction mode
+ utils.Exec(t, conn, "begin")
+ utils.Exec(t, conn, "delete from twopc_user where user_id = 3")
+ utils.Exec(t, conn, "delete from twopc_lookup where id = 3")
+ utils.Exec(t, conn, "commit")
+
+ // VERIFY that values are deleted
+ utils.AssertMatches(t, conn, "select user_id from twopc_user where user_id=3", `[]`)
+ utils.AssertMatches(t, conn, "select name from twopc_lookup where id=3", `[]`)
+}
+
+// TestTransactionIsolation tests transaction isolation level.
+func TestTransactionIsolation(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ ctx := context.Background()
+
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // inserting some data.
+ utils.Exec(t, conn, "insert into test(id, msg) values (1,'v1'), (2, 'v2')")
+ defer utils.Exec(t, conn, "delete from test")
+
+ conn1, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn1.Close()
+
+ conn2, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn2.Close()
+
+ // on connection 1 change the isolation level to read-committed.
+ // start a transaction and read the data for id = 1.
+ utils.Exec(t, conn1, "set transaction isolation level read committed")
+ utils.Exec(t, conn1, "begin")
+ utils.AssertMatches(t, conn1, "select id, msg from test where id = 1", `[[INT64(1) VARCHAR("v1")]]`)
+
+ // change the value of msg for id = 1 on connection 2.
+ utils.Exec(t, conn2, "update test set msg = 'foo' where id = 1")
+
+ // new value should be reflected on connection 1 within the open transaction.
+ utils.AssertMatches(t, conn1, "select id, msg from test where id = 1", `[[INT64(1) VARCHAR("foo")]]`)
+ utils.Exec(t, conn1, "rollback")
+}
+
+func TestTransactionAccessModes(t *testing.T) {
+ closer := start(t)
+ defer closer()
+
+ ctx := context.Background()
+
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // start a transaction with read-only characteristic.
+ utils.Exec(t, conn, "start transaction read only")
+ _, err = utils.ExecAllowError(t, conn, "insert into test(id, msg) values (42,'foo')")
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "Cannot execute statement in a READ ONLY transaction")
+ utils.Exec(t, conn, "rollback")
+
+ // trying autocommit, this should pass as transaction characteristics are limited to single transaction.
+ utils.Exec(t, conn, "insert into test(id, msg) values (42,'foo')")
+
+ // target replica
+ utils.Exec(t, conn, "use `ks@replica`")
+ // start a transaction with read-only characteristic.
+ utils.Exec(t, conn, "start transaction read only")
+ utils.Exec(t, conn, "select * from test")
+
+ // start a transaction with read-write characteristic. This should fail
+ utils.Exec(t, conn, "start transaction read write")
+ _, err = utils.ExecAllowError(t, conn, "select connection_id()")
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "cannot start read write transaction on a read only tablet")
+ utils.Exec(t, conn, "rollback")
+}
+
+// TestTransactionIsolationInTx tests transaction isolation level inside transaction
+// and setting isolation level to different values.
+func TestTransactionIsolationInTx(t *testing.T) {
+ ctx := context.Background()
+
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ utils.Exec(t, conn, "set transaction isolation level read committed")
+ utils.Exec(t, conn, "begin")
+ utils.AssertMatches(t, conn, "select @@transaction_isolation", `[[VARCHAR("READ-COMMITTED")]]`)
+ utils.Exec(t, conn, "commit")
+
+ utils.Exec(t, conn, "set transaction isolation level serializable")
+ utils.Exec(t, conn, "begin")
+ utils.AssertMatches(t, conn, "select @@transaction_isolation", `[[VARCHAR("SERIALIZABLE")]]`)
+ utils.Exec(t, conn, "commit")
+
+ utils.Exec(t, conn, "set transaction isolation level read committed")
+ utils.Exec(t, conn, "begin")
+ utils.AssertMatches(t, conn, "select @@transaction_isolation", `[[VARCHAR("READ-COMMITTED")]]`)
+ utils.Exec(t, conn, "commit")
+}
+
+func start(t *testing.T) func() {
+ deleteAll := func() {
+ conn, err := mysql.Connect(context.Background(), &vtParams)
+ require.NoError(t, err)
+ tables := []string{"test", "twopc_user"}
+ for _, table := range tables {
+ _, _ = utils.ExecAllowError(t, conn, "delete from "+table)
+ }
+ conn.Close()
+ }
+
+ deleteAll()
+
+ return func() {
+ deleteAll()
+ cluster.PanicHandler(t)
+ }
+}
diff --git a/go/test/endtoend/vtgate/transaction/vschema.json b/go/test/endtoend/vtgate/transaction/vschema.json
new file mode 100644
index 00000000000..6b23786b09d
--- /dev/null
+++ b/go/test/endtoend/vtgate/transaction/vschema.json
@@ -0,0 +1,48 @@
+{
+ "sharded":true,
+ "vindexes": {
+ "hash_index": {
+ "type": "hash"
+ },
+ "twopc_lookup_vdx": {
+ "type": "lookup_hash_unique",
+ "params": {
+ "table": "twopc_lookup",
+ "from": "name",
+ "to": "id",
+ "autocommit": "true"
+ },
+ "owner": "twopc_user"
+ }
+ },
+ "tables": {
+ "twopc_user":{
+ "column_vindexes": [
+ {
+ "column": "user_id",
+ "name": "hash_index"
+ },
+ {
+ "column": "name",
+ "name": "twopc_lookup_vdx"
+ }
+ ]
+ },
+ "twopc_lookup": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash_index"
+ }
+ ]
+ },
+ "test": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash_index"
+ }
+ ]
+ }
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go
index 7900d155932..83e20d9aa31 100644
--- a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go
+++ b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go
@@ -41,6 +41,10 @@ var (
id BIGINT NOT NULL,
field BIGINT NOT NULL,
field2 BIGINT,
+ field3 BIGINT,
+ field4 BIGINT,
+ field5 BIGINT,
+ field6 BIGINT,
PRIMARY KEY (id)
) ENGINE=Innodb;
@@ -56,6 +60,30 @@ CREATE TABLE lookup2 (
UNIQUE KEY (field2)
) ENGINE=Innodb;
+CREATE TABLE lookup3 (
+ field3 BIGINT NOT NULL,
+ keyspace_id binary(8),
+ UNIQUE KEY (field3)
+) ENGINE=Innodb;
+
+CREATE TABLE lookup4 (
+ field4 BIGINT NOT NULL,
+ keyspace_id binary(8),
+ UNIQUE KEY (field4)
+) ENGINE=Innodb;
+
+CREATE TABLE lookup5 (
+ field5 BIGINT NOT NULL,
+ keyspace_id binary(8),
+ UNIQUE KEY (field5)
+) ENGINE=Innodb;
+
+CREATE TABLE lookup6 (
+ field6 BIGINT NOT NULL,
+ keyspace_id binary(8),
+ UNIQUE KEY (field6)
+) ENGINE=Innodb;
+
CREATE TABLE thex (
id VARBINARY(64) NOT NULL,
field BIGINT NOT NULL,
@@ -88,7 +116,7 @@ CREATE TABLE thex (
"table": "lookup1",
"from": "field",
"to": "keyspace_id",
- "ignore_nulls": "true"
+ "ignore_nulls": "true"
},
"owner": "t1"
},
@@ -98,7 +126,47 @@ CREATE TABLE thex (
"table": "lookup2",
"from": "field2",
"to": "keyspace_id",
- "ignore_nulls": "true"
+ "ignore_nulls": "true"
+ },
+ "owner": "t1"
+ },
+ "lookup3": {
+ "type": "lookup",
+ "params": {
+ "from": "field3",
+ "no_verify": "true",
+ "table": "lookup3",
+ "to": "keyspace_id"
+ },
+ "owner": "t1"
+ },
+ "lookup4": {
+ "type": "lookup",
+ "params": {
+ "from": "field4",
+ "read_lock": "exclusive",
+ "table": "lookup4",
+ "to": "keyspace_id"
+ },
+ "owner": "t1"
+ },
+ "lookup5": {
+ "type": "lookup",
+ "params": {
+ "from": "field5",
+ "read_lock": "shared",
+ "table": "lookup5",
+ "to": "keyspace_id"
+ },
+ "owner": "t1"
+ },
+ "lookup6": {
+ "type": "lookup",
+ "params": {
+ "from": "field6",
+ "read_lock": "none",
+ "table": "lookup6",
+ "to": "keyspace_id"
},
"owner": "t1"
}
@@ -117,6 +185,22 @@ CREATE TABLE thex (
{
"column": "field2",
"name": "lookup2"
+ },
+ {
+ "column": "field3",
+ "name": "lookup3"
+ },
+ {
+ "column": "field4",
+ "name": "lookup4"
+ },
+ {
+ "column": "field5",
+ "name": "lookup5"
+ },
+ {
+ "column": "field6",
+ "name": "lookup6"
}
]
},
@@ -136,6 +220,38 @@ CREATE TABLE thex (
}
]
},
+ "lookup3": {
+ "column_vindexes": [
+ {
+ "column": "field3",
+ "name": "binary_md5_vdx"
+ }
+ ]
+ },
+ "lookup4": {
+ "column_vindexes": [
+ {
+ "column": "field4",
+ "name": "binary_md5_vdx"
+ }
+ ]
+ },
+ "lookup5": {
+ "column_vindexes": [
+ {
+ "column": "field5",
+ "name": "binary_md5_vdx"
+ }
+ ]
+ },
+ "lookup6": {
+ "column_vindexes": [
+ {
+ "column": "field6",
+ "name": "binary_md5_vdx"
+ }
+ ]
+ },
"thex": {
"column_vindexes": [
{
@@ -216,51 +332,51 @@ func TestVindexBindVarOverlap(t *testing.T) {
require.Nil(t, err)
defer conn.Close()
- utils.Exec(t, conn, "INSERT INTO t1 (id, field, field2) VALUES "+
- "(0,1,2), "+
- "(1,2,3), "+
- "(2,3,4), "+
- "(3,4,5), "+
- "(4,5,6), "+
- "(5,6,7), "+
- "(6,7,8), "+
- "(7,8,9), "+
- "(8,9,10), "+
- "(9,10,11), "+
- "(10,11,12), "+
- "(11,12,13), "+
- "(12,13,14), "+
- "(13,14,15), "+
- "(14,15,16), "+
- "(15,16,17), "+
- "(16,17,18), "+
- "(17,18,19), "+
- "(18,19,20), "+
- "(19,20,21), "+
- "(20,21,22)")
- result := utils.Exec(t, conn, "select id, field, field2 from t1 order by id")
+ utils.Exec(t, conn, "INSERT INTO t1 (id, field, field2, field3, field4, field5, field6) VALUES "+
+ "(0,1,2,3,4,5,6), "+
+ "(1,2,3,4,5,6,7), "+
+ "(2,3,4,5,6,7,8), "+
+ "(3,4,5,6,7,8,9), "+
+ "(4,5,6,7,8,9,10), "+
+ "(5,6,7,8,9,10,11), "+
+ "(6,7,8,9,10,11,12), "+
+ "(7,8,9,10,11,12,13), "+
+ "(8,9,10,11,12,13,14), "+
+ "(9,10,11,12,13,14,15), "+
+ "(10,11,12,13,14,15,16), "+
+ "(11,12,13,14,15,16,17), "+
+ "(12,13,14,15,16,17,18), "+
+ "(13,14,15,16,17,18,19), "+
+ "(14,15,16,17,18,19,20), "+
+ "(15,16,17,18,19,20,21), "+
+ "(16,17,18,19,20,21,22), "+
+ "(17,18,19,20,21,22,23), "+
+ "(18,19,20,21,22,23,24), "+
+ "(19,20,21,22,23,24,25), "+
+ "(20,21,22,23,24,25,26)")
+ result := utils.Exec(t, conn, "select id, field, field2, field3, field4, field5, field6 from t1 order by id")
expected :=
- "[[INT64(0) INT64(1) INT64(2)] " +
- "[INT64(1) INT64(2) INT64(3)] " +
- "[INT64(2) INT64(3) INT64(4)] " +
- "[INT64(3) INT64(4) INT64(5)] " +
- "[INT64(4) INT64(5) INT64(6)] " +
- "[INT64(5) INT64(6) INT64(7)] " +
- "[INT64(6) INT64(7) INT64(8)] " +
- "[INT64(7) INT64(8) INT64(9)] " +
- "[INT64(8) INT64(9) INT64(10)] " +
- "[INT64(9) INT64(10) INT64(11)] " +
- "[INT64(10) INT64(11) INT64(12)] " +
- "[INT64(11) INT64(12) INT64(13)] " +
- "[INT64(12) INT64(13) INT64(14)] " +
- "[INT64(13) INT64(14) INT64(15)] " +
- "[INT64(14) INT64(15) INT64(16)] " +
- "[INT64(15) INT64(16) INT64(17)] " +
- "[INT64(16) INT64(17) INT64(18)] " +
- "[INT64(17) INT64(18) INT64(19)] " +
- "[INT64(18) INT64(19) INT64(20)] " +
- "[INT64(19) INT64(20) INT64(21)] " +
- "[INT64(20) INT64(21) INT64(22)]]"
+ "[[INT64(0) INT64(1) INT64(2) INT64(3) INT64(4) INT64(5) INT64(6)] " +
+ "[INT64(1) INT64(2) INT64(3) INT64(4) INT64(5) INT64(6) INT64(7)] " +
+ "[INT64(2) INT64(3) INT64(4) INT64(5) INT64(6) INT64(7) INT64(8)] " +
+ "[INT64(3) INT64(4) INT64(5) INT64(6) INT64(7) INT64(8) INT64(9)] " +
+ "[INT64(4) INT64(5) INT64(6) INT64(7) INT64(8) INT64(9) INT64(10)] " +
+ "[INT64(5) INT64(6) INT64(7) INT64(8) INT64(9) INT64(10) INT64(11)] " +
+ "[INT64(6) INT64(7) INT64(8) INT64(9) INT64(10) INT64(11) INT64(12)] " +
+ "[INT64(7) INT64(8) INT64(9) INT64(10) INT64(11) INT64(12) INT64(13)] " +
+ "[INT64(8) INT64(9) INT64(10) INT64(11) INT64(12) INT64(13) INT64(14)] " +
+ "[INT64(9) INT64(10) INT64(11) INT64(12) INT64(13) INT64(14) INT64(15)] " +
+ "[INT64(10) INT64(11) INT64(12) INT64(13) INT64(14) INT64(15) INT64(16)] " +
+ "[INT64(11) INT64(12) INT64(13) INT64(14) INT64(15) INT64(16) INT64(17)] " +
+ "[INT64(12) INT64(13) INT64(14) INT64(15) INT64(16) INT64(17) INT64(18)] " +
+ "[INT64(13) INT64(14) INT64(15) INT64(16) INT64(17) INT64(18) INT64(19)] " +
+ "[INT64(14) INT64(15) INT64(16) INT64(17) INT64(18) INT64(19) INT64(20)] " +
+ "[INT64(15) INT64(16) INT64(17) INT64(18) INT64(19) INT64(20) INT64(21)] " +
+ "[INT64(16) INT64(17) INT64(18) INT64(19) INT64(20) INT64(21) INT64(22)] " +
+ "[INT64(17) INT64(18) INT64(19) INT64(20) INT64(21) INT64(22) INT64(23)] " +
+ "[INT64(18) INT64(19) INT64(20) INT64(21) INT64(22) INT64(23) INT64(24)] " +
+ "[INT64(19) INT64(20) INT64(21) INT64(22) INT64(23) INT64(24) INT64(25)] " +
+ "[INT64(20) INT64(21) INT64(22) INT64(23) INT64(24) INT64(25) INT64(26)]]"
assert.Equal(t, expected, fmt.Sprintf("%v", result.Rows))
}
diff --git a/go/test/endtoend/vtgate/vschema.json b/go/test/endtoend/vtgate/vschema.json
index 3aafd1106b5..8d16beec2a6 100644
--- a/go/test/endtoend/vtgate/vschema.json
+++ b/go/test/endtoend/vtgate/vschema.json
@@ -79,6 +79,16 @@
"to": "keyspace_id"
},
"owner": "t9"
+ },
+ "t10_id_to_keyspace_id_idx": {
+ "type": "lookup_unique",
+ "params": {
+ "autocommit": "true",
+ "table": "t10_id_to_keyspace_id_idx",
+ "from": "id",
+ "to": "keyspace_id"
+ },
+ "owner": "t10"
}
},
"tables": {
@@ -271,6 +281,26 @@
"name": "hash"
}
]
+ },
+ "t10": {
+ "column_vindexes": [
+ {
+ "column": "sharding_key",
+ "name": "hash"
+ },
+ {
+ "column": "id",
+ "name": "t10_id_to_keyspace_id_idx"
+ }
+ ]
+ },
+ "t10_id_to_keyspace_id_idx": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
}
}
}
diff --git a/go/test/endtoend/vtgate/vschema/vschema_test.go b/go/test/endtoend/vtgate/vschema/vschema_test.go
index 80a29c6aff4..92863ff7dc8 100644
--- a/go/test/endtoend/vtgate/vschema/vschema_test.go
+++ b/go/test/endtoend/vtgate/vschema/vschema_test.go
@@ -111,8 +111,15 @@ func TestVSchema(t *testing.T) {
utils.AssertMatches(t, conn, "delete from vt_user", `[]`)
+ vtgateVersion, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
+
// Test empty vschema
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")]]`)
+ if vtgateVersion >= 17 {
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[]`)
+ } else {
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")]]`)
+ }
// Use the DDL to create an unsharded vschema and test again
@@ -128,9 +135,11 @@ func TestVSchema(t *testing.T) {
utils.Exec(t, conn, "commit")
// Test Showing Tables
- utils.AssertMatches(t, conn,
- "SHOW VSCHEMA TABLES",
- `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("vt_user")]]`)
+ if vtgateVersion >= 17 {
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("main")] [VARCHAR("vt_user")]]`)
+ } else {
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("vt_user")]]`)
+ }
// Test Showing Vindexes
utils.AssertMatches(t, conn, "SHOW VSCHEMA VINDEXES", `[]`)
diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go
index 87312004f7a..4885a67aa9c 100644
--- a/go/test/endtoend/vtorc/api/api_test.go
+++ b/go/test/endtoend/vtorc/api/api_test.go
@@ -107,10 +107,20 @@ func TestProblemsAPI(t *testing.T) {
assert.Equal(t, 200, status, resp)
assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort))
+ // Verify that filtering by keyspace also works in the API as intended
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks")
+ assert.Equal(t, 200, status, resp)
+ assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort))
+
// Check that filtering using keyspace and shard works
status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=80-")
assert.Equal(t, 200, status, resp)
assert.Equal(t, "[]", resp)
+
+ // Check that filtering using just the shard fails
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?shard=0")
+ assert.Equal(t, 400, status, resp)
+ assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp)
})
t.Run("Enable Recoveries API", func(t *testing.T) {
@@ -150,9 +160,19 @@ func TestProblemsAPI(t *testing.T) {
assert.Equal(t, 200, status, resp)
assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias))
+ // Check that filtering using keyspace works
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks")
+ assert.Equal(t, 200, status, resp)
+ assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias))
+
// Check that filtering using keyspace and shard works
status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks&shard=80-")
assert.Equal(t, 200, status, resp)
assert.Equal(t, "null", resp)
+
+ // Check that filtering using just the shard fails
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?shard=0")
+ assert.Equal(t, 400, status, resp)
+ assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp)
})
}
diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go
index 4254606dd94..c0a845a5699 100644
--- a/go/test/endtoend/vtorc/general/vtorc_test.go
+++ b/go/test/endtoend/vtorc/general/vtorc_test.go
@@ -37,6 +37,7 @@ import (
// verify replication is setup
// verify that with multiple vtorc instances, we still only have 1 PlannedReparentShard call
func TestPrimaryElection(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -64,6 +65,7 @@ func TestPrimaryElection(t *testing.T) {
// verify rdonly is not elected, only replica
// verify replication is setup
func TestSingleKeyspace(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks"}, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -81,6 +83,7 @@ func TestSingleKeyspace(t *testing.T) {
// verify rdonly is not elected, only replica
// verify replication is setup
func TestKeyspaceShard(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks/0"}, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -100,6 +103,7 @@ func TestKeyspaceShard(t *testing.T) {
// 4. setup replication from non-primary, let vtorc repair
// 5. make instance A replicates from B and B from A, wait for repair
func TestVTOrcRepairs(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -216,6 +220,7 @@ func TestVTOrcRepairs(t *testing.T) {
func TestRepairAfterTER(t *testing.T) {
// test fails intermittently on CI, skip until it can be fixed.
t.SkipNow()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -252,6 +257,7 @@ func TestSemiSync(t *testing.T) {
// stop any vtorc instance running due to a previous test.
utils.StopVTOrcs(t, clusterInfo)
newCluster := utils.SetupNewClusterSemiSync(t)
+ defer utils.PrintVTOrcLogsOnFailure(t, newCluster.ClusterInstance)
utils.StartVTOrcs(t, newCluster, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
}, 1)
@@ -316,6 +322,7 @@ func TestSemiSync(t *testing.T) {
// TestVTOrcWithPrs tests that VTOrc works fine even when PRS is called from vtctld
func TestVTOrcWithPrs(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -364,6 +371,7 @@ func TestVTOrcWithPrs(t *testing.T) {
// TestMultipleDurabilities tests that VTOrc works with 2 keyspaces having 2 different durability policies
func TestMultipleDurabilities(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
// Setup a normal cluster and start vtorc
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{}, 1, "")
@@ -388,6 +396,7 @@ func TestDurabilityPolicySetLater(t *testing.T) {
// stop any vtorc instance running due to a previous test.
utils.StopVTOrcs(t, clusterInfo)
newCluster := utils.SetupNewClusterSemiSync(t)
+ defer utils.PrintVTOrcLogsOnFailure(t, newCluster.ClusterInstance)
keyspace := &newCluster.ClusterInstance.Keyspaces[0]
shard0 := &keyspace.Shards[0]
// Before starting VTOrc we explicity want to set the durability policy of the keyspace to an empty string
diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go
index 8e9d622fd80..7d9c57b6b22 100644
--- a/go/test/endtoend/vtorc/primaryfailure/main_test.go
+++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go
@@ -21,9 +21,8 @@ import (
"os"
"testing"
- "vitess.io/vitess/go/test/endtoend/vtorc/utils"
-
"vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/vtorc/utils"
)
var clusterInfo *utils.VTOrcClusterInfo
@@ -34,7 +33,7 @@ func TestMain(m *testing.M) {
cellInfos = append(cellInfos, &utils.CellInfo{
CellName: utils.Cell1,
NumReplicas: 12,
- NumRdonly: 2,
+ NumRdonly: 3,
UIDBase: 100,
})
cellInfos = append(cellInfos, &utils.CellInfo{
diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go
index 01bf01782e7..0ac4129fd8b 100644
--- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go
+++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go
@@ -20,22 +20,26 @@ import (
"testing"
"time"
- "vitess.io/vitess/go/test/endtoend/vtorc/utils"
- "vitess.io/vitess/go/vt/vtorc/logic"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/vtorc/utils"
+ "vitess.io/vitess/go/vt/vtorc/logic"
)
// bring down primary, let orc promote replica
// covers the test case master-failover from orchestrator
+// Also tests that VTOrc can handle multiple failures, if the durability policies allow it
func TestDownPrimary(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
- utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
+ // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test.
+ // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source.
+ // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test.
+ utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
- }, 1, "")
+ }, 1, "semi_sync")
keyspace := &clusterInfo.ClusterInstance.Keyspaces[0]
shard0 := &keyspace.Shards[0]
// find primary from topo
@@ -58,27 +62,39 @@ func TestDownPrimary(t *testing.T) {
assert.NotNil(t, replica, "could not find replica tablet")
assert.NotNil(t, rdonly, "could not find rdonly tablet")
+ // Start a cross-cell replica
+ crossCellReplica := utils.StartVttablet(t, clusterInfo, utils.Cell2, false)
+
// check that the replication is setup correctly before we failover
- utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica}, 10*time.Second)
+ utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica, crossCellReplica}, 10*time.Second)
- // Make the current primary database unavailable.
- err := curPrimary.MysqlctlProcess.Stop()
+ // Make the rdonly vttablet unavailable
+ err := rdonly.VttabletProcess.TearDown()
+ require.NoError(t, err)
+ err = rdonly.MysqlctlProcess.Stop()
+ require.NoError(t, err)
+ // Make the current primary vttablet unavailable.
+ err = curPrimary.VttabletProcess.TearDown()
+ require.NoError(t, err)
+ err = curPrimary.MysqlctlProcess.Stop()
require.NoError(t, err)
defer func() {
- // we remove the tablet from our global list since its mysqlctl process has stopped and cannot be reused for other tests
+ // we remove the tablet from our global list
utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary)
+ utils.PermanentlyRemoveVttablet(clusterInfo, rdonly)
}()
// check that the replica gets promoted
utils.CheckPrimaryTablet(t, clusterInfo, replica, true)
// also check that the replication is working correctly after failover
- utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{rdonly}, 10*time.Second)
+ utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{crossCellReplica}, 10*time.Second)
utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1)
}
// Failover should not be cross data centers, according to the configuration file
// covers part of the test case master-failover-lost-replicas from orchestrator
func TestCrossDataCenterFailure(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -124,6 +140,7 @@ func TestCrossDataCenterFailure(t *testing.T) {
// Failover should not be cross data centers, according to the configuration file
// In case of no viable candidates, we should error out
func TestCrossDataCenterFailureError(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -170,6 +187,7 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) {
// Earlier any replicas that were not able to replicate from the previous primary
// were detected by vtorc and could be configured to have their sources detached
t.Skip()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 2, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -251,6 +269,7 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) {
// This test checks that the promotion of a tablet succeeds if it passes the promotion lag test
// covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator
func TestPromotionLagSuccess(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
ReplicationLagQuery: "select 59",
@@ -300,6 +319,7 @@ func TestPromotionLagFailure(t *testing.T) {
// Earlier vtorc used to check that the promotion lag between the new primary and the old one
// was smaller than the configured value, otherwise it would fail the promotion
t.Skip()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 1, nil, cluster.VTOrcConfiguration{
ReplicationLagQuery: "select 61",
@@ -352,6 +372,7 @@ func TestPromotionLagFailure(t *testing.T) {
// We explicitly set one of the replicas to Prefer promotion rule.
// That is the replica which should be promoted in case of primary failure
func TestDownPrimaryPromotionRule(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
LockShardTimeoutSeconds: 5,
@@ -399,6 +420,7 @@ func TestDownPrimaryPromotionRule(t *testing.T) {
// That is the replica which should be promoted in case of primary failure
// It should also be caught up when it is promoted
func TestDownPrimaryPromotionRuleWithLag(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
LockShardTimeoutSeconds: 5,
@@ -478,6 +500,7 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) {
// We let a replica in our own cell lag. That is the replica which should be promoted in case of primary failure
// It should also be caught up when it is promoted
func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
LockShardTimeoutSeconds: 5,
diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
index c6426021d6c..75ecbfd592c 100644
--- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
+++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
@@ -30,9 +30,9 @@ import (
"vitess.io/vitess/go/vt/vtorc/server"
_ "github.com/go-sql-driver/mysql"
- _ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ _ "modernc.org/sqlite"
)
func TestReadTopologyInstanceBufferable(t *testing.T) {
@@ -40,6 +40,7 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
defer func() {
clusterInfo.ClusterInstance.Teardown()
}()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
keyspace := &clusterInfo.ClusterInstance.Keyspaces[0]
shard0 := &keyspace.Shards[0]
oldArgs := os.Args
@@ -103,7 +104,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
assert.False(t, primaryInstance.HasReplicationCredentials)
assert.Equal(t, primaryInstance.ReplicationIOThreadState, inst.ReplicationThreadStateNoThread)
assert.Equal(t, primaryInstance.ReplicationSQLThreadState, inst.ReplicationThreadStateNoThread)
- assert.Equal(t, fmt.Sprintf("%v:%v", keyspace.Name, shard0.Name), primaryInstance.ClusterName)
// insert an errant GTID in the replica
_, err = utils.RunSQL(t, "insert into vt_insert_test(id, msg) values (10173, 'test 178342')", replica, "vt_ks")
@@ -147,7 +147,7 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
assert.Equal(t, replicaInstance.ReadBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile)
assert.Greater(t, replicaInstance.ReadBinlogCoordinates.LogPos, int64(0))
assert.Equal(t, replicaInstance.ExecBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile)
- assert.LessOrEqual(t, replicaInstance.ExecBinlogCoordinates.LogPos, replicaInstance.ReadBinlogCoordinates.LogPos)
+ assert.Greater(t, replicaInstance.ExecBinlogCoordinates.LogPos, int64(0))
assert.Contains(t, replicaInstance.RelaylogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-relay", replica.TabletUID))
assert.Greater(t, replicaInstance.RelaylogCoordinates.LogPos, int64(0))
assert.Empty(t, replicaInstance.LastIOError)
@@ -159,5 +159,4 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
assert.False(t, replicaInstance.HasReplicationFilters)
assert.LessOrEqual(t, int(replicaInstance.SecondsBehindPrimary.Int64), 1)
assert.False(t, replicaInstance.AllowTLS)
- assert.Equal(t, fmt.Sprintf("%v:%v", keyspace.Name, shard0.Name), replicaInstance.ClusterName)
}
diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go
index 156c8f3728e..8d30e477e2d 100644
--- a/go/test/endtoend/vtorc/utils/utils.go
+++ b/go/test/endtoend/vtorc/utils/utils.go
@@ -29,21 +29,20 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- // This imports toposervers to register their implementations of TopoServer.
- _ "vitess.io/vitess/go/vt/topo/consultopo"
- _ "vitess.io/vitess/go/vt/topo/etcd2topo"
- _ "vitess.io/vitess/go/vt/topo/k8stopo"
- _ "vitess.io/vitess/go/vt/topo/zk2topo"
-
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/log"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ // Register topo implementations.
+ _ "vitess.io/vitess/go/vt/topo/consultopo"
+ _ "vitess.io/vitess/go/vt/topo/etcd2topo"
+ _ "vitess.io/vitess/go/vt/topo/k8stopo"
+ _ "vitess.io/vitess/go/vt/topo/zk2topo"
)
const (
@@ -647,7 +646,7 @@ func PermanentlyRemoveVttablet(clusterInfo *VTOrcClusterInfo, tablet *cluster.Vt
for i, vttablet := range cellInfo.RdonlyTablets {
if vttablet == tablet {
// remove this tablet since its mysql has stopped
- cellInfo.ReplicaTablets = append(cellInfo.ReplicaTablets[:i], cellInfo.ReplicaTablets[i+1:]...)
+ cellInfo.RdonlyTablets = append(cellInfo.RdonlyTablets[:i], cellInfo.RdonlyTablets[i+1:]...)
KillTablets([]*cluster.Vttablet{tablet})
return
}
@@ -947,3 +946,26 @@ func WaitForSuccessfulRecoveryCount(t *testing.T, vtorcInstance *cluster.VTOrcPr
successCount := successfulRecoveriesMap[recoveryName]
assert.EqualValues(t, countExpected, successCount)
}
+
+// PrintVTOrcLogsOnFailure prints the VTOrc logs on failure of the test.
+// This function is supposed to be called as the first defer command from the vtorc tests.
+func PrintVTOrcLogsOnFailure(t *testing.T, clusterInstance *cluster.LocalProcessCluster) {
+ // If the test has not failed, then we don't need to print anything.
+ if !t.Failed() {
+ return
+ }
+
+ log.Errorf("Printing VTOrc logs")
+ for _, vtorc := range clusterInstance.VTOrcProcesses {
+ if vtorc == nil || vtorc.LogFileName == "" {
+ continue
+ }
+ filePath := path.Join(vtorc.LogDir, vtorc.LogFileName)
+ log.Errorf("Printing file - %s", filePath)
+ content, err := os.ReadFile(filePath)
+ if err != nil {
+ log.Errorf("Error while reading the file - %v", err)
+ }
+ log.Errorf("%s", string(content))
+ }
+}
diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go
index d51bdeb5fd4..aed11774cc8 100644
--- a/go/test/fuzzing/vtctl_fuzzer.go
+++ b/go/test/fuzzing/vtctl_fuzzer.go
@@ -95,13 +95,11 @@ func getCommandType(index int) string {
51: "ValidateKeyspace",
52: "Reshard",
53: "MoveTables",
- 54: "DropSources",
55: "CreateLookupVindex",
56: "ExternalizeVindex",
57: "Materialize",
60: "VDiff",
- 63: "SwitchReads",
- 64: "SwitchWrites",
+ 63: "SwitchTraffic",
67: "FindAllShardsInKeyspace",
}
return m[index]
@@ -170,8 +168,8 @@ func Fuzz(data []byte) int {
chunkSize := len(restOfArray) / numberOfCalls
command := 0
for i := 0; i < len(restOfArray); i = i + chunkSize {
- from := i //lower
- to := i + chunkSize //upper
+ from := i // lower
+ to := i + chunkSize // upper
// Index of command in getCommandType():
commandIndex := int(commandPart[command]) % 68
@@ -180,9 +178,7 @@ func Fuzz(data []byte) int {
args := strings.Split(string(restOfArray[from:to]), " ")
// Add params to the command
- for i := range args {
- commandSlice = append(commandSlice, args[i])
- }
+ commandSlice = append(commandSlice, args...)
_ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc), commandSlice)
command++
diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go
index 1e6e7a68be2..9e1275182d0 100644
--- a/go/tools/asthelpergen/asthelpergen.go
+++ b/go/tools/asthelpergen/asthelpergen.go
@@ -31,7 +31,7 @@ import (
"golang.org/x/tools/go/packages"
)
-const licenseFileHeader = `Copyright 2021 The Vitess Authors.
+const licenseFileHeader = `Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -190,16 +190,23 @@ var acceptableBuildErrorsOn = map[string]any{
"ast_visit.go": nil,
}
+type Options struct {
+ Packages []string
+ RootInterface string
+
+ Clone CloneOptions
+ Equals EqualsOptions
+}
+
// GenerateASTHelpers loads the input code, constructs the necessary generators,
// and generates the rewriter and clone methods for the AST
-func GenerateASTHelpers(packagePatterns []string, rootIface, exceptCloneType string) (map[string]*jen.File, error) {
+func GenerateASTHelpers(options *Options) (map[string]*jen.File, error) {
loaded, err := packages.Load(&packages.Config{
Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | packages.NeedDeps | packages.NeedImports | packages.NeedModule,
- }, packagePatterns...)
+ }, options.Packages...)
if err != nil {
- log.Fatal("error loading package")
- return nil, err
+ return nil, fmt.Errorf("failed to load packages: %w", err)
}
checkErrors(loaded, func(fileName string) bool {
@@ -212,17 +219,17 @@ func GenerateASTHelpers(packagePatterns []string, rootIface, exceptCloneType str
scopes[pkg.PkgPath] = pkg.Types.Scope()
}
- pos := strings.LastIndexByte(rootIface, '.')
+ pos := strings.LastIndexByte(options.RootInterface, '.')
if pos < 0 {
- return nil, fmt.Errorf("unexpected input type: %s", rootIface)
+ return nil, fmt.Errorf("unexpected input type: %s", options.RootInterface)
}
- pkgname := rootIface[:pos]
- typename := rootIface[pos+1:]
+ pkgname := options.RootInterface[:pos]
+ typename := options.RootInterface[pos+1:]
scope := scopes[pkgname]
if scope == nil {
- return nil, fmt.Errorf("no scope found for type '%s'", rootIface)
+ return nil, fmt.Errorf("no scope found for type '%s'", options.RootInterface)
}
tt := scope.Lookup(typename)
@@ -233,10 +240,11 @@ func GenerateASTHelpers(packagePatterns []string, rootIface, exceptCloneType str
nt := tt.Type().(*types.Named)
pName := nt.Obj().Pkg().Name()
generator := newGenerator(loaded[0].Module, loaded[0].TypesSizes, nt,
- newEqualsGen(pName),
- newCloneGen(pName, exceptCloneType),
+ newEqualsGen(pName, &options.Equals),
+ newCloneGen(pName, &options.Clone),
newVisitGen(pName),
newRewriterGen(pName, types.TypeString(nt, noQualifier)),
+ newCOWGen(pName, nt),
)
it, err := generator.GenerateCode()
diff --git a/go/tools/asthelpergen/asthelpergen_test.go b/go/tools/asthelpergen/asthelpergen_test.go
index 16372b13d75..ce5a59c84e7 100644
--- a/go/tools/asthelpergen/asthelpergen_test.go
+++ b/go/tools/asthelpergen/asthelpergen_test.go
@@ -25,7 +25,13 @@ import (
)
func TestFullGeneration(t *testing.T) {
- result, err := GenerateASTHelpers([]string{"./integration/..."}, "vitess.io/vitess/go/tools/asthelpergen/integration.AST", "*NoCloneType")
+ result, err := GenerateASTHelpers(&Options{
+ Packages: []string{"./integration/..."},
+ RootInterface: "vitess.io/vitess/go/tools/asthelpergen/integration.AST",
+ Clone: CloneOptions{
+ Exclude: []string{"*NoCloneType"},
+ },
+ })
require.NoError(t, err)
verifyErrors := VerifyFilesOnDisk(result)
diff --git a/go/tools/asthelpergen/clone_gen.go b/go/tools/asthelpergen/clone_gen.go
index 0228945baee..79251140845 100644
--- a/go/tools/asthelpergen/clone_gen.go
+++ b/go/tools/asthelpergen/clone_gen.go
@@ -23,26 +23,31 @@ import (
"strings"
"github.com/dave/jennifer/jen"
+ "golang.org/x/exp/slices"
)
+type CloneOptions struct {
+ Exclude []string
+}
+
// cloneGen creates the deep clone methods for the AST. It works by discovering the types that it needs to support,
// starting from a root interface type. While creating the clone method for this root interface, more types that need
// to be cloned are discovered. This continues type by type until all necessary types have been traversed.
type cloneGen struct {
- exceptType string
- file *jen.File
+ exclude []string
+ file *jen.File
}
var _ generator = (*cloneGen)(nil)
-func newCloneGen(pkgname string, exceptType string) *cloneGen {
+func newCloneGen(pkgname string, options *CloneOptions) *cloneGen {
file := jen.NewFile(pkgname)
file.HeaderComment(licenseFileHeader)
file.HeaderComment("Code generated by ASTHelperGen. DO NOT EDIT.")
return &cloneGen{
- exceptType: exceptType,
- file: file,
+ exclude: options.Exclude,
+ file: file,
}
}
@@ -88,7 +93,7 @@ func (c *cloneGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorSP
funcName := cloneName + name
c.addFunc(funcName,
- //func (n Bytes) Clone() Bytes {
+ // func (n Bytes) Clone() Bytes {
jen.Func().Id(funcName).Call(jen.Id("n").Id(typeString)).Id(typeString).Block(
// if n == nil { return nil }
ifNilReturnNil("n"),
@@ -111,9 +116,9 @@ func (c *cloneGen) copySliceElement(t types.Type, elType types.Type, spi generat
return jen.Id("copy").Call(jen.Id("res"), jen.Id("n"))
}
- //for i := range n {
+ // for i := range n {
// res[i] = CloneAST(x)
- //}
+ // }
spi.addType(elType)
return jen.For(jen.List(jen.Id("i"), jen.Id("x"))).Op(":=").Range().Id("n").Block(
@@ -123,17 +128,17 @@ func (c *cloneGen) copySliceElement(t types.Type, elType types.Type, spi generat
func (c *cloneGen) interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error {
- //func CloneAST(in AST) AST {
+ // func CloneAST(in AST) AST {
// if in == nil {
// return nil
- //}
+ // }
// switch in := in.(type) {
- //case *RefContainer:
+ // case *RefContainer:
// return in.CloneRefOfRefContainer()
- //}
+ // }
// // this should never happen
// return nil
- //}
+ // }
typeString := types.TypeString(t, noQualifier)
typeName := printableTypeName(t)
@@ -191,7 +196,7 @@ func (c *cloneGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generatorS
func (c *cloneGen) ptrToOtherMethod(t types.Type, ptr *types.Pointer, spi generatorSPI) error {
receiveType := types.TypeString(t, noQualifier)
- funcName := "Clone" + printableTypeName(t)
+ funcName := cloneName + printableTypeName(t)
c.addFunc(funcName,
jen.Func().Id(funcName).Call(jen.Id("n").Id(receiveType)).Id(receiveType).Block(
ifNilReturnNil("n"),
@@ -219,10 +224,10 @@ func (c *cloneGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi gene
receiveType := types.TypeString(t, noQualifier)
funcName := cloneName + printableTypeName(t)
- //func CloneRefOfType(n *Type) *Type
+ // func CloneRefOfType(n *Type) *Type
funcDeclaration := jen.Func().Id(funcName).Call(jen.Id("n").Id(receiveType)).Id(receiveType)
- if receiveType == c.exceptType {
+ if slices.Contains(c.exclude, receiveType) {
c.addFunc(funcName, funcDeclaration.Block(
jen.Return(jen.Id("n")),
))
diff --git a/go/tools/asthelpergen/copy_on_rewrite_gen.go b/go/tools/asthelpergen/copy_on_rewrite_gen.go
new file mode 100644
index 00000000000..09d00c26308
--- /dev/null
+++ b/go/tools/asthelpergen/copy_on_rewrite_gen.go
@@ -0,0 +1,389 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package asthelpergen
+
+import (
+ "go/types"
+
+ "github.com/dave/jennifer/jen"
+)
+
+type cowGen struct {
+ file *jen.File
+ baseType string
+}
+
+var _ generator = (*cowGen)(nil)
+
+func newCOWGen(pkgname string, nt *types.Named) *cowGen {
+ file := jen.NewFile(pkgname)
+ file.HeaderComment(licenseFileHeader)
+ file.HeaderComment("Code generated by ASTHelperGen. DO NOT EDIT.")
+
+ return &cowGen{
+ file: file,
+ baseType: nt.Obj().Id(),
+ }
+}
+
+func (c *cowGen) addFunc(code *jen.Statement) {
+ c.file.Add(code)
+}
+
+func (c *cowGen) genFile() (string, *jen.File) {
+ return "ast_copy_on_rewrite.go", c.file
+}
+
+const cowName = "copyOnRewrite"
+
+// readValueOfType produces code to read the expression of type `t`, and adds the type to the todo-list
+func (c *cowGen) readValueOfType(t types.Type, expr jen.Code, spi generatorSPI) jen.Code {
+ switch t.Underlying().(type) {
+ case *types.Interface:
+ if types.TypeString(t, noQualifier) == "any" {
+ // these fields have to be taken care of manually
+ return expr
+ }
+ }
+ spi.addType(t)
+ return jen.Id("c").Dot(cowName + printableTypeName(t)).Call(expr)
+}
+
+func (c *cowGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorSPI) error {
+ if !types.Implements(t, spi.iface()) {
+ return nil
+ }
+
+ typeString := types.TypeString(t, noQualifier)
+
+ changedVarName := "changed"
+ fieldVar := "res"
+ elemTyp := types.TypeString(slice.Elem(), noQualifier)
+
+ name := printableTypeName(t)
+ funcName := cowName + name
+ var visitElements *jen.Statement
+
+ if types.Implements(slice.Elem(), spi.iface()) {
+ visitElements = ifPreNotNilOrReturnsTrue().Block(
+ jen.Id(fieldVar).Op(":=").Id("make").Params(jen.Id(typeString), jen.Id("len").Params(jen.Id("n"))), // _Foo := make([]Typ, len(n))
+ jen.For(jen.List(jen.Id("x"), jen.Id("el")).Op(":=").Id("range n")).Block(
+ c.visitFieldOrElement("this", "change", slice.Elem(), jen.Id("el"), spi),
+ // jen.Id(fieldVar).Index(jen.Id("x")).Op("=").Id("this").Op(".").Params(jen.Id(types.TypeString(elemTyp, noQualifier))),
+ jen.Id(fieldVar).Index(jen.Id("x")).Op("=").Id("this").Op(".").Params(jen.Id(elemTyp)),
+ jen.If(jen.Id("change")).Block(
+ jen.Id(changedVarName).Op("=").True(),
+ ),
+ ),
+ jen.If(jen.Id("changed")).Block(
+ jen.Id("out").Op("=").Id("res"),
+ ),
+ )
+ } else {
+ visitElements = jen.If(jen.Id("c.pre != nil")).Block(
+ jen.Id("c.pre(n, parent)"),
+ )
+ }
+
+ block := c.funcDecl(funcName, typeString).Block(
+ ifNilReturnNilAndFalse("n"),
+ jen.Id("out").Op("=").Id("n"),
+ visitElements,
+ ifPostNotNilVisit("out"),
+ jen.Return(),
+ )
+ c.addFunc(block)
+ return nil
+}
+
+func (c *cowGen) basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) error {
+ if !types.Implements(t, spi.iface()) {
+ return nil
+ }
+
+ typeString := types.TypeString(t, noQualifier)
+ typeName := printableTypeName(t)
+
+ var stmts []jen.Code
+ stmts = append(stmts,
+ jen.If(jen.Id("c").Dot("cursor").Dot("stop")).Block(jen.Return(jen.Id("n"), jen.False())),
+ ifNotNil("c.pre", jen.Id("c.pre").Params(jen.Id("n"), jen.Id("parent"))),
+ ifNotNil("c.post", jen.List(jen.Id("out"), jen.Id("changed")).Op("=").Id("c.postVisit").Params(jen.Id("n"), jen.Id("parent"), jen.Id("changed"))).
+ Else().Block(jen.Id("out = n")),
+ jen.Return(),
+ )
+ funcName := cowName + typeName
+ funcDecl := c.funcDecl(funcName, typeString).Block(stmts...)
+ c.addFunc(funcDecl)
+ return nil
+}
+
+func (c *cowGen) copySliceElement(t types.Type, elType types.Type, spi generatorSPI) jen.Code {
+ if !isNamed(t) && isBasic(elType) {
+ // copy(res, n)
+ return jen.Id("copy").Call(jen.Id("res"), jen.Id("n"))
+ }
+
+ // for i := range n {
+ // res[i] = CloneAST(x)
+ // }
+ spi.addType(elType)
+
+ return jen.For(jen.List(jen.Id("i"), jen.Id("x"))).Op(":=").Range().Id("n").Block(
+ jen.Id("res").Index(jen.Id("i")).Op("=").Add(c.readValueOfType(elType, jen.Id("x"), spi)),
+ )
+}
+
+func ifNotNil(id string, stmts ...jen.Code) *jen.Statement {
+ return jen.If(jen.Id(id).Op("!=").Nil()).Block(stmts...)
+}
+
+func ifNilReturnNilAndFalse(id string) *jen.Statement {
+ return jen.If(jen.Id(id).Op("==").Nil().Op("||").Id("c").Dot("cursor").Dot("stop")).Block(jen.Return(jen.Id("n"), jen.False()))
+}
+
+func ifPreNotNilOrReturnsTrue() *jen.Statement {
+ // if c.pre == nil || c.pre(n, parent) {
+ return jen.If(
+ jen.Id("c").Dot("pre").Op("==").Nil().Op("||").Id("c").Dot("pre").Params(
+ jen.Id("n"),
+ jen.Id("parent"),
+ ))
+
+}
+
+func (c *cowGen) interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error {
+ if !types.Implements(t, spi.iface()) {
+ return nil
+ }
+
+ // func (c cow) cowAST(in AST) (AST, bool) {
+ // if in == nil {
+ // return nil, false
+ // }
+ //
+ // if c.old == in {
+ // return c.new, true
+ // }
+ // switch in := in.(type) {
+ // case *RefContainer:
+ // return c.CowRefOfRefContainer(in)
+ // }
+ // // this should never happen
+ // return nil
+ // }
+
+ typeString := types.TypeString(t, noQualifier)
+ typeName := printableTypeName(t)
+
+ stmts := []jen.Code{ifNilReturnNilAndFalse("n")}
+
+ var cases []jen.Code
+ _ = findImplementations(spi.scope(), iface, func(t types.Type) error {
+ if _, ok := t.Underlying().(*types.Interface); ok {
+ return nil
+ }
+ spi.addType(t)
+ typeString := types.TypeString(t, noQualifier)
+
+ // case Type: return CloneType(in)
+ block := jen.Case(jen.Id(typeString)).Block(jen.Return(c.readValueOfType(t, jen.List(jen.Id("n"), jen.Id("parent")), spi)))
+ cases = append(cases, block)
+
+ return nil
+ })
+
+ cases = append(cases,
+ jen.Default().Block(
+ jen.Comment("this should never happen"),
+ jen.Return(jen.Nil(), jen.False()),
+ ))
+
+ // switch n := node.(type) {
+ stmts = append(stmts, jen.Switch(jen.Id("n").Op(":=").Id("n").Assert(jen.Id("type")).Block(
+ cases...,
+ )))
+
+ funcName := cowName + typeName
+ funcDecl := c.funcDecl(funcName, typeString).Block(stmts...)
+ c.addFunc(funcDecl)
+ return nil
+}
+
+func (c *cowGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generatorSPI) error {
+ if !types.Implements(t, spi.iface()) {
+ return nil
+ }
+
+ ptr := t.Underlying().(*types.Pointer)
+ return c.ptrToOtherMethod(t, ptr, spi)
+}
+
+func (c *cowGen) ptrToOtherMethod(t types.Type, ptr *types.Pointer, spi generatorSPI) error {
+ if !types.Implements(t, spi.iface()) {
+ return nil
+ }
+
+ receiveType := types.TypeString(t, noQualifier)
+
+ funcName := cowName + printableTypeName(t)
+ c.addFunc(c.funcDecl(funcName, receiveType).Block(
+ jen.Comment("apan was here"),
+ jen.Return(jen.Id("n"), jen.False()),
+ ))
+ return nil
+}
+
+// func (c cow) COWRefOfType(n *Type) (*Type, bool)
+func (c *cowGen) funcDecl(funcName, typeName string) *jen.Statement {
+ return jen.Func().Params(jen.Id("c").Id("*cow")).Id(funcName).Call(jen.List(jen.Id("n").Id(typeName), jen.Id("parent").Id(c.baseType))).Params(jen.Id("out").Id(c.baseType), jen.Id("changed").Id("bool"))
+}
+
+func (c *cowGen) visitFieldOrElement(varName, changedVarName string, typ types.Type, el *jen.Statement, spi generatorSPI) *jen.Statement {
+ // _Field, changedField := c.COWType(n., n)
+ return jen.List(jen.Id(varName), jen.Id(changedVarName)).Op(":=").Add(c.readValueOfType(typ, jen.List(el, jen.Id("n")), spi))
+}
+
+func (c *cowGen) structMethod(t types.Type, strct *types.Struct, spi generatorSPI) error {
+ if !types.Implements(t, spi.iface()) {
+ return nil
+ }
+
+ c.visitStruct(t, strct, spi, nil, false)
+ return nil
+}
+
+func (c *cowGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi generatorSPI) error {
+ if !types.Implements(t, spi.iface()) {
+ return nil
+ }
+ start := ifNilReturnNilAndFalse("n")
+
+ c.visitStruct(t, strct, spi, start, true)
+ return nil
+}
+
+func (c *cowGen) visitStruct(t types.Type, strct *types.Struct, spi generatorSPI, start *jen.Statement, ref bool) {
+ receiveType := types.TypeString(t, noQualifier)
+ funcName := cowName + printableTypeName(t)
+
+ funcDeclaration := c.funcDecl(funcName, receiveType)
+
+ var fields []jen.Code
+ out := "out"
+ changed := "res"
+ var fieldSetters []jen.Code
+ kopy := jen.Id(changed).Op(":=")
+ if ref {
+ fieldSetters = append(fieldSetters, kopy.Op("*").Id("n")) // changed := *n
+ } else {
+ fieldSetters = append(fieldSetters, kopy.Id("n")) // changed := n
+ }
+ var changedVariables []string
+ for i := 0; i < strct.NumFields(); i++ {
+ field := strct.Field(i).Name()
+ typ := strct.Field(i).Type()
+ changedVarName := "changed" + field
+
+ fieldType := types.TypeString(typ, noQualifier)
+ fieldVar := "_" + field
+ if types.Implements(typ, spi.iface()) {
+ fields = append(fields, c.visitFieldOrElement(fieldVar, changedVarName, typ, jen.Id("n").Dot(field), spi))
+ changedVariables = append(changedVariables, changedVarName)
+ fieldSetters = append(fieldSetters, jen.List(jen.Id(changed).Dot(field), jen.Op("_")).Op("=").Id(fieldVar).Op(".").Params(jen.Id(fieldType)))
+ } else {
+ // _Foo := make([]*Type, len(n.Foo))
+ // var changedFoo bool
+ // for x, el := range n.Foo {
+ // c, changed := c.COWSliceOfRefOfType(el, n)
+ // if changed {
+ // changedFoo = true
+ // }
+ // _Foo[i] = c.(*Type)
+ // }
+
+ slice, isSlice := typ.(*types.Slice)
+ if isSlice && types.Implements(slice.Elem(), spi.iface()) {
+ elemTyp := slice.Elem()
+ spi.addType(elemTyp)
+ x := jen.Id("x")
+ el := jen.Id("el")
+ // changed := jen.Id("changed")
+ fields = append(fields,
+ jen.Var().Id(changedVarName).Bool(), // var changedFoo bool
+ jen.Id(fieldVar).Op(":=").Id("make").Params(jen.Id(fieldType), jen.Id("len").Params(jen.Id("n").Dot(field))), // _Foo := make([]Typ, len(n.Foo))
+ jen.For(jen.List(x, el).Op(":=").Id("range n").Dot(field)).Block(
+ c.visitFieldOrElement("this", "changed", elemTyp, jen.Id("el"), spi),
+ jen.Id(fieldVar).Index(jen.Id("x")).Op("=").Id("this").Op(".").Params(jen.Id(types.TypeString(elemTyp, noQualifier))),
+ jen.If(jen.Id("changed")).Block(
+ jen.Id(changedVarName).Op("=").True(),
+ ),
+ ),
+ )
+ changedVariables = append(changedVariables, changedVarName)
+ fieldSetters = append(fieldSetters, jen.Id(changed).Dot(field).Op("=").Id(fieldVar))
+ }
+ }
+ }
+
+ var cond *jen.Statement
+ for _, variable := range changedVariables {
+ if cond == nil {
+ cond = jen.Id(variable)
+ } else {
+ cond = cond.Op("||").Add(jen.Id(variable))
+ }
+
+ }
+
+ fieldSetters = append(fieldSetters,
+ jen.Id(out).Op("=").Op("&").Id(changed),
+ ifNotNil("c.cloned", jen.Id("c.cloned").Params(jen.Id("n, out"))),
+ jen.Id("changed").Op("=").True(),
+ )
+ ifChanged := jen.If(cond).Block(fieldSetters...)
+
+ var stmts []jen.Code
+ if start != nil {
+ stmts = append(stmts, start)
+ }
+
+ // handle all fields with CloneAble types
+ var visitChildren []jen.Code
+ visitChildren = append(visitChildren, fields...)
+ if len(fieldSetters) > 4 /*we add three statements always*/ {
+ visitChildren = append(visitChildren, ifChanged)
+ }
+
+ children := ifPreNotNilOrReturnsTrue().Block(visitChildren...)
+ stmts = append(stmts,
+ jen.Id(out).Op("=").Id("n"),
+ children,
+ )
+
+ stmts = append(
+ stmts,
+ ifPostNotNilVisit(out),
+ jen.Return(),
+ )
+
+ c.addFunc(funcDeclaration.Block(stmts...))
+}
+
+func ifPostNotNilVisit(out string) *jen.Statement {
+ return ifNotNil("c.post", jen.List(jen.Id(out), jen.Id("changed")).Op("=").Id("c").Dot("postVisit").Params(jen.Id(out), jen.Id("parent"), jen.Id("changed")))
+}
diff --git a/go/tools/asthelpergen/equals_gen.go b/go/tools/asthelpergen/equals_gen.go
index 16dde161732..e00c3ef596a 100644
--- a/go/tools/asthelpergen/equals_gen.go
+++ b/go/tools/asthelpergen/equals_gen.go
@@ -24,21 +24,32 @@ import (
"github.com/dave/jennifer/jen"
)
-const equalsName = "Equals"
+const Comparator = "Comparator"
+
+type EqualsOptions struct {
+ AllowCustom []string
+}
type equalsGen struct {
- file *jen.File
+ file *jen.File
+ comparators map[string]types.Type
}
var _ generator = (*equalsGen)(nil)
-func newEqualsGen(pkgname string) *equalsGen {
+func newEqualsGen(pkgname string, options *EqualsOptions) *equalsGen {
file := jen.NewFile(pkgname)
file.HeaderComment(licenseFileHeader)
file.HeaderComment("Code generated by ASTHelperGen. DO NOT EDIT.")
+ customComparators := make(map[string]types.Type, len(options.AllowCustom))
+ for _, tt := range options.AllowCustom {
+ customComparators[tt] = nil
+ }
+
return &equalsGen{
- file: file,
+ file: file,
+ comparators: customComparators,
}
}
@@ -47,13 +58,27 @@ func (e *equalsGen) addFunc(name string, code *jen.Statement) {
e.file.Add(code)
}
+func (e *equalsGen) customComparatorField(t types.Type) string {
+ return printableTypeName(t) + "_"
+}
+
func (e *equalsGen) genFile() (string, *jen.File) {
+ e.file.Type().Id(Comparator).StructFunc(func(g *jen.Group) {
+ for tname, t := range e.comparators {
+ if t == nil {
+ continue
+ }
+ method := e.customComparatorField(t)
+ g.Add(jen.Id(method).Func().Call(jen.List(jen.Id("a"), jen.Id("b")).Id(tname)).Bool())
+ }
+ })
+
return "ast_equals.go", e.file
}
func (e *equalsGen) interfaceMethod(t types.Type, iface *types.Interface, spi generatorSPI) error {
/*
- func EqualsAST(inA, inB AST) bool {
+ func (cmp *Comparator) AST(inA, inB AST) bool {
if inA == inB {
return true
}
@@ -66,7 +91,7 @@ func (e *equalsGen) interfaceMethod(t types.Type, iface *types.Interface, spi ge
if !ok {
return false
}
- return EqualsSubImpl(a, b)
+ return cmp.SubImpl(a, b)
}
return false
}
@@ -101,10 +126,8 @@ func (e *equalsGen) interfaceMethod(t types.Type, iface *types.Interface, spi ge
cases...,
)))
- typeString := types.TypeString(t, noQualifier)
- funcName := equalsName + printableTypeName(t)
- funcDecl := jen.Func().Id(funcName).Call(jen.List(jen.Id("inA"), jen.Id("inB")).Id(typeString)).Bool().Block(stmts...)
- e.addFunc(funcName, funcDecl)
+ funcDecl, funcName := e.declareFunc(t, "inA", "inB")
+ e.addFunc(funcName, funcDecl.Block(stmts...))
return nil
}
@@ -118,27 +141,23 @@ func compareValueType(t types.Type, a, b *jen.Statement, eq bool, spi generatorS
return a.Op("!=").Add(b)
}
spi.addType(t)
- var neg = "!"
- if eq {
- neg = ""
+ fcall := jen.Id("cmp").Dot(printableTypeName(t)).Call(a, b)
+ if !eq {
+ return jen.Op("!").Add(fcall)
}
- return jen.Id(neg+equalsName+printableTypeName(t)).Call(a, b)
+ return fcall
}
func (e *equalsGen) structMethod(t types.Type, strct *types.Struct, spi generatorSPI) error {
/*
- func EqualsRefOfRefContainer(inA RefContainer, inB RefContainer) bool {
- return EqualsRefOfLeaf(inA.ASTImplementationType, inB.ASTImplementationType) &&
- EqualsAST(inA.ASTType, inB.ASTType) && inA.NotASTType == inB.NotASTType
+ func EqualsRefOfRefContainer(inA RefContainer, inB RefContainer, f ASTComparison) bool {
+ return EqualsRefOfLeaf(inA.ASTImplementationType, inB.ASTImplementationType, f) &&
+ EqualsAST(inA.ASTType, inB.ASTType, f) && inA.NotASTType == inB.NotASTType
}
-
*/
- typeString := types.TypeString(t, noQualifier)
- funcName := equalsName + printableTypeName(t)
- funcDecl := jen.Func().Id(funcName).Call(jen.List(jen.Id("a"), jen.Id("b")).Id(typeString)).Bool().
- Block(jen.Return(compareAllStructFields(strct, spi)))
- e.addFunc(funcName, funcDecl)
+ funcDecl, funcName := e.declareFunc(t, "a", "b")
+ e.addFunc(funcName, funcDecl.Block(jen.Return(compareAllStructFields(strct, spi))))
return nil
}
@@ -186,24 +205,51 @@ func compareAllStructFields(strct *types.Struct, spi generatorSPI) jen.Code {
}
func (e *equalsGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi generatorSPI) error {
- typeString := types.TypeString(t, noQualifier)
- funcName := equalsName + printableTypeName(t)
+ /*
+ func EqualsRefOfType(a, b *Type, f ASTComparison) *Type {
+ if a == b {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
- //func EqualsRefOfType(a,b *Type) *Type
- funcDeclaration := jen.Func().Id(funcName).Call(jen.Id("a"), jen.Id("b").Id(typeString)).Bool()
+ // only if it is a *ColName
+ if f != nil {
+ return f.ColNames(a, b)
+ }
+
+ return compareAllStructFields
+ }
+ */
+ // func EqualsRefOfType(a,b *Type) *Type
+ funcDeclaration, funcName := e.declareFunc(t, "a", "b")
stmts := []jen.Code{
jen.If(jen.Id("a == b")).Block(jen.Return(jen.True())),
jen.If(jen.Id("a == nil").Op("||").Id("b == nil")).Block(jen.Return(jen.False())),
- jen.Return(compareAllStructFields(strct, spi)),
}
+ typeString := types.TypeString(t, noQualifier)
+
+ if _, ok := e.comparators[typeString]; ok {
+ e.comparators[typeString] = t
+
+ method := e.customComparatorField(t)
+ stmts = append(stmts,
+ jen.If(jen.Id("cmp").Dot(method).Op("!=").Nil()).Block(
+ jen.Return(jen.Id("cmp").Dot(method).Call(jen.Id("a"), jen.Id("b"))),
+ ))
+ }
+
+ stmts = append(stmts, jen.Return(compareAllStructFields(strct, spi)))
+
e.addFunc(funcName, funcDeclaration.Block(stmts...))
return nil
}
func (e *equalsGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generatorSPI) error {
/*
- func EqualsRefOfBool(a, b *bool) bool {
+ func EqualsRefOfBool(a, b *bool, f ASTComparison) bool {
if a == b {
return true
}
@@ -213,11 +259,7 @@ func (e *equalsGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generator
return *a == *b
}
*/
- typeString := types.TypeString(t, noQualifier)
- funcName := equalsName + printableTypeName(t)
-
- //func EqualsRefOfType(a,b *Type) *Type
- funcDeclaration := jen.Func().Id(funcName).Call(jen.Id("a"), jen.Id("b").Id(typeString)).Bool()
+ funcDeclaration, funcName := e.declareFunc(t, "a", "b")
stmts := []jen.Code{
jen.If(jen.Id("a == b")).Block(jen.Return(jen.True())),
jen.If(jen.Id("a == nil").Op("||").Id("b == nil")).Block(jen.Return(jen.False())),
@@ -227,6 +269,14 @@ func (e *equalsGen) ptrToBasicMethod(t types.Type, _ *types.Basic, spi generator
return nil
}
+func (e *equalsGen) declareFunc(t types.Type, aArg, bArg string) (*jen.Statement, string) {
+ typeString := types.TypeString(t, noQualifier)
+ funcName := printableTypeName(t)
+
+ // func EqualsFunNameS(a, b , f ASTComparison) bool
+ return jen.Func().Params(jen.Id("cmp").Op("*").Id(Comparator)).Id(funcName).Call(jen.Id(aArg), jen.Id(bArg).Id(typeString)).Bool(), funcName
+}
+
func (e *equalsGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorSPI) error {
/*
func EqualsSliceOfRefOfLeaf(a, b []*Leaf) bool {
@@ -248,13 +298,11 @@ func (e *equalsGen) sliceMethod(t types.Type, slice *types.Slice, spi generatorS
jen.Return(jen.True()),
}
- typeString := types.TypeString(t, noQualifier)
- funcName := equalsName + printableTypeName(t)
- funcDecl := jen.Func().Id(funcName).Call(jen.List(jen.Id("a"), jen.Id("b")).Id(typeString)).Bool().Block(stmts...)
- e.addFunc(funcName, funcDecl)
+ funcDecl, funcName := e.declareFunc(t, "a", "b")
+ e.addFunc(funcName, funcDecl.Block(stmts...))
return nil
}
-func (e *equalsGen) basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) error {
+func (e *equalsGen) basicMethod(types.Type, *types.Basic, generatorSPI) error {
return nil
}
diff --git a/go/tools/asthelpergen/integration/ast_clone.go b/go/tools/asthelpergen/integration/ast_clone.go
index 3ed3dc1be2c..d5857c89834 100644
--- a/go/tools/asthelpergen/integration/ast_clone.go
+++ b/go/tools/asthelpergen/integration/ast_clone.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/go/tools/asthelpergen/integration/ast_copy_on_rewrite.go b/go/tools/asthelpergen/integration/ast_copy_on_rewrite.go
new file mode 100644
index 00000000000..d48e8621692
--- /dev/null
+++ b/go/tools/asthelpergen/integration/ast_copy_on_rewrite.go
@@ -0,0 +1,391 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by ASTHelperGen. DO NOT EDIT.
+
+package integration
+
+func (c *cow) copyOnRewriteAST(n AST, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case BasicType:
+ return c.copyOnRewriteBasicType(n, parent)
+ case Bytes:
+ return c.copyOnRewriteBytes(n, parent)
+ case InterfaceContainer:
+ return c.copyOnRewriteInterfaceContainer(n, parent)
+ case InterfaceSlice:
+ return c.copyOnRewriteInterfaceSlice(n, parent)
+ case *Leaf:
+ return c.copyOnRewriteRefOfLeaf(n, parent)
+ case LeafSlice:
+ return c.copyOnRewriteLeafSlice(n, parent)
+ case *NoCloneType:
+ return c.copyOnRewriteRefOfNoCloneType(n, parent)
+ case *RefContainer:
+ return c.copyOnRewriteRefOfRefContainer(n, parent)
+ case *RefSliceContainer:
+ return c.copyOnRewriteRefOfRefSliceContainer(n, parent)
+ case *SubImpl:
+ return c.copyOnRewriteRefOfSubImpl(n, parent)
+ case ValueContainer:
+ return c.copyOnRewriteValueContainer(n, parent)
+ case ValueSliceContainer:
+ return c.copyOnRewriteValueSliceContainer(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteBytes(n Bytes, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteInterfaceContainer(n InterfaceContainer, parent AST) (out AST, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteInterfaceSlice(n InterfaceSlice, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(InterfaceSlice, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteAST(el, n)
+ res[x] = this.(AST)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLeaf(n *Leaf, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteLeafSlice(n LeafSlice, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(LeafSlice, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfLeaf(el, n)
+ res[x] = this.(*Leaf)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNoCloneType(n *NoCloneType, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRefContainer(n *RefContainer, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ASTType, changedASTType := c.copyOnRewriteAST(n.ASTType, n)
+ _ASTImplementationType, changedASTImplementationType := c.copyOnRewriteRefOfLeaf(n.ASTImplementationType, n)
+ if changedASTType || changedASTImplementationType {
+ res := *n
+ res.ASTType, _ = _ASTType.(AST)
+ res.ASTImplementationType, _ = _ASTImplementationType.(*Leaf)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRefSliceContainer(n *RefSliceContainer, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedASTElements bool
+ _ASTElements := make([]AST, len(n.ASTElements))
+ for x, el := range n.ASTElements {
+ this, changed := c.copyOnRewriteAST(el, n)
+ _ASTElements[x] = this.(AST)
+ if changed {
+ changedASTElements = true
+ }
+ }
+ var changedASTImplementationElements bool
+ _ASTImplementationElements := make([]*Leaf, len(n.ASTImplementationElements))
+ for x, el := range n.ASTImplementationElements {
+ this, changed := c.copyOnRewriteRefOfLeaf(el, n)
+ _ASTImplementationElements[x] = this.(*Leaf)
+ if changed {
+ changedASTImplementationElements = true
+ }
+ }
+ if changedASTElements || changedASTImplementationElements {
+ res := *n
+ res.ASTElements = _ASTElements
+ res.ASTImplementationElements = _ASTImplementationElements
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSubImpl(n *SubImpl, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _inner, changedinner := c.copyOnRewriteSubIface(n.inner, n)
+ if changedinner {
+ res := *n
+ res.inner, _ = _inner.(SubIface)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteValueContainer(n ValueContainer, parent AST) (out AST, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ASTType, changedASTType := c.copyOnRewriteAST(n.ASTType, n)
+ _ASTImplementationType, changedASTImplementationType := c.copyOnRewriteRefOfLeaf(n.ASTImplementationType, n)
+ if changedASTType || changedASTImplementationType {
+ res := n
+ res.ASTType, _ = _ASTType.(AST)
+ res.ASTImplementationType, _ = _ASTImplementationType.(*Leaf)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteValueSliceContainer(n ValueSliceContainer, parent AST) (out AST, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedASTElements bool
+ _ASTElements := make([]AST, len(n.ASTElements))
+ for x, el := range n.ASTElements {
+ this, changed := c.copyOnRewriteAST(el, n)
+ _ASTElements[x] = this.(AST)
+ if changed {
+ changedASTElements = true
+ }
+ }
+ var changedASTImplementationElements bool
+ _ASTImplementationElements := make([]*Leaf, len(n.ASTImplementationElements))
+ for x, el := range n.ASTImplementationElements {
+ this, changed := c.copyOnRewriteRefOfLeaf(el, n)
+ _ASTImplementationElements[x] = this.(*Leaf)
+ if changed {
+ changedASTImplementationElements = true
+ }
+ }
+ if changedASTElements || changedASTImplementationElements {
+ res := n
+ res.ASTElements = _ASTElements
+ res.ASTImplementationElements = _ASTImplementationElements
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteSubIface(n SubIface, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *SubImpl:
+ return c.copyOnRewriteRefOfSubImpl(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteBasicType(n BasicType, parent AST) (out AST, changed bool) {
+ if c.cursor.stop {
+ return n, false
+ }
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(n, parent, changed)
+ } else {
+ out = n
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfInterfaceContainer(n *InterfaceContainer, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfValueContainer(n *ValueContainer, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ASTType, changedASTType := c.copyOnRewriteAST(n.ASTType, n)
+ _ASTImplementationType, changedASTImplementationType := c.copyOnRewriteRefOfLeaf(n.ASTImplementationType, n)
+ if changedASTType || changedASTImplementationType {
+ res := *n
+ res.ASTType, _ = _ASTType.(AST)
+ res.ASTImplementationType, _ = _ASTImplementationType.(*Leaf)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfValueSliceContainer(n *ValueSliceContainer, parent AST) (out AST, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedASTElements bool
+ _ASTElements := make([]AST, len(n.ASTElements))
+ for x, el := range n.ASTElements {
+ this, changed := c.copyOnRewriteAST(el, n)
+ _ASTElements[x] = this.(AST)
+ if changed {
+ changedASTElements = true
+ }
+ }
+ var changedASTImplementationElements bool
+ _ASTImplementationElements := make([]*Leaf, len(n.ASTImplementationElements))
+ for x, el := range n.ASTImplementationElements {
+ this, changed := c.copyOnRewriteRefOfLeaf(el, n)
+ _ASTImplementationElements[x] = this.(*Leaf)
+ if changed {
+ changedASTImplementationElements = true
+ }
+ }
+ if changedASTElements || changedASTImplementationElements {
+ res := *n
+ res.ASTElements = _ASTElements
+ res.ASTImplementationElements = _ASTImplementationElements
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
diff --git a/go/tools/asthelpergen/integration/ast_equals.go b/go/tools/asthelpergen/integration/ast_equals.go
index 95bce62e7a6..553851a8c97 100644
--- a/go/tools/asthelpergen/integration/ast_equals.go
+++ b/go/tools/asthelpergen/integration/ast_equals.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ limitations under the License.
package integration
-// EqualsAST does deep equals between the two objects.
-func EqualsAST(inA, inB AST) bool {
+// AST does deep equals between the two objects.
+func (cmp *Comparator) AST(inA, inB AST) bool {
if inA == nil && inB == nil {
return true
}
@@ -37,75 +37,75 @@ func EqualsAST(inA, inB AST) bool {
if !ok {
return false
}
- return EqualsBytes(a, b)
+ return cmp.Bytes(a, b)
case InterfaceContainer:
b, ok := inB.(InterfaceContainer)
if !ok {
return false
}
- return EqualsInterfaceContainer(a, b)
+ return cmp.InterfaceContainer(a, b)
case InterfaceSlice:
b, ok := inB.(InterfaceSlice)
if !ok {
return false
}
- return EqualsInterfaceSlice(a, b)
+ return cmp.InterfaceSlice(a, b)
case *Leaf:
b, ok := inB.(*Leaf)
if !ok {
return false
}
- return EqualsRefOfLeaf(a, b)
+ return cmp.RefOfLeaf(a, b)
case LeafSlice:
b, ok := inB.(LeafSlice)
if !ok {
return false
}
- return EqualsLeafSlice(a, b)
+ return cmp.LeafSlice(a, b)
case *NoCloneType:
b, ok := inB.(*NoCloneType)
if !ok {
return false
}
- return EqualsRefOfNoCloneType(a, b)
+ return cmp.RefOfNoCloneType(a, b)
case *RefContainer:
b, ok := inB.(*RefContainer)
if !ok {
return false
}
- return EqualsRefOfRefContainer(a, b)
+ return cmp.RefOfRefContainer(a, b)
case *RefSliceContainer:
b, ok := inB.(*RefSliceContainer)
if !ok {
return false
}
- return EqualsRefOfRefSliceContainer(a, b)
+ return cmp.RefOfRefSliceContainer(a, b)
case *SubImpl:
b, ok := inB.(*SubImpl)
if !ok {
return false
}
- return EqualsRefOfSubImpl(a, b)
+ return cmp.RefOfSubImpl(a, b)
case ValueContainer:
b, ok := inB.(ValueContainer)
if !ok {
return false
}
- return EqualsValueContainer(a, b)
+ return cmp.ValueContainer(a, b)
case ValueSliceContainer:
b, ok := inB.(ValueSliceContainer)
if !ok {
return false
}
- return EqualsValueSliceContainer(a, b)
+ return cmp.ValueSliceContainer(a, b)
default:
// this should never happen
return false
}
}
-// EqualsBytes does deep equals between the two objects.
-func EqualsBytes(a, b Bytes) bool {
+// Bytes does deep equals between the two objects.
+func (cmp *Comparator) Bytes(a, b Bytes) bool {
if len(a) != len(b) {
return false
}
@@ -117,26 +117,26 @@ func EqualsBytes(a, b Bytes) bool {
return true
}
-// EqualsInterfaceContainer does deep equals between the two objects.
-func EqualsInterfaceContainer(a, b InterfaceContainer) bool {
+// InterfaceContainer does deep equals between the two objects.
+func (cmp *Comparator) InterfaceContainer(a, b InterfaceContainer) bool {
return true
}
-// EqualsInterfaceSlice does deep equals between the two objects.
-func EqualsInterfaceSlice(a, b InterfaceSlice) bool {
+// InterfaceSlice does deep equals between the two objects.
+func (cmp *Comparator) InterfaceSlice(a, b InterfaceSlice) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsAST(a[i], b[i]) {
+ if !cmp.AST(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfLeaf does deep equals between the two objects.
-func EqualsRefOfLeaf(a, b *Leaf) bool {
+// RefOfLeaf does deep equals between the two objects.
+func (cmp *Comparator) RefOfLeaf(a, b *Leaf) bool {
if a == b {
return true
}
@@ -146,21 +146,21 @@ func EqualsRefOfLeaf(a, b *Leaf) bool {
return a.v == b.v
}
-// EqualsLeafSlice does deep equals between the two objects.
-func EqualsLeafSlice(a, b LeafSlice) bool {
+// LeafSlice does deep equals between the two objects.
+func (cmp *Comparator) LeafSlice(a, b LeafSlice) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfLeaf(a[i], b[i]) {
+ if !cmp.RefOfLeaf(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfNoCloneType does deep equals between the two objects.
-func EqualsRefOfNoCloneType(a, b *NoCloneType) bool {
+// RefOfNoCloneType does deep equals between the two objects.
+func (cmp *Comparator) RefOfNoCloneType(a, b *NoCloneType) bool {
if a == b {
return true
}
@@ -170,8 +170,8 @@ func EqualsRefOfNoCloneType(a, b *NoCloneType) bool {
return a.v == b.v
}
-// EqualsRefOfRefContainer does deep equals between the two objects.
-func EqualsRefOfRefContainer(a, b *RefContainer) bool {
+// RefOfRefContainer does deep equals between the two objects.
+func (cmp *Comparator) RefOfRefContainer(a, b *RefContainer) bool {
if a == b {
return true
}
@@ -179,51 +179,51 @@ func EqualsRefOfRefContainer(a, b *RefContainer) bool {
return false
}
return a.NotASTType == b.NotASTType &&
- EqualsAST(a.ASTType, b.ASTType) &&
- EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
+ cmp.AST(a.ASTType, b.ASTType) &&
+ cmp.RefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
}
-// EqualsRefOfRefSliceContainer does deep equals between the two objects.
-func EqualsRefOfRefSliceContainer(a, b *RefSliceContainer) bool {
+// RefOfRefSliceContainer does deep equals between the two objects.
+func (cmp *Comparator) RefOfRefSliceContainer(a, b *RefSliceContainer) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSliceOfAST(a.ASTElements, b.ASTElements) &&
- EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) &&
- EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
+ return cmp.SliceOfAST(a.ASTElements, b.ASTElements) &&
+ cmp.SliceOfInt(a.NotASTElements, b.NotASTElements) &&
+ cmp.SliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
}
-// EqualsRefOfSubImpl does deep equals between the two objects.
-func EqualsRefOfSubImpl(a, b *SubImpl) bool {
+// RefOfSubImpl does deep equals between the two objects.
+func (cmp *Comparator) RefOfSubImpl(a, b *SubImpl) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSubIface(a.inner, b.inner) &&
- EqualsRefOfBool(a.field, b.field)
+ return cmp.SubIface(a.inner, b.inner) &&
+ cmp.RefOfBool(a.field, b.field)
}
-// EqualsValueContainer does deep equals between the two objects.
-func EqualsValueContainer(a, b ValueContainer) bool {
+// ValueContainer does deep equals between the two objects.
+func (cmp *Comparator) ValueContainer(a, b ValueContainer) bool {
return a.NotASTType == b.NotASTType &&
- EqualsAST(a.ASTType, b.ASTType) &&
- EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
+ cmp.AST(a.ASTType, b.ASTType) &&
+ cmp.RefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
}
-// EqualsValueSliceContainer does deep equals between the two objects.
-func EqualsValueSliceContainer(a, b ValueSliceContainer) bool {
- return EqualsSliceOfAST(a.ASTElements, b.ASTElements) &&
- EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) &&
- EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
+// ValueSliceContainer does deep equals between the two objects.
+func (cmp *Comparator) ValueSliceContainer(a, b ValueSliceContainer) bool {
+ return cmp.SliceOfAST(a.ASTElements, b.ASTElements) &&
+ cmp.SliceOfInt(a.NotASTElements, b.NotASTElements) &&
+ cmp.SliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
}
-// EqualsSubIface does deep equals between the two objects.
-func EqualsSubIface(inA, inB SubIface) bool {
+// SubIface does deep equals between the two objects.
+func (cmp *Comparator) SubIface(inA, inB SubIface) bool {
if inA == nil && inB == nil {
return true
}
@@ -236,15 +236,15 @@ func EqualsSubIface(inA, inB SubIface) bool {
if !ok {
return false
}
- return EqualsRefOfSubImpl(a, b)
+ return cmp.RefOfSubImpl(a, b)
default:
// this should never happen
return false
}
}
-// EqualsRefOfInterfaceContainer does deep equals between the two objects.
-func EqualsRefOfInterfaceContainer(a, b *InterfaceContainer) bool {
+// RefOfInterfaceContainer does deep equals between the two objects.
+func (cmp *Comparator) RefOfInterfaceContainer(a, b *InterfaceContainer) bool {
if a == b {
return true
}
@@ -254,21 +254,21 @@ func EqualsRefOfInterfaceContainer(a, b *InterfaceContainer) bool {
return true
}
-// EqualsSliceOfAST does deep equals between the two objects.
-func EqualsSliceOfAST(a, b []AST) bool {
+// SliceOfAST does deep equals between the two objects.
+func (cmp *Comparator) SliceOfAST(a, b []AST) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsAST(a[i], b[i]) {
+ if !cmp.AST(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfInt does deep equals between the two objects.
-func EqualsSliceOfInt(a, b []int) bool {
+// SliceOfInt does deep equals between the two objects.
+func (cmp *Comparator) SliceOfInt(a, b []int) bool {
if len(a) != len(b) {
return false
}
@@ -280,21 +280,21 @@ func EqualsSliceOfInt(a, b []int) bool {
return true
}
-// EqualsSliceOfRefOfLeaf does deep equals between the two objects.
-func EqualsSliceOfRefOfLeaf(a, b []*Leaf) bool {
+// SliceOfRefOfLeaf does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfLeaf(a, b []*Leaf) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfLeaf(a[i], b[i]) {
+ if !cmp.RefOfLeaf(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfBool does deep equals between the two objects.
-func EqualsRefOfBool(a, b *bool) bool {
+// RefOfBool does deep equals between the two objects.
+func (cmp *Comparator) RefOfBool(a, b *bool) bool {
if a == b {
return true
}
@@ -304,8 +304,8 @@ func EqualsRefOfBool(a, b *bool) bool {
return *a == *b
}
-// EqualsRefOfValueContainer does deep equals between the two objects.
-func EqualsRefOfValueContainer(a, b *ValueContainer) bool {
+// RefOfValueContainer does deep equals between the two objects.
+func (cmp *Comparator) RefOfValueContainer(a, b *ValueContainer) bool {
if a == b {
return true
}
@@ -313,19 +313,21 @@ func EqualsRefOfValueContainer(a, b *ValueContainer) bool {
return false
}
return a.NotASTType == b.NotASTType &&
- EqualsAST(a.ASTType, b.ASTType) &&
- EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
+ cmp.AST(a.ASTType, b.ASTType) &&
+ cmp.RefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
}
-// EqualsRefOfValueSliceContainer does deep equals between the two objects.
-func EqualsRefOfValueSliceContainer(a, b *ValueSliceContainer) bool {
+// RefOfValueSliceContainer does deep equals between the two objects.
+func (cmp *Comparator) RefOfValueSliceContainer(a, b *ValueSliceContainer) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSliceOfAST(a.ASTElements, b.ASTElements) &&
- EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) &&
- EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
+ return cmp.SliceOfAST(a.ASTElements, b.ASTElements) &&
+ cmp.SliceOfInt(a.NotASTElements, b.NotASTElements) &&
+ cmp.SliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
}
+
+type Comparator struct{}
diff --git a/go/tools/asthelpergen/integration/ast_rewrite.go b/go/tools/asthelpergen/integration/ast_rewrite.go
index 3741b2080cb..cf92b358862 100644
--- a/go/tools/asthelpergen/integration/ast_rewrite.go
+++ b/go/tools/asthelpergen/integration/ast_rewrite.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/go/tools/asthelpergen/integration/ast_visit.go b/go/tools/asthelpergen/integration/ast_visit.go
index 8fb3c89ad56..6ceec4e2fc5 100644
--- a/go/tools/asthelpergen/integration/ast_visit.go
+++ b/go/tools/asthelpergen/integration/ast_visit.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/go/tools/asthelpergen/integration/integration_equals_test.go b/go/tools/asthelpergen/integration/integration_equals_test.go
index df3316cfe17..3e30142603e 100644
--- a/go/tools/asthelpergen/integration/integration_equals_test.go
+++ b/go/tools/asthelpergen/integration/integration_equals_test.go
@@ -28,9 +28,9 @@ func TestEquals(t *testing.T) {
for idxB, objB := range createObjs() {
t.Run(fmt.Sprintf("%s == %s", name(objA), name(objB)), func(t *testing.T) {
if idxA == idxB {
- require.True(t, EqualsAST(objA, objB))
+ require.True(t, Equals.AST(objA, objB))
} else {
- require.False(t, EqualsAST(objA, objB))
+ require.False(t, Equals.AST(objA, objB))
}
})
}
diff --git a/go/tools/asthelpergen/integration/test_helpers.go b/go/tools/asthelpergen/integration/test_helpers.go
index db9facb3c3b..67745f19687 100644
--- a/go/tools/asthelpergen/integration/test_helpers.go
+++ b/go/tools/asthelpergen/integration/test_helpers.go
@@ -98,3 +98,19 @@ func Rewrite(node AST, pre, post ApplyFunc) AST {
return outer.AST
}
+
+type (
+ cow struct {
+ pre func(node, parent AST) bool
+ post func(cursor *cursor)
+ cloned func(old, new AST)
+ cursor cursor
+ }
+ cursor struct {
+ stop bool
+ }
+)
+
+func (c *cow) postVisit(a, b AST, d bool) (AST, bool) {
+ return a, d
+}
diff --git a/go/tools/asthelpergen/integration/types.go b/go/tools/asthelpergen/integration/types.go
index 0fe2d7fee50..921759f3ad1 100644
--- a/go/tools/asthelpergen/integration/types.go
+++ b/go/tools/asthelpergen/integration/types.go
@@ -22,12 +22,8 @@ import (
"strings"
)
-/*
-These types are used to test the rewriter generator against these types.
-To recreate them, just run:
+//go:generate go run ../main --in . --iface vitess.io/vitess/go/tools/asthelpergen/integration.AST --clone_exclude "*NoCloneType"
-go run go/tools/asthelpergen -in ./go/tools/asthelpergen/integration -iface vitess.io/vitess/go/tools/asthelpergen/integration.AST -except "*NoCloneType"
-*/
// AST is the interface all interface types implement
type AST interface {
String() string
@@ -178,3 +174,5 @@ type application struct {
pre, post ApplyFunc
cur Cursor
}
+
+var Equals = &Comparator{}
diff --git a/go/tools/asthelpergen/main/main.go b/go/tools/asthelpergen/main/main.go
index 2a691291027..e4774943703 100644
--- a/go/tools/asthelpergen/main/main.go
+++ b/go/tools/asthelpergen/main/main.go
@@ -28,19 +28,17 @@ import (
)
func main() {
- var (
- patterns []string
- generate, except string
- verify bool
- )
+ var options asthelpergen.Options
+ var verify bool
- pflag.StringSliceVar(&patterns, "in", nil, "Go packages to load the generator")
- pflag.StringVar(&generate, "iface", "", "Root interface generate rewriter for")
+ pflag.StringSliceVar(&options.Packages, "in", nil, "Go packages to load the generator")
+ pflag.StringVar(&options.RootInterface, "iface", "", "Root interface generate rewriter for")
+ pflag.StringSliceVar(&options.Clone.Exclude, "clone_exclude", nil, "don't deep clone these types")
+ pflag.StringSliceVar(&options.Equals.AllowCustom, "equals_custom", nil, "generate custom comparators for these types")
pflag.BoolVar(&verify, "verify", false, "ensure that the generated files are correct")
- pflag.StringVar(&except, "except", "", "don't deep clone these types")
pflag.Parse()
- result, err := asthelpergen.GenerateASTHelpers(patterns, generate, except)
+ result, err := asthelpergen.GenerateASTHelpers(&options)
if err != nil {
log.Fatal(err)
}
diff --git a/go/tools/ci-config/main.go b/go/tools/ci-config/main.go
new file mode 100644
index 00000000000..d767b6f4d32
--- /dev/null
+++ b/go/tools/ci-config/main.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+)
+
+type Test struct {
+ Args []string
+}
+
+type Config struct {
+ Tests map[string]*Test
+}
+
+func main() {
+ content, err := os.ReadFile("./test/config.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ tests := &Config{}
+ err = json.Unmarshal(content, tests)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var failedConfig []string
+ for name, test := range tests.Tests {
+ if len(test.Args) == 0 {
+ continue
+ }
+ path := test.Args[0]
+ if !strings.HasPrefix(path, "vitess.io/vitess/") {
+ continue
+ }
+ path = path[len("vitess.io/vitess/"):]
+
+ stat, err := os.Stat(path)
+ if err != nil || !stat.IsDir() {
+ failedConfig = append(failedConfig, fmt.Sprintf("%s: %s", name, path))
+ continue
+ }
+ }
+
+ if len(failedConfig) > 0 {
+ fmt.Println("Some packages in test/config.json were not found in the codebase:")
+ for _, failed := range failedConfig {
+ fmt.Println("\t" + failed)
+ }
+ fmt.Println("\nYou must remove them from test/config.json to avoid unnecessary CI load.")
+ os.Exit(1)
+ }
+ fmt.Println("The file: test/config.json is clean.")
+}
diff --git a/go/tools/go-upgrade/go-upgrade.go b/go/tools/go-upgrade/go-upgrade.go
new file mode 100644
index 00000000000..26224dfdf8d
--- /dev/null
+++ b/go/tools/go-upgrade/go-upgrade.go
@@ -0,0 +1,531 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "encoding/json"
+
+ "github.com/hashicorp/go-version"
+ "github.com/spf13/cobra"
+)
+
+const (
+ goDevAPI = "https://go.dev/dl/?mode=json"
+)
+
+type (
+ latestGolangRelease struct {
+ Version string `json:"version"`
+ Stable bool `json:"stable"`
+ }
+
+ bootstrapVersion struct {
+ major, minor int // when minor == -1, it means there are no minor version
+ }
+)
+
+var (
+ workflowUpdate = true
+ allowMajorUpgrade = false
+ isMainBranch = false
+ goTo = ""
+
+ rootCmd = &cobra.Command{
+ Use: "go-upgrade",
+ Short: "Automates the Golang upgrade.",
+ Long: `go-upgrade allows us to automate some tasks required to bump the version of Golang used throughout our codebase.
+
+It mostly used by the update_golang_version.yml CI workflow that runs on a CRON.
+
+This tool is meant to be run at the root of the repository.
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ _ = cmd.Help()
+ },
+ Args: cobra.NoArgs,
+ }
+
+ getCmd = &cobra.Command{
+ Use: "get",
+ Short: "Command to get useful information about the codebase.",
+ Long: "Command to get useful information about the codebase.",
+ Run: func(cmd *cobra.Command, args []string) {
+ _ = cmd.Help()
+ },
+ Args: cobra.NoArgs,
+ }
+
+ getGoCmd = &cobra.Command{
+ Use: "go-version",
+ Short: "go-version prints the Golang version used by the current codebase.",
+ Long: "go-version prints the Golang version used by the current codebase.",
+ Run: runGetGoCmd,
+ Args: cobra.NoArgs,
+ }
+
+ getBootstrapCmd = &cobra.Command{
+ Use: "bootstrap-version",
+ Short: "bootstrap-version prints the Docker Bootstrap version used by the current codebase.",
+ Long: "bootstrap-version prints the Docker Bootstrap version used by the current codebase.",
+ Run: runGetBootstrapCmd,
+ Args: cobra.NoArgs,
+ }
+
+ upgradeCmd = &cobra.Command{
+ Use: "upgrade",
+ Short: "upgrade will upgrade the Golang and Bootstrap versions of the codebase to the latest available version.",
+ Long: `This command bumps the Golang and Bootstrap versions of the codebase.
+
+The latest available version of Golang will be fetched and used instead of the old version.
+
+By default, we do not allow major Golang version upgrade such as 1.20 to 1.21 but this can be overridden using the
+--allow-major-upgrade CLI flag. Usually, we only allow such upgrade on the main branch of the repository.
+
+In CI, particularly, we do not want to modify the workflow files before automatically creating a Pull Request to
+avoid permission issues. The rewrite of workflow files can be disabled using the --workflow-update=false CLI flag.
+
+Moreover, this command automatically bumps the bootstrap version of our codebase. If we are on the main branch, we
+want to use the CLI flag --main to remember to increment the bootstrap version by 1 instead of 0.1.`,
+ Run: runUpgradeCmd,
+ Args: cobra.NoArgs,
+ }
+
+ upgradeWorkflowsCmd = &cobra.Command{
+ Use: "workflows",
+ Short: "workflows will upgrade the Golang version used in our CI workflows files.",
+ Long: "This step is omitted by the bot since. We let the maintainers of Vitess manually upgrade the version used by the workflows using this command.",
+ Run: runUpgradeWorkflowsCmd,
+ Args: cobra.NoArgs,
+ }
+)
+
+func init() {
+ rootCmd.AddCommand(getCmd)
+ rootCmd.AddCommand(upgradeCmd)
+
+ getCmd.AddCommand(getGoCmd)
+ getCmd.AddCommand(getBootstrapCmd)
+
+ upgradeCmd.AddCommand(upgradeWorkflowsCmd)
+
+ upgradeCmd.Flags().BoolVar(&workflowUpdate, "workflow-update", workflowUpdate, "Whether or not the workflow files should be updated. Useful when using this script to auto-create PRs.")
+ upgradeCmd.Flags().BoolVar(&allowMajorUpgrade, "allow-major-upgrade", allowMajorUpgrade, "Defines if Golang major version upgrade are allowed.")
+ upgradeCmd.Flags().BoolVar(&isMainBranch, "main", isMainBranch, "Defines if the current branch is the main branch.")
+
+ upgradeWorkflowsCmd.Flags().StringVar(&goTo, "go-to", goTo, "The Golang version we want to upgrade to.")
+}
+
+func main() {
+ cobra.CheckErr(rootCmd.Execute())
+}
+
+func runGetGoCmd(_ *cobra.Command, _ []string) {
+ currentVersion, err := currentGolangVersion()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(currentVersion.String())
+}
+
+func runGetBootstrapCmd(_ *cobra.Command, _ []string) {
+ currentVersion, err := currentBootstrapVersion()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(currentVersion.toString())
+}
+
+func runUpgradeWorkflowsCmd(_ *cobra.Command, _ []string) {
+ err := updateWorkflowFilesOnly(goTo)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func runUpgradeCmd(_ *cobra.Command, _ []string) {
+ err := upgradePath(allowMajorUpgrade, workflowUpdate, isMainBranch)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func updateWorkflowFilesOnly(goTo string) error {
+ newV, err := version.NewVersion(goTo)
+ if err != nil {
+ return err
+ }
+ filesToChange, err := getListOfFilesInPaths([]string{"./.github/workflows"})
+ if err != nil {
+ return err
+ }
+
+ for _, fileToChange := range filesToChange {
+ err = replaceInFile(
+ []*regexp.Regexp{regexp.MustCompile(`go-version:[[:space:]]*([0-9.]+).*`)},
+ []string{"go-version: " + newV.String()},
+ fileToChange,
+ )
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func upgradePath(allowMajorUpgrade, workflowUpdate, isMainBranch bool) error {
+ currentVersion, err := currentGolangVersion()
+ if err != nil {
+ return err
+ }
+
+ availableVersions, err := getLatestStableGolangReleases()
+ if err != nil {
+ return err
+ }
+
+ upgradeTo := chooseNewVersion(currentVersion, availableVersions, allowMajorUpgrade)
+ if upgradeTo == nil {
+ return nil
+ }
+
+ err = replaceGoVersionInCodebase(currentVersion, upgradeTo, workflowUpdate)
+ if err != nil {
+ return err
+ }
+
+ currentBootstrapVersionF, err := currentBootstrapVersion()
+ if err != nil {
+ return err
+ }
+ nextBootstrapVersionF := currentBootstrapVersionF
+ if isMainBranch {
+ nextBootstrapVersionF.major += 1
+ } else {
+ nextBootstrapVersionF.minor += 1
+ }
+ err = updateBootstrapVersionInCodebase(currentBootstrapVersionF.toString(), nextBootstrapVersionF.toString(), upgradeTo)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// currentGolangVersion gets the running version of Golang in Vitess
+// and returns it as a *version.Version.
+//
+// The file `./build.env` describes which version of Golang is expected by Vitess.
+// We use this file to detect the current Golang version of our codebase.
+// The file contains `goversion_min x.xx.xx`, we will grep `goversion_min` to finally find
+// the precise golang version we're using.
+func currentGolangVersion() (*version.Version, error) {
+ contentRaw, err := os.ReadFile("build.env")
+ if err != nil {
+ return nil, err
+ }
+ content := string(contentRaw)
+
+ versre := regexp.MustCompile("(?i).*goversion_min[[:space:]]*([0-9.]+).*")
+ versionStr := versre.FindStringSubmatch(content)
+ if len(versionStr) != 2 {
+ return nil, fmt.Errorf("malformatted error, got: %v", versionStr)
+ }
+ return version.NewVersion(versionStr[1])
+}
+
+func currentBootstrapVersion() (bootstrapVersion, error) {
+ contentRaw, err := os.ReadFile("Makefile")
+ if err != nil {
+ return bootstrapVersion{}, err
+ }
+ content := string(contentRaw)
+
+ versre := regexp.MustCompile("(?i).*BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*([0-9.]+).*")
+ versionStr := versre.FindStringSubmatch(content)
+ if len(versionStr) != 2 {
+ return bootstrapVersion{}, fmt.Errorf("malformatted error, got: %v", versionStr)
+ }
+
+ vs := strings.Split(versionStr[1], ".")
+ major, err := strconv.Atoi(vs[0])
+ if err != nil {
+ return bootstrapVersion{}, err
+ }
+
+ minor := -1
+ if len(vs) > 1 {
+ minor, err = strconv.Atoi(vs[1])
+ if err != nil {
+ return bootstrapVersion{}, err
+ }
+ }
+
+ return bootstrapVersion{
+ major: major,
+ minor: minor,
+ }, nil
+}
+
+// getLatestStableGolangReleases fetches the latest stable releases of Golang from
+// the official website using the goDevAPI URL.
+// Once fetched, the releases are returned as version.Collection.
+func getLatestStableGolangReleases() (version.Collection, error) {
+ resp, err := http.Get(goDevAPI)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var latestGoReleases []latestGolangRelease
+ err = json.Unmarshal(body, &latestGoReleases)
+ if err != nil {
+ return nil, err
+ }
+
+ var versions version.Collection
+ for _, release := range latestGoReleases {
+ if !release.Stable {
+ continue
+ }
+ if !strings.HasPrefix(release.Version, "go") {
+ return nil, fmt.Errorf("golang version malformatted: %s", release.Version)
+ }
+ newVersion, err := version.NewVersion(release.Version[2:])
+ if err != nil {
+ return nil, err
+ }
+ versions = append(versions, newVersion)
+ }
+ return versions, nil
+}
+
+// chooseNewVersion decides what will be the next version we're going to use in our codebase.
+// Given the current Golang version, the available latest versions and whether we allow major upgrade or not,
+// chooseNewVersion will return either the new version or nil if we cannot/don't need to upgrade.
+func chooseNewVersion(curVersion *version.Version, latestVersions version.Collection, allowMajorUpgrade bool) *version.Version {
+ selectedVersion := curVersion
+ for _, latestVersion := range latestVersions {
+ if !allowMajorUpgrade && !isSameMajorMinorVersion(latestVersion, selectedVersion) {
+ continue
+ }
+ if latestVersion.GreaterThan(selectedVersion) {
+ selectedVersion = latestVersion
+ }
+ }
+ // No change detected, return nil meaning that we do not want to have a new Golang version.
+ if selectedVersion.Equal(curVersion) {
+ return nil
+ }
+ return selectedVersion
+}
+
+// replaceGoVersionInCodebase goes through all the files in the codebase where the
+// Golang version must be updated
+func replaceGoVersionInCodebase(old, new *version.Version, workflowUpdate bool) error {
+ if old.Equal(new) {
+ return nil
+ }
+ explore := []string{
+ "./test/templates",
+ "./build.env",
+ "./docker/bootstrap/Dockerfile.common",
+ }
+ if workflowUpdate {
+ explore = append(explore, "./.github/workflows")
+ }
+ filesToChange, err := getListOfFilesInPaths(explore)
+ if err != nil {
+ return err
+ }
+
+ for _, fileToChange := range filesToChange {
+ err = replaceInFile(
+ []*regexp.Regexp{regexp.MustCompile(fmt.Sprintf(`(%s)`, old.String()))},
+ []string{new.String()},
+ fileToChange,
+ )
+ if err != nil {
+ return err
+ }
+ }
+
+ if !isSameMajorMinorVersion(old, new) {
+ err = replaceInFile(
+ []*regexp.Regexp{regexp.MustCompile(`go[[:space:]]*([0-9.]+)`)},
+ []string{fmt.Sprintf("go %d.%d", new.Segments()[0], new.Segments()[1])},
+ "./go.mod",
+ )
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func updateBootstrapVersionInCodebase(old, new string, newGoVersion *version.Version) error {
+ if old == new {
+ return nil
+ }
+ files, err := getListOfFilesInPaths([]string{
+ "./docker/base",
+ "./docker/lite",
+ "./docker/local",
+ "./docker/vttestserver",
+ "./Makefile",
+ "./test/templates",
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, file := range files {
+ err = replaceInFile(
+ []*regexp.Regexp{
+ regexp.MustCompile(`ARG[[:space:]]*bootstrap_version[[:space:]]*=[[:space:]]*[0-9.]+`), // Dockerfile
+ regexp.MustCompile(`BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*[0-9.]+`), // Makefile
+ },
+ []string{
+ fmt.Sprintf("ARG bootstrap_version=%s", new), // Dockerfile
+ fmt.Sprintf("BOOTSTRAP_VERSION=%s", new), // Makefile
+ },
+ file,
+ )
+ if err != nil {
+ return err
+ }
+ }
+
+ err = replaceInFile(
+ []*regexp.Regexp{regexp.MustCompile(`\"bootstrap-version\",[[:space:]]*\"([0-9.]+)\"`)},
+ []string{fmt.Sprintf("\"bootstrap-version\", \"%s\"", new)},
+ "./test.go",
+ )
+ if err != nil {
+ return err
+ }
+
+ err = updateBootstrapChangelog(new, newGoVersion)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func updateBootstrapChangelog(new string, goVersion *version.Version) error {
+ file, err := os.OpenFile("./docker/bootstrap/CHANGELOG.md", os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ s, err := file.Stat()
+ if err != nil {
+ return err
+ }
+ newContent := fmt.Sprintf(`
+
+## [%s] - %s
+### Changes
+- Update build to golang %s`, new, time.Now().Format(time.DateOnly), goVersion.String())
+
+ _, err = file.WriteAt([]byte(newContent), s.Size())
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func isSameMajorMinorVersion(a, b *version.Version) bool {
+ return a.Segments()[0] == b.Segments()[0] && a.Segments()[1] == b.Segments()[1]
+}
+
+func getListOfFilesInPaths(pathsToExplore []string) ([]string, error) {
+ var filesToChange []string
+ for _, pathToExplore := range pathsToExplore {
+ stat, err := os.Stat(pathToExplore)
+ if err != nil {
+ return nil, err
+ }
+ if stat.IsDir() {
+ dirEntries, err := os.ReadDir(pathToExplore)
+ if err != nil {
+ return nil, err
+ }
+ for _, entry := range dirEntries {
+ if entry.IsDir() {
+ continue
+ }
+ filesToChange = append(filesToChange, path.Join(pathToExplore, entry.Name()))
+ }
+ } else {
+ filesToChange = append(filesToChange, pathToExplore)
+ }
+ }
+ return filesToChange, nil
+}
+
+// replaceInFile replaces old with new in the given file.
+func replaceInFile(oldexps []*regexp.Regexp, new []string, fileToChange string) error {
+ if len(oldexps) != len(new) {
+ panic("old and new should be of the same length")
+ }
+
+ f, err := os.OpenFile(fileToChange, os.O_RDWR, 0600)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ content, err := io.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ contentStr := string(content)
+
+ for i, oldex := range oldexps {
+ contentStr = oldex.ReplaceAllString(contentStr, new[i])
+ }
+
+ _, err = f.WriteAt([]byte(contentStr), 0)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b bootstrapVersion) toString() string {
+ if b.minor == -1 {
+ return fmt.Sprintf("%d", b.major)
+ }
+ return fmt.Sprintf("%d.%d", b.major, b.minor)
+}
diff --git a/go/tools/release-notes/release_notes.go b/go/tools/release-notes/release_notes.go
index 61d899f370e..5bb03339245 100644
--- a/go/tools/release-notes/release_notes.go
+++ b/go/tools/release-notes/release_notes.go
@@ -27,7 +27,6 @@ import (
"regexp"
"sort"
"strings"
- "sync"
"text/template"
"github.com/spf13/pflag"
@@ -40,24 +39,24 @@ type (
labels []label
- author struct {
- Login string `json:"login"`
+ pullRequestAuthor struct {
+ Login string
}
- prInfo struct {
- Labels labels `json:"labels"`
- Number int `json:"number"`
- Title string `json:"title"`
- Author author `json:"author"`
+ pullRequestInformation struct {
+ Number int
+ Title string
+ Labels labels
+ Author pullRequestAuthor
}
- prsByComponent = map[string][]prInfo
+ prsByComponent = map[string][]pullRequestInformation
prsByType = map[string]prsByComponent
sortedPRComponent struct {
Name string
- PrInfos []prInfo
+ PrInfos []pullRequestInformation
}
sortedPRType struct {
@@ -76,14 +75,17 @@ type (
KnownIssues string
AddDetails string
PathToChangeLogFileOnGH, ChangeLog, ChangeMetrics string
+ SubDirPath string
}
)
-const (
- releaseNotesPath = `doc/releasenotes/`
- releaseNotesPathGitHub = `https://github.com/vitessio/vitess/blob/main/` + releaseNotesPath
+var (
+ releaseNotesPath = `changelog/`
+)
- markdownTemplate = `# Release of Vitess {{.Version}}
+const (
+ releaseNotesPathGitHub = `https://github.com/vitessio/vitess/blob/main/`
+ markdownTemplate = `# Release of Vitess {{.Version}}
{{- if or .Announcement .AddDetails }}
{{ .Announcement }}
@@ -131,16 +133,15 @@ The entire changelog for this release can be found [here]({{ .PathToChangeLogFil
prefixType = "Type: "
prefixComponent = "Component: "
- numberOfThreads = 10
lengthOfSingleSHA = 40
)
func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error {
var err error
// Generate the release notes
- rn.PathToChangeLogFileOnGH = fmt.Sprintf(releaseNotesPathGitHub+"%s_changelog.md", rn.VersionUnderscore)
+ rn.PathToChangeLogFileOnGH = releaseNotesPathGitHub + path.Join(rn.SubDirPath, "changelog.md")
if rnFile == nil {
- rnFile, err = os.OpenFile(fmt.Sprintf(path.Join(releaseNotesPath, "%s_release_notes.md"), rn.VersionUnderscore), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ rnFile, err = os.OpenFile(path.Join(rn.SubDirPath, "release_notes.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
@@ -154,7 +155,7 @@ func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error {
// Generate the changelog
if changelogFile == nil {
- changelogFile, err = os.OpenFile(fmt.Sprintf(path.Join(releaseNotesPath, "%s_changelog.md"), rn.VersionUnderscore), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ changelogFile, err = os.OpenFile(path.Join(rn.SubDirPath, "changelog.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
@@ -185,61 +186,27 @@ func loadKnownIssues(release string) ([]knownIssue, error) {
return knownIssues, nil
}
-func loadMergedPRs(from, to string) (prs []string, authors []string, commitCount int, err error) {
- // load the git log with "author \t title \t parents"
- out, err := execCmd("git", "log", `--pretty=format:%ae%x09%s%x09%P%x09%h`, fmt.Sprintf("%s..%s", from, to))
-
+func loadMergedPRsAndAuthors(name string) (pris []pullRequestInformation, authors []string, err error) {
+ out, err := execCmd("gh", "pr", "list", "-s", "merged", "-S", fmt.Sprintf("milestone:%s", name), "--json", "number,title,labels,author", "--limit", "5000")
if err != nil {
return
}
- return parseGitLog(string(out))
-}
-
-func parseGitLog(s string) (prs []string, authorCommits []string, commitCount int, err error) {
- rx := regexp.MustCompile(`(.+)\t(.+)\t(.+)\t(.+)`)
- mergePR := regexp.MustCompile(`Merge pull request #(\d+)`)
- squashPR := regexp.MustCompile(`\(#(\d+)\)`)
- authMap := map[string]string{} // here we will store email <-> gh user mappings
- lines := strings.Split(s, "\n")
- for _, line := range lines {
- lineInfo := rx.FindStringSubmatch(line)
- if len(lineInfo) != 5 {
- log.Fatalf("failed to parse the output from git log: %s", line)
- }
- authorEmail := lineInfo[1]
- title := lineInfo[2]
- parents := lineInfo[3]
- sha := lineInfo[4]
- merged := mergePR.FindStringSubmatch(title)
- if len(merged) == 2 {
- // this is a merged PR. remember the PR #
- prs = append(prs, merged[1])
- continue
- }
-
- if len(parents) <= lengthOfSingleSHA {
- // we have a single parent, and the commit counts
- commitCount++
- if _, exists := authMap[authorEmail]; !exists {
- authMap[authorEmail] = sha
- }
- }
-
- squashed := squashPR.FindStringSubmatch(title)
- if len(squashed) == 2 {
- // this is a merged PR. remember the PR #
- prs = append(prs, squashed[1])
- continue
- }
+ err = json.Unmarshal(out, &pris)
+ if err != nil {
+ return nil, nil, err
}
- for _, author := range authMap {
- authorCommits = append(authorCommits, author)
+ // Get the full list of distinct PRs authors and sort them
+ authorMap := map[string]bool{}
+ for _, pri := range pris {
+ login := pri.Author.Login
+ if ok := authorMap[login]; !ok {
+ authors = append(authors, login)
+ authorMap[login] = true
+ }
}
-
- sort.Strings(prs)
- sort.Strings(authorCommits) // not really needed, but makes testing easier
+ sort.Strings(authors)
return
}
@@ -259,134 +226,10 @@ func execCmd(name string, arg ...string) ([]byte, error) {
return out, nil
}
-func loadPRInfo(pr string) (prInfo, error) {
- out, err := execCmd("gh", "pr", "view", pr, "--json", "title,number,labels,author")
- if err != nil {
- return prInfo{}, err
- }
- var prInfo prInfo
- err = json.Unmarshal(out, &prInfo)
- return prInfo, err
-}
-
-func loadAuthorInfo(sha string) (string, error) {
- out, err := execCmd("gh", "api", "/repos/vitessio/vitess/commits/"+sha)
- if err != nil {
- return "", err
- }
- var prInfo prInfo
- err = json.Unmarshal(out, &prInfo)
- if err != nil {
- return "", err
- }
- return prInfo.Author.Login, nil
-}
-
-type req struct {
- isPR bool
- key string
-}
-
-func loadAllPRs(prs, authorCommits []string) ([]prInfo, []string, error) {
- errChan := make(chan error)
- wgDone := make(chan bool)
- prChan := make(chan req, len(prs)+len(authorCommits))
- // fill the work queue
- for _, s := range prs {
- prChan <- req{isPR: true, key: s}
- }
- for _, s := range authorCommits {
- prChan <- req{isPR: false, key: s}
- }
- close(prChan)
-
- var prInfos []prInfo
- var authors []string
- fmt.Printf("Found %d merged PRs. Loading PR info", len(prs))
- wg := sync.WaitGroup{}
- mu := sync.Mutex{}
-
- shouldLoad := func(in string) bool {
- if in == "" {
- return false
- }
- mu.Lock()
- defer mu.Unlock()
-
- for _, existing := range authors {
- if existing == in {
- return false
- }
- }
- return true
- }
- addAuthor := func(in string) {
- mu.Lock()
- defer mu.Unlock()
- authors = append(authors, in)
- }
- addPR := func(in prInfo) {
- mu.Lock()
- defer mu.Unlock()
- prInfos = append(prInfos, in)
- }
-
- for i := 0; i < numberOfThreads; i++ {
- wg.Add(1)
- go func() {
- // load meta data about PRs
- defer wg.Done()
-
- for b := range prChan {
- fmt.Print(".")
-
- if b.isPR {
- prInfo, err := loadPRInfo(b.key)
- if err != nil {
- errChan <- err
- break
- }
- addPR(prInfo)
- continue
- }
- author, err := loadAuthorInfo(b.key)
- if err != nil {
- errChan <- err
- break
- }
- if shouldLoad(author) {
- addAuthor(author)
- }
-
- }
- }()
- }
-
- go func() {
- // wait for the loading to finish
- wg.Wait()
- close(wgDone)
- }()
-
- var err error
- select {
- case <-wgDone:
- break
- case err = <-errChan:
- break
- }
-
- fmt.Println()
-
- sort.Strings(authors)
-
- return prInfos, authors, err
-}
-
-func groupPRs(prInfos []prInfo) prsByType {
+func groupPRs(pris []pullRequestInformation) prsByType {
prPerType := prsByType{}
- for _, info := range prInfos {
+ for _, info := range pris {
var typ, component string
for _, lbl := range info.Labels {
switch {
@@ -476,11 +319,11 @@ func getStringForKnownIssues(issues []knownIssue) (string, error) {
return buff.String(), nil
}
-func groupAndStringifyPullRequest(pr []prInfo) (string, error) {
- if len(pr) == 0 {
+func groupAndStringifyPullRequest(pris []pullRequestInformation) (string, error) {
+ if len(pris) == 0 {
return "", nil
}
- prPerType := groupPRs(pr)
+ prPerType := groupPRs(pris)
prStr, err := getStringForPullRequestInfos(prPerType)
if err != nil {
return "", err
@@ -490,11 +333,8 @@ func groupAndStringifyPullRequest(pr []prInfo) (string, error) {
func main() {
var (
- from, versionName, summaryFile string
- to = "HEAD"
+ versionName, summaryFile string
)
- pflag.StringVarP(&from, "from", "f", "", "from sha/tag/branch")
- pflag.StringVarP(&to, "to", to, "t", "to sha/tag/branch")
pflag.StringVarP(&versionName, "version", "v", "", "name of the version (has to be the following format: v11.0.0)")
pflag.StringVarP(&summaryFile, "summary", "s", "", "readme file on which there is a summary of the release")
pflag.Parse()
@@ -507,9 +347,20 @@ func main() {
log.Fatal("The --version flag must be set using a valid format. Format: 'vX.X.X'.")
}
+ // Define the path to the release notes folder
+ majorVersion := versionMatch[1] + "." + versionMatch[2]
+ patchVersion := versionMatch[1] + "." + versionMatch[2] + "." + versionMatch[3]
+ releaseNotesPath = path.Join(releaseNotesPath, majorVersion, patchVersion)
+
+ err := os.MkdirAll(releaseNotesPath, os.ModePerm)
+ if err != nil {
+ log.Fatal(err)
+ }
+
releaseNotes := releaseNote{
Version: versionName,
VersionUnderscore: fmt.Sprintf("%s_%s_%s", versionMatch[1], versionMatch[2], versionMatch[3]), // v14.0.0 -> 14_0_0, this is used to format filenames.
+ SubDirPath: releaseNotesPath,
}
// summary of the release
@@ -533,26 +384,23 @@ func main() {
releaseNotes.KnownIssues = knownIssuesStr
// changelog with pull requests
- prs, authorCommits, commits, err := loadMergedPRs(from, to)
+ prs, authors, err := loadMergedPRsAndAuthors(versionName)
if err != nil {
log.Fatal(err)
}
- prInfos, authors, err := loadAllPRs(prs, authorCommits)
- if err != nil {
- log.Fatal(err)
- }
- releaseNotes.ChangeLog, err = groupAndStringifyPullRequest(prInfos)
+
+ releaseNotes.ChangeLog, err = groupAndStringifyPullRequest(prs)
if err != nil {
log.Fatal(err)
}
// changelog metrics
- if commits > 0 && len(authors) > 0 {
+ if len(prs) > 0 && len(authors) > 0 {
releaseNotes.ChangeMetrics = fmt.Sprintf(`
-The release includes %d commits (excluding merges)
+The release includes %d merged Pull Requests.
Thanks to all our contributors: @%s
-`, commits, strings.Join(authors, ", @"))
+`, len(prs), strings.Join(authors, ", @"))
}
if err := releaseNotes.generate(nil, nil); err != nil {
diff --git a/go/tools/release-notes/release_notes_test.go b/go/tools/release-notes/release_notes_test.go
index 0622d458d28..19f946525c3 100644
--- a/go/tools/release-notes/release_notes_test.go
+++ b/go/tools/release-notes/release_notes_test.go
@@ -20,7 +20,6 @@ import (
"os"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/utils"
@@ -29,26 +28,26 @@ import (
func Test_groupPRs(t *testing.T) {
tests := []struct {
name string
- prInfos []prInfo
- want map[string]map[string][]prInfo
+ prInfos []pullRequestInformation
+ want map[string]map[string][]pullRequestInformation
}{
{
name: "Single PR info with no labels",
- prInfos: []prInfo{{Title: "pr 1", Number: 1}},
- want: map[string]map[string][]prInfo{"Other": {"Other": []prInfo{{Title: "pr 1", Number: 1}}}},
+ prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1}},
+ want: map[string]map[string][]pullRequestInformation{"Other": {"Other": []pullRequestInformation{{Title: "pr 1", Number: 1}}}},
}, {
name: "Single PR info with type label",
- prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}},
- want: map[string]map[string][]prInfo{"Bug fixes": {"Other": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}}}},
+ prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}},
+ want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"Other": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}}}},
{
name: "Single PR info with type and component labels",
- prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}},
- want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}}},
+ prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}},
+ want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"VTGate": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}}},
{
- name: "Multiple PR infos with type and component labels", prInfos: []prInfo{
+ name: "Multiple PR infos with type and component labels", prInfos: []pullRequestInformation{
{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}},
{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}},
- want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}, "Feature": {"VTTablet": []prInfo{{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}}}},
+ want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"VTGate": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}, "Feature": {"VTTablet": []pullRequestInformation{{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}}}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -58,54 +57,6 @@ func Test_groupPRs(t *testing.T) {
}
}
-func TestParseGitLogOutput(t *testing.T) {
- in := `harshTEST@planetscale.com Merge pull request #7968 from planetscale/bump_java_snapshot_v11 7e8ebbb5b79b65d2d45fd6c838efb51bdafc7c0b 195a09df191d3e86a32ebcc7a1f1dde168fe819e 168fe819e
-deeptTEST@planetscale.com Merge pull request #7970 from planetscale/vttestserver-default-charset 887be6914690b6d106aba001c72deea80a4d8dab ff8c750eda4b30787e772547a451ed1f50931150 f50931150
-deeptTEST@planetscale.com Merge pull request #7943 from planetscale/fix-mysql80-container-image 01fb7e55ab92df7c3f300b85976fdf3fd5bd35b3 3cc94a10752014c9ce311d88af9e1aa18e7fa2d8 18e7fa2d8
-57520317+rohit-nayak-TEST@users.noreply.github.com Merge pull request #7831 from planetscale/rn-vr-log2 37c09d3be83922a8ef936fbc028a5031f96b7dbf f57350c3ea1720496e5f1cec35d58f069e4df515 69e4df515
-TEST@planetscale.com docker/vttestserver/run.sh: Add $CHARSET environment variable 482a7008117ee3215663aeb33cad981e5242a88a e5242a88a
-rohTEST@planetscale.com Add ability to select from vreplication_log in VReplicationExec 427cac89cd6b143d3a1928ee682b3a9538709da5 538709da5
-rohTEST@planetscale.com Use withDDL for vreplication log queries 4a1ab946e3628ba8ef610ea4a158186a5fdd17ba a5fdd17ba
-rohTEST@planetscale.com Add license file. Minor refactor fa9de690ce0d27a781befbc1866aca5cd447798f cd447798f
-rohTEST@planetscale.com Added comments and refactored tests b6d39acb08939ba56e9e9587f34f3b8bcdcdc504 bcdcdc504
-rohTEST@planetscale.com Add logs for start and end of the copy phase 1cf72866ddfbd554700d6c9e32b9835ebb3b444c ebb3b444c
-rohTEST@planetscale.com Fix test 0992d39c6d473b548679d012cfa5a889ffa448ef 9ffa448ef
-rohTEST@planetscale.com Add test for vreplication log and fix string conversion bug b616143b14b75e7c23042c2eef4f6b27a275b0f7 7a275b0f7
-rohTEST@planetscale.com Ignore queries related to _vt.vreplication_log in tests e6926932c14da9a2213be246bc2de5f011668551 011668551
-rohTEST@planetscale.com Create log table. Util functions to insert logs. Insert logs in VReplicationExec and setMessage/State 37c09d3be83922a8ef936fbc028a5031f96b7dbf 1f96b7dbf
-harshTEST@planetscale.com Merge pull request #7951 from vmg/vmg/vr-client-perf 7794c62651066970e1176181cb7000d385d0b327 172fac7dec8b11937a4efb26ebf4bedf1771f189 f1771f189
-alkin.tezuysTEST@gmail.com java: Bump SNAPSHOT version to 11.0.0-SNAPSHOT after Vitess release v10 7794c62651066970e1176181cb7000d385d0b327 385d0b327
-alkin.tezuysTEST@gmail.com Merge pull request #7964 from planetscale/10_0_RC1_release_notes 31d84d6ce8e233a053794ad0ffe5168d34d04450 b020dc71f5c7dc663d814563f1b6c97340f4411f 340f4411f
-vTEST@strn.cat vstreamer: fix docs e7bf329da0029414c3b18e18e5cb2226b9a731a2 6b9a731a2
-amasTEST@slack-corp.com [workflow] extract migration targets from wrangler (#7934) 8bd5a7cb093369b50a0926bfa3a112b3b744e782 3b744e782
-alkin.tezuysTEST@gmail.com More spacing issues fixed 7509d47ba785e7a39b8726dc80f93955953ab98d 5953ab98d
-alkin.tezuysTEST@gmail.com Minor spacing fixes d31362e76ac69fb2bc4083e22e7c87683099fecd 83099fecd
-alkin.tezuysTEST@gmail.com Update 10_0_0_release_notes.md a7034bdf5d454a47738335ed2afc75f72bdbcf37 72bdbcf37
-alkin.tezuysTEST@gmail.com v10 GA Release Notes ad37320b2637620ee36d44d163399ecc2c1eea6c c2c1eea6c
-andrTEST@planetscale.com Merge pull request #7912 from planetscale/show-databases-like 7e13d4bccca0325ca07a488334e77c4f2f964f6b 95eceb17d10c62d56f2e94e5478afb5a1b63e1c2 a1b63e1c2
-andrTEST@planetscale.com Merge pull request #7629 from planetscale/gen4-table-aliases 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 7ad14e3f3d26cb1780cdbf9c22029740e5aebde4 0e5aebde4
-andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into show-databases-like 6b3ee1c31a939fc6628515f00087baa3e1e8acf7 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 860d3c66e
-2607934+shlomi-noaTEST@users.noreply.github.com Merge pull request #7959 from Hellcatlk/master 6c826115937d28ef83f05a1f0d54db0fcb814db4 cdab3040aaaa11c51e291d6b1a7af6fadd83dedf add83dedf
-zouy.fnTEST@cn.fujitsu.com Fix a gofmt warning 08038850a258d6de250cf9d864d6118616f5562c 616f5562c
-vTEST@strn.cat mysql: allow reusing row storage when reading from a stream a2850bbf41100618cb1192067b16585ba7c6b0c7 ba7c6b0c7
-vTEST@strn.cat throttle: do not check for time constantly e0b90daebe9e6b98d969934a24899b41d25e3a68 1d25e3a68
-andrTEST@planetscale.com fix compilation error 18036f5fb5f58523dbf50726beb741cedac2baf8 edac2baf8
-andrTEST@planetscale.com better code comment c173c945cf0e75e8649e6fa621509b5fb4ebd6c9 fb4ebd6c9
-vTEST@strn.cat conn: do not let header escape to the heap d31fb23d8cb9463810ed9fc132df4060a6812f6e 0a6812f6e
-vTEST@strn.cat vstreamer: do not allocate when filtering rows dafc1cb729d7be7dff2c05bd05a926005eb9a044 05eb9a044
-vTEST@strn.cat vstreamer: do not allocate when converting rows c5cd3067aeb9d952a2f45084c37634267e4f9062 67e4f9062
-andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into gen4-table-aliases 8c01827ed8b748240f213d9476ee162306ab01eb b1f9000ddd166d49adda6581e7ca9e0aca10c252 aca10c252
-aquarapTEST@gmail.com Fix mysql80 docker build with dep. a28591577b8d432b9c5d78abf59ad494a0a943b0 4a0a943b0
-TEST@planetscale.com Revert "docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24" 7858ff46545cff749b3663c92ae90ef27a5dfbc2 27a5dfbc2
-TEST@planetscale.com docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24 c91d46782933292941a846fef2590ff1a6fa193f a6fa193f`
-
- prs, authorCommits, nonMergeCommits, err := parseGitLog(in)
- require.NoError(t, err)
- assert.Equal(t, prs, []string{"7629", "7831", "7912", "7934", "7943", "7951", "7959", "7964", "7968", "7970"})
- assert.Equal(t, authorCommits, []string{"385d0b327", "3b744e782", "4a0a943b0", "538709da5", "616f5562c", "6b9a731a2", "e5242a88a", "edac2baf8"})
- assert.Equal(t, 28, nonMergeCommits)
-}
-
func TestLoadSummaryReadme(t *testing.T) {
readmeFile, err := os.CreateTemp("", "*.md")
require.NoError(t, err)
@@ -160,11 +111,12 @@ func TestGenerateReleaseNotes(t *testing.T) {
VersionUnderscore: "12_0_0",
ChangeLog: "* PR 1\n* PR 2\n",
ChangeMetrics: "optimization is the root of all evil",
+ SubDirPath: "changelog/12.0/12.0.0",
},
expectedOut: "# Release of Vitess v12.0.0\n" +
"This is the new release.\n\nNew features got added.\n" +
"------------\n" +
- "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_0_changelog.md).\n" +
+ "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.0/changelog.md).\n" +
"optimization is the root of all evil\n",
expectedOutChangeLog: "# Changelog of Vitess v12.0.0\n" +
"* PR 1\n" +
@@ -176,9 +128,10 @@ func TestGenerateReleaseNotes(t *testing.T) {
VersionUnderscore: "12_0_0",
ChangeLog: "* PR 1\n* PR 2\n",
ChangeMetrics: "optimization is the root of all evil",
+ SubDirPath: "changelog/12.0/12.0.0",
},
expectedOut: "# Release of Vitess v12.0.0\n" +
- "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_0_changelog.md).\n" +
+ "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.0/changelog.md).\n" +
"optimization is the root of all evil\n",
expectedOutChangeLog: "# Changelog of Vitess v12.0.0\n" +
"* PR 1\n" +
diff --git a/go/tools/releases/releases.go b/go/tools/releases/releases.go
new file mode 100644
index 00000000000..10c29233494
--- /dev/null
+++ b/go/tools/releases/releases.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+// The changelog directory is composed of a README that lists
+// and links to all major releases of Vitess. It has one
+// sub-directory for each major version. Each sub-directory is
+// composed of another README that also lists and links all the
+// patch releases of this major release. Those sub-directories
+// are composed of one directory per patch release. Finally,
+// the patch release directory contains the old files markdown:
+// summary, release_notes, changelog.
+//
+// This tool is solely responsible for generating the READMEs
+// and making sure they are up-to-date with the list of major
+// and patch releases we have.
+
+import (
+ "log"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "text/template"
+)
+
+const (
+ rootDir = "./changelog/"
+
+ rootFileTmpl = `## Releases
+
+{{- range $r := .SubDirs }}
+* [{{ $r.Name }}]({{ $r.Name }})
+{{- end -}}
+`
+
+ majorVersionTmpl = `## v{{ .Name }}
+
+{{- if .Team }}
+The dedicated team for this release can be found [here]({{.Team}}).{{ end }}
+
+{{- range $r := .SubDirs }}
+* **[{{ $r.Name }}]({{ $r.Name }})**
+{{ if $r.Changelog }} * [Changelog]({{ $r.Name }}/{{ $r.Changelog }})
+{{ end -}}
+{{ if $r.ReleaseNotes }} * [Release Notes]({{ $r.Name }}/{{ $r.ReleaseNotes }})
+{{ end -}}
+{{- end -}}
+`
+)
+
+type dir struct {
+ Name string
+ Path string
+ Changelog string
+ ReleaseNotes string
+ Team string
+ SubDirs []dir
+}
+
+func main() {
+ rootDir, err := getDirs(dir{Path: rootDir})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ err = execReadMeTemplateWithDir(rootDir, rootFileTmpl)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ for _, subDir := range rootDir.SubDirs {
+ err := execReadMeTemplateWithDir(subDir, majorVersionTmpl)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func execReadMeTemplateWithDir(d dir, tmpl string) error {
+ rootRM, err := os.OpenFile(path.Join(d.Path, "README.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640)
+ if err != nil {
+ return err
+ }
+
+ t := template.Must(template.New("root_readme").Parse(tmpl))
+ err = t.ExecuteTemplate(rootRM, "root_readme", d)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func getDirs(curDir dir) (dir, error) {
+ entries, err := os.ReadDir(curDir.Path)
+ if err != nil {
+ return dir{}, err
+ }
+
+ for _, entry := range entries {
+ if entry.IsDir() {
+ subDir, err := getDirs(dir{
+ Name: entry.Name(),
+ Path: path.Join(curDir.Path, entry.Name()),
+ })
+ if err != nil {
+ return dir{}, err
+ }
+ curDir.SubDirs = append(curDir.SubDirs, subDir)
+ continue
+ }
+
+ switch {
+ case strings.Contains(entry.Name(), "changelog.md"):
+ curDir.Changelog = entry.Name()
+ case strings.Contains(entry.Name(), "release_notes.md"):
+ curDir.ReleaseNotes = entry.Name()
+ case strings.Contains(entry.Name(), "team.md"):
+ curDir.Team = entry.Name()
+ }
+ }
+ sort.Slice(curDir.SubDirs, func(i, j int) bool {
+ if len(curDir.SubDirs[i].Name) < len(curDir.SubDirs[j].Name) {
+ return false
+ }
+ return curDir.SubDirs[i].Name > curDir.SubDirs[j].Name
+ })
+ return curDir, nil
+}
diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go
index 126e13399d8..1cdb2d6cacc 100644
--- a/go/vt/binlog/binlog_connection.go
+++ b/go/vt/binlog/binlog_connection.go
@@ -99,16 +99,16 @@ func connectForReplication(cp dbconfigs.Connector) (*mysql.Conn, error) {
// StartBinlogDumpFromCurrent requests a replication binlog dump from
// the current position.
-func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mysql.Position, <-chan mysql.BinlogEvent, error) {
+func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mysql.Position, <-chan mysql.BinlogEvent, <-chan error, error) {
ctx, bc.cancel = context.WithCancel(ctx)
position, err := bc.Conn.PrimaryPosition()
if err != nil {
- return mysql.Position{}, nil, fmt.Errorf("failed to get primary position: %v", err)
+ return mysql.Position{}, nil, nil, fmt.Errorf("failed to get primary position: %v", err)
}
- c, err := bc.StartBinlogDumpFromPosition(ctx, position)
- return position, c, err
+ c, e, err := bc.StartBinlogDumpFromPosition(ctx, "", position)
+ return position, c, e, err
}
// StartBinlogDumpFromPosition requests a replication binlog dump from
@@ -120,33 +120,42 @@ func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mys
// by canceling the context.
//
// Note the context is valid and used until eventChan is closed.
-func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, startPos mysql.Position) (<-chan mysql.BinlogEvent, error) {
+func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, binlogFilename string, startPos mysql.Position) (<-chan mysql.BinlogEvent, <-chan error, error) {
ctx, bc.cancel = context.WithCancel(ctx)
log.Infof("sending binlog dump command: startPos=%v, serverID=%v", startPos, bc.serverID)
- if err := bc.SendBinlogDumpCommand(bc.serverID, startPos); err != nil {
+ if err := bc.SendBinlogDumpCommand(bc.serverID, binlogFilename, startPos); err != nil {
log.Errorf("couldn't send binlog dump command: %v", err)
- return nil, err
+ return nil, nil, err
}
- return bc.streamEvents(ctx), nil
+ c, e := bc.streamEvents(ctx)
+
+ return c, e, nil
}
-// streamEvents returns a channel on which events are streamed.
-func (bc *BinlogConnection) streamEvents(ctx context.Context) chan mysql.BinlogEvent {
+// streamEvents returns a channel on which events are streamed and a channel on
+// which errors are propagated.
+func (bc *BinlogConnection) streamEvents(ctx context.Context) (chan mysql.BinlogEvent, chan error) {
// FIXME(alainjobart) I think we can use a buffered channel for better performance.
eventChan := make(chan mysql.BinlogEvent)
+ errChan := make(chan error)
// Start reading events.
bc.wg.Add(1)
go func() {
defer func() {
close(eventChan)
+ close(errChan)
bc.wg.Done()
}()
for {
event, err := bc.Conn.ReadBinlogEvent()
if err != nil {
+ select {
+ case errChan <- err:
+ case <-ctx.Done():
+ }
if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.CRServerLost {
// CRServerLost = Lost connection to MySQL server during query
// This is not necessarily an error. It could just be that we closed
@@ -165,7 +174,7 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) chan mysql.BinlogE
}
}
}()
- return eventChan
+ return eventChan, errChan
}
// StartBinlogDumpFromBinlogBeforeTimestamp requests a replication
@@ -196,21 +205,22 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) chan mysql.BinlogE
// by canceling the context.
//
// Note the context is valid and used until eventChan is closed.
-func (bc *BinlogConnection) StartBinlogDumpFromBinlogBeforeTimestamp(ctx context.Context, timestamp int64) (<-chan mysql.BinlogEvent, error) {
+func (bc *BinlogConnection) StartBinlogDumpFromBinlogBeforeTimestamp(ctx context.Context, timestamp int64) (<-chan mysql.BinlogEvent, <-chan error, error) {
ctx, bc.cancel = context.WithCancel(ctx)
filename, err := bc.findFileBeforeTimestamp(ctx, timestamp)
if err != nil {
- return nil, err
+ return nil, nil, err
}
// Start dumping the logs. The position is '4' to skip the
// Binlog File Header. See this page for more info:
// https://dev.mysql.com/doc/internals/en/binlog-file.html
if err := bc.Conn.WriteComBinlogDump(bc.serverID, filename, 4, 0); err != nil {
- return nil, fmt.Errorf("failed to send the ComBinlogDump command: %v", err)
+ return nil, nil, fmt.Errorf("failed to send the ComBinlogDump command: %v", err)
}
- return bc.streamEvents(ctx), nil
+ e, c := bc.streamEvents(ctx)
+ return e, c, nil
}
func (bc *BinlogConnection) findFileBeforeTimestamp(ctx context.Context, timestamp int64) (filename string, err error) {
diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go
index 1d0431faff3..6bf2c26bf9c 100644
--- a/go/vt/binlog/binlog_streamer.go
+++ b/go/vt/binlog/binlog_streamer.go
@@ -207,6 +207,7 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) {
}
var events <-chan mysql.BinlogEvent
+ var errs <-chan error
if bls.timestamp != 0 {
// MySQL 5.6 only: We are going to start reading the
// logs from the beginning of a binlog file. That is
@@ -214,7 +215,7 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) {
// contains the starting GTIDSet, and we will save
// that as the current position.
bls.usePreviousGTIDs = true
- events, err = bls.conn.StartBinlogDumpFromBinlogBeforeTimestamp(ctx, bls.timestamp)
+ events, errs, err = bls.conn.StartBinlogDumpFromBinlogBeforeTimestamp(ctx, bls.timestamp)
} else if !bls.startPos.IsZero() {
// MySQL 5.6 only: we are starting from a random
// binlog position. It turns out we will receive a
@@ -223,16 +224,17 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) {
// the starting position we pass in, it seems it is
// just the PREVIOUS_GTIDS_EVENT from the file we're reading.
// So we have to skip it.
- events, err = bls.conn.StartBinlogDumpFromPosition(ctx, bls.startPos)
+ events, errs, err = bls.conn.StartBinlogDumpFromPosition(ctx, "", bls.startPos)
} else {
- bls.startPos, events, err = bls.conn.StartBinlogDumpFromCurrent(ctx)
+ bls.startPos, events, errs, err = bls.conn.StartBinlogDumpFromCurrent(ctx)
}
if err != nil {
return err
}
+
// parseEvents will loop until the events channel is closed, the
// service enters the SHUTTING_DOWN state, or an error occurs.
- stopPos, err = bls.parseEvents(ctx, events)
+ stopPos, err = bls.parseEvents(ctx, events, errs)
return err
}
@@ -243,7 +245,7 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) {
// If the sendTransaction func returns io.EOF, parseEvents returns ErrClientEOF.
// If the events channel is closed, parseEvents returns ErrServerEOF.
// If the context is done, returns ctx.Err().
-func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent) (mysql.Position, error) {
+func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent, errs <-chan error) (mysql.Position, error) {
var statements []FullBinlogStatement
var format mysql.BinlogFormat
var gtid mysql.GTID
@@ -297,6 +299,8 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog
log.Infof("reached end of binlog event stream")
return pos, ErrServerEOF
}
+ case err = <-errs:
+ return pos, err
case <-ctx.Done():
log.Infof("stopping early due to binlog Streamer service shutdown or client disconnect")
return pos, ctx.Err()
diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go
index eb7578dd1fe..6a5c22723fd 100644
--- a/go/vt/binlog/binlog_streamer_rbr_test.go
+++ b/go/vt/binlog/binlog_streamer_rbr_test.go
@@ -175,6 +175,7 @@ func TestStreamerParseRBREvents(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []fullBinlogTransaction{
{
@@ -263,7 +264,7 @@ func TestStreamerParseRBREvents(t *testing.T) {
bls := NewStreamer(dbcfgs, se, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -420,6 +421,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []fullBinlogTransaction{
{
@@ -508,7 +510,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) {
bls := NewStreamer(dbcfgs, se, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
diff --git a/go/vt/binlog/binlog_streamer_test.go b/go/vt/binlog/binlog_streamer_test.go
index 3da7e52c25a..df2af984e21 100644
--- a/go/vt/binlog/binlog_streamer_test.go
+++ b/go/vt/binlog/binlog_streamer_test.go
@@ -23,6 +23,7 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"context"
@@ -95,6 +96,7 @@ func TestStreamerParseEventsXID(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -127,7 +129,7 @@ func TestStreamerParseEventsXID(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -158,6 +160,7 @@ func TestStreamerParseEventsCommit(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -189,7 +192,7 @@ func TestStreamerParseEventsCommit(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -201,6 +204,7 @@ func TestStreamerParseEventsCommit(t *testing.T) {
func TestStreamerStop(t *testing.T) {
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return nil
@@ -218,7 +222,7 @@ func TestStreamerStop(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
done := make(chan error)
go func() {
- _, err := bls.parseEvents(ctx, events)
+ _, err := bls.parseEvents(ctx, events, errs)
done <- err
}()
@@ -253,6 +257,7 @@ func TestStreamerParseEventsClientEOF(t *testing.T) {
want := ErrClientEOF
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return io.EOF
@@ -267,7 +272,7 @@ func TestStreamerParseEventsClientEOF(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err != want {
t.Errorf("wrong error, got %#v, want %#v", err, want)
}
@@ -277,6 +282,7 @@ func TestStreamerParseEventsServerEOF(t *testing.T) {
want := ErrServerEOF
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
close(events)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
@@ -289,12 +295,50 @@ func TestStreamerParseEventsServerEOF(t *testing.T) {
dbcfgs := dbconfigs.New(mcp)
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err != want {
t.Errorf("wrong error, got %#v, want %#v", err, want)
}
}
+// TestStreamerParseEventsGTIDPurged tests binlog streamer error
+// propagation generally, as well as testing specifically for
+// the error seen when the client needs GTIDs that have been
+// purged on the source.
+func TestStreamerParseEventsGTIDPurged(t *testing.T) {
+ events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
+ expectedStreamErr := mysql.NewSQLError(mysql.ERMasterFatalReadingBinlog, mysql.SSUnknownSQLState,
+ "Cannot replicate because the master purged required binary logs.")
+
+ sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
+ return nil
+ }
+ // Set mock mysql.ConnParams and dbconfig
+ mcp := &mysql.ConnParams{
+ DbName: "vt_test_keyspace",
+ }
+ dbcfgs := dbconfigs.New(mcp)
+
+ go func() {
+ tmr := time.NewTimer(10 * time.Second)
+ defer tmr.Stop()
+ select {
+ case errs <- expectedStreamErr:
+ case <-tmr.C:
+ require.FailNow(t, "timed out sending error message")
+ }
+ }()
+
+ bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
+ _, err := bls.parseEvents(context.Background(), events, errs)
+ require.Error(t, err)
+ sqlErr, ok := err.(*mysql.SQLError)
+ require.True(t, ok, "expected SQLError, got %T", err)
+ require.True(t, sqlErr.Num == mysql.ERMasterFatalReadingBinlog, "expected ERMasterFatalReadingBinlog (%d), got %d",
+ mysql.ERMasterFatalReadingBinlog, sqlErr.Num)
+}
+
func TestStreamerParseEventsSendErrorXID(t *testing.T) {
f := mysql.NewMySQL56BinlogFormat()
s := mysql.NewFakeBinlogStream()
@@ -313,6 +357,7 @@ func TestStreamerParseEventsSendErrorXID(t *testing.T) {
want := "send reply error: foobar"
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return fmt.Errorf("foobar")
@@ -328,7 +373,7 @@ func TestStreamerParseEventsSendErrorXID(t *testing.T) {
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err == nil {
t.Errorf("expected error, got none")
return
@@ -358,6 +403,7 @@ func TestStreamerParseEventsSendErrorCommit(t *testing.T) {
want := "send reply error: foobar"
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return fmt.Errorf("foobar")
@@ -372,7 +418,7 @@ func TestStreamerParseEventsSendErrorCommit(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err == nil {
t.Errorf("expected error, got none")
return
@@ -398,6 +444,7 @@ func TestStreamerParseEventsInvalid(t *testing.T) {
want := "can't parse binlog event, invalid data:"
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return nil
@@ -412,7 +459,7 @@ func TestStreamerParseEventsInvalid(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err == nil {
t.Errorf("expected error, got none")
return
@@ -440,6 +487,7 @@ func TestStreamerParseEventsInvalidFormat(t *testing.T) {
want := "can't parse FORMAT_DESCRIPTION_EVENT:"
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return nil
@@ -454,7 +502,7 @@ func TestStreamerParseEventsInvalidFormat(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err == nil {
t.Errorf("expected error, got none")
return
@@ -482,6 +530,7 @@ func TestStreamerParseEventsNoFormat(t *testing.T) {
want := "got a real event before FORMAT_DESCRIPTION_EVENT:"
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return nil
@@ -496,7 +545,7 @@ func TestStreamerParseEventsNoFormat(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err == nil {
t.Errorf("expected error, got none")
return
@@ -522,6 +571,7 @@ func TestStreamerParseEventsInvalidQuery(t *testing.T) {
want := "can't get query from binlog event:"
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return nil
@@ -536,7 +586,7 @@ func TestStreamerParseEventsInvalidQuery(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err == nil {
t.Errorf("expected error, got none")
return
@@ -577,6 +627,7 @@ func TestStreamerParseEventsRollback(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -623,7 +674,7 @@ func TestStreamerParseEventsRollback(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -648,6 +699,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -695,7 +747,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -723,6 +775,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -770,7 +823,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -799,6 +852,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -831,7 +885,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -859,6 +913,7 @@ func TestStreamerParseEventsInvalidIntVar(t *testing.T) {
want := "can't parse INTVAR_EVENT:"
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return nil
@@ -872,7 +927,7 @@ func TestStreamerParseEventsInvalidIntVar(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction)
go sendTestEvents(events, input)
- _, err := bls.parseEvents(context.Background(), events)
+ _, err := bls.parseEvents(context.Background(), events, errs)
if err == nil {
t.Errorf("expected error, got none")
return
@@ -904,6 +959,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -935,7 +991,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -966,6 +1022,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -997,7 +1054,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -1025,6 +1082,7 @@ func TestStreamerParseEventsBeginAgain(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error {
return nil
@@ -1039,7 +1097,7 @@ func TestStreamerParseEventsBeginAgain(t *testing.T) {
before := binlogStreamerErrors.Counts()["ParseEvents"]
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
after := binlogStreamerErrors.Counts()["ParseEvents"]
@@ -1068,6 +1126,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -1107,7 +1166,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
@@ -1135,6 +1194,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) {
}
events := make(chan mysql.BinlogEvent)
+ errs := make(chan error)
want := []*binlogdatapb.BinlogTransaction{
{
@@ -1166,7 +1226,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) {
bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction)
go sendTestEvents(events, input)
- if _, err := bls.parseEvents(context.Background(), events); err != ErrServerEOF {
+ if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF {
t.Errorf("unexpected error: %v", err)
}
diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go
index 4f35626222a..1f75a396505 100644
--- a/go/vt/binlog/binlogplayer/binlog_player.go
+++ b/go/vt/binlog/binlogplayer/binlog_player.go
@@ -46,7 +46,6 @@ import (
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/throttler"
- "vitess.io/vitess/go/vt/withddl"
)
var (
@@ -102,6 +101,9 @@ type Stats struct {
VReplicationLags *stats.Timings
VReplicationLagRates *stats.Rates
+
+ TableCopyRowCounts *stats.CountersWithSingleLabel
+ TableCopyTimings *stats.Timings
}
// RecordHeartbeat updates the time the last heartbeat from vstreamer was seen
@@ -160,6 +162,8 @@ func NewStats() *Stats {
bps.NoopQueryCount = stats.NewCountersWithSingleLabel("", "", "Statement", "")
bps.VReplicationLags = stats.NewTimings("", "", "")
bps.VReplicationLagRates = stats.NewRates("", bps.VReplicationLags, 15*60/5, 5*time.Second)
+ bps.TableCopyRowCounts = stats.NewCountersWithSingleLabel("", "", "Table", "")
+ bps.TableCopyTimings = stats.NewTimings("", "", "Table")
return bps
}
@@ -519,92 +523,23 @@ func (blp *BinlogPlayer) setVReplicationState(state, message string) error {
return nil
}
-// CreateVReplicationTable returns the statements required to create
-// the _vt.vreplication table.
-// id: is an auto-increment column that identifies the stream.
-// workflow: documents the creator/manager of the stream. Example: 'MoveTables'.
-// source: contains a string proto representation of binlogpb.BinlogSource.
-// pos: initially, a start position, and is updated to the current position by the binlog player.
-// stop_pos: optional column that specifies the stop position.
-// max_tps: max transactions per second.
-// max_replication_lag: if replication lag exceeds this amount writing is throttled accordingly.
-// cell: optional column that overrides the current cell to replicate from.
-// tablet_types: optional column that overrides the tablet types to look to replicate from.
-// time_update: last time an event was applied.
-// transaction_timestamp: timestamp of the transaction (from the primary).
-// state: Running, Error or Stopped.
-// message: Reason for current state.
-func CreateVReplicationTable() []string {
- return []string{
- "CREATE DATABASE IF NOT EXISTS _vt",
- "DROP TABLE IF EXISTS _vt.blp_checkpoint",
- `CREATE TABLE IF NOT EXISTS _vt.vreplication (
- id INT AUTO_INCREMENT,
- workflow VARBINARY(1000),
- source VARBINARY(10000) NOT NULL,
- pos VARBINARY(10000) NOT NULL,
- stop_pos VARBINARY(10000) DEFAULT NULL,
- max_tps BIGINT(20) NOT NULL,
- max_replication_lag BIGINT(20) NOT NULL,
- cell VARBINARY(1000) DEFAULT NULL,
- tablet_types VARBINARY(100) DEFAULT NULL,
- time_updated BIGINT(20) NOT NULL,
- transaction_timestamp BIGINT(20) NOT NULL,
- state VARBINARY(100) NOT NULL,
- message VARBINARY(1000) DEFAULT NULL,
- db_name VARBINARY(255) NOT NULL,
- PRIMARY KEY (id)
-) ENGINE=InnoDB`,
- }
-}
-
-// AlterVReplicationTable adds new columns to vreplication table
-var AlterVReplicationTable = []string{
- "ALTER TABLE _vt.vreplication ADD COLUMN db_name VARBINARY(255) NOT NULL",
- "ALTER TABLE _vt.vreplication MODIFY source MEDIUMBLOB NOT NULL",
- "ALTER TABLE _vt.vreplication ADD KEY workflow_idx (workflow(64))",
- "ALTER TABLE _vt.vreplication ADD COLUMN rows_copied BIGINT(20) NOT NULL DEFAULT 0",
- "ALTER TABLE _vt.vreplication ADD COLUMN tags VARBINARY(1024) NOT NULL DEFAULT ''",
-
- // records the time of the last heartbeat. Heartbeats are only received if the source has no recent events
- "ALTER TABLE _vt.vreplication ADD COLUMN time_heartbeat BIGINT(20) NOT NULL DEFAULT 0",
- "ALTER TABLE _vt.vreplication ADD COLUMN workflow_type int NOT NULL DEFAULT 0",
- "ALTER TABLE _vt.vreplication ADD COLUMN time_throttled BIGINT NOT NULL DEFAULT 0",
- "ALTER TABLE _vt.vreplication ADD COLUMN component_throttled VARCHAR(255) NOT NULL DEFAULT ''",
- "ALTER TABLE _vt.vreplication ADD COLUMN workflow_sub_type int NOT NULL DEFAULT 0",
-}
-
-// WithDDLInitialQueries contains the queries that:
-// - are to be expected by the mock db client during tests, or
-// - trigger some of the above _vt.vreplication schema changes to take effect
-// when the binlogplayer starts up
-//
-// todo: cleanup here. QueryToTriggerWithDDL will be enough to ensure vreplication schema gets created/altered correctly.
-//
-// So do that explicitly and move queries required into the mock code.
-var WithDDLInitialQueries = []string{
- "SELECT db_name FROM _vt.vreplication LIMIT 0",
- "SELECT rows_copied FROM _vt.vreplication LIMIT 0",
- "SELECT time_heartbeat FROM _vt.vreplication LIMIT 0",
- withddl.QueryToTriggerWithDDL,
-}
-
// VRSettings contains the settings of a vreplication table.
type VRSettings struct {
- StartPos mysql.Position
- StopPos mysql.Position
- MaxTPS int64
- MaxReplicationLag int64
- State string
- WorkflowType int32
- WorkflowSubType int32
- WorkflowName string
+ StartPos mysql.Position
+ StopPos mysql.Position
+ MaxTPS int64
+ MaxReplicationLag int64
+ State string
+ WorkflowType int32
+ WorkflowSubType int32
+ WorkflowName string
+ DeferSecondaryKeys bool
}
// ReadVRSettings retrieves the throttler settings for
// vreplication from the checkpoint table.
func ReadVRSettings(dbClient DBClient, uid uint32) (VRSettings, error) {
- query := fmt.Sprintf("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=%v", uid)
+ query := fmt.Sprintf("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=%v", uid)
qr, err := dbClient.ExecuteFetch(query, 1)
if err != nil {
return VRSettings{}, fmt.Errorf("error %v in selecting vreplication settings %v", err, query)
@@ -641,27 +576,32 @@ func ReadVRSettings(dbClient DBClient, uid uint32) (VRSettings, error) {
if err != nil {
return VRSettings{}, fmt.Errorf("failed to parse workflow_sub_type column: %v", err)
}
+ deferSecondaryKeys, err := vrRow.ToBool("defer_secondary_keys")
+ if err != nil {
+ return VRSettings{}, fmt.Errorf("failed to parse defer_secondary_keys column: %v", err)
+ }
return VRSettings{
- StartPos: startPos,
- StopPos: stopPos,
- MaxTPS: maxTPS,
- MaxReplicationLag: maxReplicationLag,
- State: vrRow.AsString("state", ""),
- WorkflowType: workflowType,
- WorkflowName: vrRow.AsString("workflow", ""),
- WorkflowSubType: workflowSubType,
+ StartPos: startPos,
+ StopPos: stopPos,
+ MaxTPS: maxTPS,
+ MaxReplicationLag: maxReplicationLag,
+ State: vrRow.AsString("state", ""),
+ WorkflowType: workflowType,
+ WorkflowName: vrRow.AsString("workflow", ""),
+ WorkflowSubType: workflowSubType,
+ DeferSecondaryKeys: deferSecondaryKeys,
}, nil
}
// CreateVReplication returns a statement to populate the first value into
// the _vt.vreplication table.
func CreateVReplication(workflow string, source *binlogdatapb.BinlogSource, position string, maxTPS, maxReplicationLag, timeUpdated int64, dbName string,
- workflowType binlogdatapb.VReplicationWorkflowType, workflowSubType binlogdatapb.VReplicationWorkflowSubType) string {
+ workflowType binlogdatapb.VReplicationWorkflowType, workflowSubType binlogdatapb.VReplicationWorkflowSubType, deferSecondaryKeys bool) string {
return fmt.Sprintf("insert into _vt.vreplication "+
- "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type) "+
- "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %v, %v)",
+ "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) "+
+ "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %v, %v, %v)",
encodeString(workflow), encodeString(source.String()), encodeString(position), maxTPS, maxReplicationLag,
- timeUpdated, BlpRunning, encodeString(dbName), int64(workflowType), int64(workflowSubType))
+ timeUpdated, BlpRunning, encodeString(dbName), int64(workflowType), int64(workflowSubType), deferSecondaryKeys)
}
// CreateVReplicationState returns a statement to create a stopped vreplication.
diff --git a/go/vt/binlog/binlogplayer/binlog_player_test.go b/go/vt/binlog/binlogplayer/binlog_player_test.go
index 04f3b7844a2..20f75430644 100644
--- a/go/vt/binlog/binlogplayer/binlog_player_test.go
+++ b/go/vt/binlog/binlogplayer/binlog_player_test.go
@@ -44,6 +44,7 @@ var (
{Name: "workflow_type", Type: sqltypes.Int64},
{Name: "workflow", Type: sqltypes.VarChar},
{Name: "workflow_sub_type", Type: sqltypes.Int64},
+ {Name: "defer_secondary_keys", Type: sqltypes.Int64},
},
RowsAffected: 1,
InsertID: 0,
@@ -57,6 +58,7 @@ var (
sqltypes.NewInt64(1), // workflow_type
sqltypes.NewVarChar("wf"), // workflow
sqltypes.NewInt64(0), // workflow_sub_type
+ sqltypes.NewInt64(0), // defer_secondary_keys
},
},
}
@@ -67,7 +69,7 @@ var (
func TestNewBinlogPlayerKeyRange(t *testing.T) {
dbClient := NewMockDBClient(t)
dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil)
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", testSettingsResponse, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", testSettingsResponse, nil)
dbClient.ExpectRequest("begin", nil, nil)
dbClient.ExpectRequest("insert into t values(1)", testDMLResponse, nil)
dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil)
@@ -98,7 +100,7 @@ func TestNewBinlogPlayerKeyRange(t *testing.T) {
func TestNewBinlogPlayerTables(t *testing.T) {
dbClient := NewMockDBClient(t)
dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil)
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", testSettingsResponse, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", testSettingsResponse, nil)
dbClient.ExpectRequest("begin", nil, nil)
dbClient.ExpectRequest("insert into t values(1)", testDMLResponse, nil)
dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil)
@@ -130,7 +132,7 @@ func TestNewBinlogPlayerTables(t *testing.T) {
func TestApplyEventsFail(t *testing.T) {
dbClient := NewMockDBClient(t)
dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil)
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", testSettingsResponse, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", testSettingsResponse, nil)
dbClient.ExpectRequest("begin", nil, errors.New("err"))
dbClient.ExpectRequest("update _vt.vreplication set state='Error', message='error in processing binlog event failed query BEGIN, err: err' where id=1", testDMLResponse, nil)
@@ -156,6 +158,7 @@ var settingsFields []*querypb.Field = []*querypb.Field{
{Name: "workflow_type", Type: sqltypes.Int64},
{Name: "workflow", Type: sqltypes.VarChar},
{Name: "workflow_sub_type", Type: sqltypes.Int64},
+ {Name: "defer_secondary_keys", Type: sqltypes.Int64},
}
// TestStopPosEqual ensures player stops if stopPos==pos.
@@ -176,10 +179,11 @@ func TestStopPosEqual(t *testing.T) {
sqltypes.NewInt64(1), // workflow_type
sqltypes.NewVarChar("wf"), // workflow
sqltypes.NewInt64(1), // workflow_sub_type
+ sqltypes.NewInt64(1), // defer_secondary_keys
},
},
}
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", posEqual, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", posEqual, nil)
dbClient.ExpectRequest(`update _vt.vreplication set state='Stopped', message='not starting BinlogPlayer, we\'re already at the desired position 0-1-1083' where id=1`, testDMLResponse, nil)
_ = newFakeBinlogClient()
@@ -212,10 +216,11 @@ func TestStopPosLess(t *testing.T) {
sqltypes.NewInt64(1), // workflow_type
sqltypes.NewVarChar("wf"), // workflow
sqltypes.NewInt64(1), // workflow_sub_type
+ sqltypes.NewInt64(1), // defer_secondary_keys
},
},
}
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", posEqual, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", posEqual, nil)
dbClient.ExpectRequest(`update _vt.vreplication set state='Stopped', message='starting point 0-1-1083 greater than stopping point 0-1-1082' where id=1`, testDMLResponse, nil)
_ = newFakeBinlogClient()
@@ -248,10 +253,11 @@ func TestStopPosGreater(t *testing.T) {
sqltypes.NewInt64(1), // workflow_type
sqltypes.NewVarChar("wf"), // workflow
sqltypes.NewInt64(1), // workflow_sub_type
+ sqltypes.NewInt64(1), // defer_secondary_keys
},
},
}
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", posEqual, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", posEqual, nil)
dbClient.ExpectRequest("begin", nil, nil)
dbClient.ExpectRequest("insert into t values(1)", testDMLResponse, nil)
dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil)
@@ -288,10 +294,11 @@ func TestContextCancel(t *testing.T) {
sqltypes.NewInt64(1), // workflow_type
sqltypes.NewVarChar("wf"), // workflow
sqltypes.NewInt64(1), // workflow_sub_type
+ sqltypes.NewInt64(1), // defer_secondary_keys
},
},
}
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", posEqual, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", posEqual, nil)
dbClient.ExpectRequest("begin", nil, nil)
dbClient.ExpectRequest("insert into t values(1)", testDMLResponse, nil)
dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil)
@@ -318,7 +325,7 @@ func TestContextCancel(t *testing.T) {
func TestRetryOnDeadlock(t *testing.T) {
dbClient := NewMockDBClient(t)
dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil)
- dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type from _vt.vreplication where id=1", testSettingsResponse, nil)
+ dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", testSettingsResponse, nil)
deadlocked := &mysql.SQLError{Num: 1213, Message: "deadlocked"}
dbClient.ExpectRequest("begin", nil, nil)
dbClient.ExpectRequest("insert into t values(1)", nil, deadlocked)
@@ -358,8 +365,8 @@ func applyEvents(blp *BinlogPlayer) func() error {
func TestCreateVReplicationKeyRange(t *testing.T) {
want := "insert into _vt.vreplication " +
- "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type) " +
- `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" key_range:{end:\"\\x80\"}', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0)`
+ "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) " +
+ `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" key_range:{end:\"\\x80\"}', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0, false)`
bls := binlogdatapb.BinlogSource{
Keyspace: "ks",
@@ -369,7 +376,7 @@ func TestCreateVReplicationKeyRange(t *testing.T) {
},
}
- got := CreateVReplication("Resharding", &bls, "MariaDB/0-1-1083", throttler.MaxRateModuleDisabled, throttler.ReplicationLagModuleDisabled, 481823, "db", 0, 0)
+ got := CreateVReplication("Resharding", &bls, "MariaDB/0-1-1083", throttler.MaxRateModuleDisabled, throttler.ReplicationLagModuleDisabled, 481823, "db", 0, 0, false)
if got != want {
t.Errorf("CreateVReplication() =\n%v, want\n%v", got, want)
}
@@ -377,8 +384,8 @@ func TestCreateVReplicationKeyRange(t *testing.T) {
func TestCreateVReplicationTables(t *testing.T) {
want := "insert into _vt.vreplication " +
- "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type) " +
- `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" tables:\"a\" tables:\"b\"', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0)`
+ "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) " +
+ `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" tables:\"a\" tables:\"b\"', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0, false)`
bls := binlogdatapb.BinlogSource{
Keyspace: "ks",
@@ -386,7 +393,7 @@ func TestCreateVReplicationTables(t *testing.T) {
Tables: []string{"a", "b"},
}
- got := CreateVReplication("Resharding", &bls, "MariaDB/0-1-1083", throttler.MaxRateModuleDisabled, throttler.ReplicationLagModuleDisabled, 481823, "db", 0, 0)
+ got := CreateVReplication("Resharding", &bls, "MariaDB/0-1-1083", throttler.MaxRateModuleDisabled, throttler.ReplicationLagModuleDisabled, 481823, "db", 0, 0, false)
if got != want {
t.Errorf("CreateVReplication() =\n%v, want\n%v", got, want)
}
diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go
index d1b9836745e..50df683976d 100644
--- a/go/vt/binlog/binlogplayer/mock_dbclient.go
+++ b/go/vt/binlog/binlogplayer/mock_dbclient.go
@@ -22,8 +22,6 @@ import (
"testing"
"time"
- "vitess.io/vitess/go/vt/withddl"
-
"vitess.io/vitess/go/sqltypes"
)
@@ -33,13 +31,12 @@ const mockClientUNameDba = "Dba"
// MockDBClient mocks a DBClient.
// It must be configured to expect requests in a specific order.
type MockDBClient struct {
- t *testing.T
- UName string
- expect []*mockExpect
- currentResult int
- done chan struct{}
- queriesToIgnore []*mockExpect // these queries will return a standard nil result, you SHOULD NOT expect them in the tests
- invariants map[string]*sqltypes.Result
+ t *testing.T
+ UName string
+ expect []*mockExpect
+ currentResult int
+ done chan struct{}
+ invariants map[string]*sqltypes.Result
}
type mockExpect struct {
@@ -49,31 +46,12 @@ type mockExpect struct {
err error
}
-func getQueriesToIgnore() []*mockExpect {
- var queriesToIgnore []*mockExpect
- var queries []string
- queries = append(queries, WithDDLInitialQueries...)
- queries = append(queries, withddl.QueryToTriggerWithDDL)
- for _, query := range queries {
- exp := &mockExpect{
- query: query,
- re: nil,
- result: &sqltypes.Result{},
- err: nil,
- }
- queriesToIgnore = append(queriesToIgnore, exp)
-
- }
- return queriesToIgnore
-}
-
// NewMockDBClient returns a new DBClientMock with the default "Filtered" UName.
func NewMockDBClient(t *testing.T) *MockDBClient {
return &MockDBClient{
- t: t,
- UName: mockClientUNameFiltered,
- done: make(chan struct{}),
- queriesToIgnore: getQueriesToIgnore(),
+ t: t,
+ UName: mockClientUNameFiltered,
+ done: make(chan struct{}),
invariants: map[string]*sqltypes.Result{
"CREATE TABLE IF NOT EXISTS _vt.vreplication_log": {},
"select id, type, state, message from _vt.vreplication_log": {},
@@ -85,10 +63,9 @@ func NewMockDBClient(t *testing.T) *MockDBClient {
// NewMockDbaClient returns a new DBClientMock with the default "Dba" UName.
func NewMockDbaClient(t *testing.T) *MockDBClient {
return &MockDBClient{
- t: t,
- UName: mockClientUNameDba,
- done: make(chan struct{}),
- queriesToIgnore: getQueriesToIgnore(),
+ t: t,
+ UName: mockClientUNameDba,
+ done: make(chan struct{}),
}
}
@@ -174,11 +151,6 @@ func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re
dc.t.Helper()
dc.t.Logf("DBClient query: %v", query)
- for _, q := range dc.queriesToIgnore {
- if strings.EqualFold(q.query, query) || strings.Contains(strings.ToLower(query), strings.ToLower(q.query)) {
- return q.result, q.err
- }
- }
for q, result := range dc.invariants {
if strings.Contains(query, q) {
return result, nil
diff --git a/go/vt/dbconfigs/credentials.go b/go/vt/dbconfigs/credentials.go
index 1f0a0bbb0e2..5a5dbc1c1a1 100644
--- a/go/vt/dbconfigs/credentials.go
+++ b/go/vt/dbconfigs/credentials.go
@@ -24,7 +24,6 @@ package dbconfigs
import (
"encoding/json"
"errors"
- "flag"
"os"
"os/signal"
"strings"
@@ -112,15 +111,15 @@ func init() {
fs.StringVar(&dbCredentialsFile, "db-credentials-file", dbCredentialsFile, "db credentials file; send SIGHUP to reload this file")
// 'vault' implementation flags
- flag.StringVar(&vaultAddr, "db-credentials-vault-addr", vaultAddr, "URL to Vault server")
- flag.DurationVar(&vaultTimeout, "db-credentials-vault-timeout", vaultTimeout, "Timeout for vault API operations")
- flag.StringVar(&vaultCACert, "db-credentials-vault-tls-ca", vaultCACert, "Path to CA PEM for validating Vault server certificate")
- flag.StringVar(&vaultPath, "db-credentials-vault-path", vaultPath, "Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds")
- flag.DurationVar(&vaultCacheTTL, "db-credentials-vault-ttl", vaultCacheTTL, "How long to cache DB credentials from the Vault server")
- flag.StringVar(&vaultTokenFile, "db-credentials-vault-tokenfile", vaultTokenFile, "Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable")
- flag.StringVar(&vaultRoleID, "db-credentials-vault-roleid", vaultRoleID, "Vault AppRole id; can also be passed using VAULT_ROLEID environment variable")
- flag.StringVar(&vaultRoleSecretIDFile, "db-credentials-vault-role-secretidfile", vaultRoleSecretIDFile, "Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable")
- flag.StringVar(&vaultRoleMountPoint, "db-credentials-vault-role-mountpoint", vaultRoleMountPoint, "Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable")
+ fs.StringVar(&vaultAddr, "db-credentials-vault-addr", vaultAddr, "URL to Vault server")
+ fs.DurationVar(&vaultTimeout, "db-credentials-vault-timeout", vaultTimeout, "Timeout for vault API operations")
+ fs.StringVar(&vaultCACert, "db-credentials-vault-tls-ca", vaultCACert, "Path to CA PEM for validating Vault server certificate")
+ fs.StringVar(&vaultPath, "db-credentials-vault-path", vaultPath, "Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds")
+ fs.DurationVar(&vaultCacheTTL, "db-credentials-vault-ttl", vaultCacheTTL, "How long to cache DB credentials from the Vault server")
+ fs.StringVar(&vaultTokenFile, "db-credentials-vault-tokenfile", vaultTokenFile, "Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable")
+ fs.StringVar(&vaultRoleID, "db-credentials-vault-roleid", vaultRoleID, "Vault AppRole id; can also be passed using VAULT_ROLEID environment variable")
+ fs.StringVar(&vaultRoleSecretIDFile, "db-credentials-vault-role-secretidfile", vaultRoleSecretIDFile, "Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable")
+ fs.StringVar(&vaultRoleMountPoint, "db-credentials-vault-role-mountpoint", vaultRoleMountPoint, "Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable")
})
}
}
diff --git a/go/vt/dbconfigs/dbconfigs.go b/go/vt/dbconfigs/dbconfigs.go
index 5ab5e7a9356..371892144d3 100644
--- a/go/vt/dbconfigs/dbconfigs.go
+++ b/go/vt/dbconfigs/dbconfigs.go
@@ -254,12 +254,6 @@ func (dbcfgs *DBConfigs) ExternalRepl() Connector {
// ExternalReplWithDB returns connection parameters for repl with dbname set.
func (dbcfgs *DBConfigs) ExternalReplWithDB() Connector {
params := dbcfgs.makeParams(&dbcfgs.externalReplParams, true)
- // TODO @rafael: This is a hack to allows to configure external databases by providing
- // db-config-erepl-dbname.
- if params.connParams.DeprecatedDBName != "" {
- params.connParams.DbName = params.connParams.DeprecatedDBName
- return params
- }
return params
}
@@ -280,7 +274,7 @@ func (dbcfgs *DBConfigs) IsZero() bool {
}
// HasGlobalSettings returns true if DBConfigs contains values
-// for gloabl configs.
+// for global configs.
func (dbcfgs *DBConfigs) HasGlobalSettings() bool {
return dbcfgs.Host != "" || dbcfgs.Socket != ""
}
diff --git a/go/vt/dbconnpool/connection_pool.go b/go/vt/dbconnpool/connection_pool.go
index 2ab703b812b..bb339862dcf 100644
--- a/go/vt/dbconnpool/connection_pool.go
+++ b/go/vt/dbconnpool/connection_pool.go
@@ -54,6 +54,7 @@ type ConnectionPool struct {
connections pools.IResourcePool
capacity int
idleTimeout time.Duration
+ maxLifetime time.Duration
resolutionFrequency time.Duration
// info is set at Open() time
@@ -63,8 +64,8 @@ type ConnectionPool struct {
// NewConnectionPool creates a new ConnectionPool. The name is used
// to publish stats only.
-func NewConnectionPool(name string, capacity int, idleTimeout time.Duration, dnsResolutionFrequency time.Duration) *ConnectionPool {
- cp := &ConnectionPool{name: name, capacity: capacity, idleTimeout: idleTimeout, resolutionFrequency: dnsResolutionFrequency}
+func NewConnectionPool(name string, capacity int, idleTimeout time.Duration, maxLifetime time.Duration, dnsResolutionFrequency time.Duration) *ConnectionPool {
+ cp := &ConnectionPool{name: name, capacity: capacity, idleTimeout: idleTimeout, maxLifetime: maxLifetime, resolutionFrequency: dnsResolutionFrequency}
if name == "" || usedNames[name] {
return cp
}
@@ -78,6 +79,7 @@ func NewConnectionPool(name string, capacity int, idleTimeout time.Duration, dns
stats.NewCounterDurationFunc(name+"WaitTime", "Connection pool wait time", cp.WaitTime)
stats.NewGaugeDurationFunc(name+"IdleTimeout", "Connection pool idle timeout", cp.IdleTimeout)
stats.NewGaugeFunc(name+"IdleClosed", "Connection pool idle closed", cp.IdleClosed)
+ stats.NewGaugeFunc(name+"MaxLifetimeClosed", "Connection pool refresh closed", cp.MaxLifetimeClosed)
stats.NewCounterFunc(name+"Exhausted", "Number of times pool had zero available slots", cp.Exhausted)
return cp
}
@@ -107,7 +109,7 @@ func (cp *ConnectionPool) Open(info dbconfigs.Connector) {
cp.mu.Lock()
defer cp.mu.Unlock()
cp.info = info
- cp.connections = pools.NewResourcePool(cp.connect, cp.capacity, cp.capacity, cp.idleTimeout, nil, refreshCheck, cp.resolutionFrequency)
+ cp.connections = pools.NewResourcePool(cp.connect, cp.capacity, cp.capacity, cp.idleTimeout, cp.maxLifetime, nil, refreshCheck, cp.resolutionFrequency)
}
// connect is used by the resource pool to create a new Resource.
@@ -118,6 +120,7 @@ func (cp *ConnectionPool) connect(ctx context.Context) (pools.Resource, error) {
}
return &PooledDBConnection{
DBConnection: c,
+ timeCreated: time.Now(),
pool: cp,
}, nil
}
@@ -273,7 +276,7 @@ func (cp *ConnectionPool) IdleTimeout() time.Duration {
return p.IdleTimeout()
}
-// IdleClosed returns the number of closed connections for the pool.
+// IdleClosed returns the number of connections closed due to idle timeout for the pool.
func (cp *ConnectionPool) IdleClosed() int64 {
p := cp.pool()
if p == nil {
@@ -282,6 +285,15 @@ func (cp *ConnectionPool) IdleClosed() int64 {
return p.IdleClosed()
}
+// MaxLifetimeClosed returns the number of connections closed due to refresh timeout for the pool.
+func (cp *ConnectionPool) MaxLifetimeClosed() int64 {
+ p := cp.pool()
+ if p == nil {
+ return 0
+ }
+ return p.MaxLifetimeClosed()
+}
+
// Exhausted returns the number of times available went to zero for the pool.
func (cp *ConnectionPool) Exhausted() int64 {
p := cp.pool()
diff --git a/go/vt/dbconnpool/pooled_connection.go b/go/vt/dbconnpool/pooled_connection.go
index f0c48a730e9..b4ca428e973 100644
--- a/go/vt/dbconnpool/pooled_connection.go
+++ b/go/vt/dbconnpool/pooled_connection.go
@@ -18,6 +18,7 @@ package dbconnpool
import (
"context"
+ "time"
"vitess.io/vitess/go/pools"
)
@@ -25,7 +26,12 @@ import (
// PooledDBConnection re-exposes DBConnection to be used by ConnectionPool.
type PooledDBConnection struct {
*DBConnection
- pool *ConnectionPool
+ timeCreated time.Time
+ pool *ConnectionPool
+}
+
+func (pc *PooledDBConnection) Expired(lifetimeTimeout time.Duration) bool {
+ return lifetimeTimeout > 0 && time.Until(pc.timeCreated.Add(lifetimeTimeout)) < 0
}
func (pc *PooledDBConnection) ApplySetting(context.Context, *pools.Setting) error {
diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go
index 9a03823e9ec..be0d022ff98 100644
--- a/go/vt/discovery/healthcheck.go
+++ b/go/vt/discovery/healthcheck.go
@@ -147,19 +147,25 @@ func ParseTabletURLTemplateFromFlag() {
}
func init() {
- servenv.OnParseFor("vtgate", registerDiscoveryFlags)
- servenv.OnParseFor("vtcombo", registerDiscoveryFlags)
- servenv.OnParseFor("vtctld", registerDiscoveryFlags)
+ for _, cmd := range []string{"vtgate", "vtcombo"} {
+ servenv.OnParseFor(cmd, registerDiscoveryFlags)
+ servenv.OnParseFor(cmd, registerWebUIFlags)
+ }
+
+ servenv.OnParseFor("vtctld", registerWebUIFlags)
}
func registerDiscoveryFlags(fs *pflag.FlagSet) {
+ fs.StringSliceVar(&tabletFilters, "tablet_filters", []string{}, "Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.")
+ fs.Var((*topoproto.TabletTypeListFlag)(&AllowedTabletTypes), "allowed_tablet_types", "Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.")
+ fs.StringSliceVar(&KeyspacesToWatch, "keyspaces_to_watch", []string{}, "Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.")
+}
+
+func registerWebUIFlags(fs *pflag.FlagSet) {
fs.StringVar(&TabletURLTemplateString, "tablet_url_template", "http://{{.GetTabletHostPort}}", "Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this.")
fs.DurationVar(&refreshInterval, "tablet_refresh_interval", 1*time.Minute, "Tablet refresh interval.")
fs.BoolVar(&refreshKnownTablets, "tablet_refresh_known_tablets", true, "Whether to reload the tablet's address/port map from topo in case they change.")
fs.IntVar(&topoReadConcurrency, "topo_read_concurrency", 32, "Concurrency of topo reads.")
- fs.StringSliceVar(&tabletFilters, "tablet_filters", []string{}, "Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.")
- fs.Var((*topoproto.TabletTypeListFlag)(&AllowedTabletTypes), "allowed_tablet_types", "Specifies the tablet types this vtgate is allowed to route queries to.")
- fs.StringSliceVar(&KeyspacesToWatch, "keyspaces_to_watch", []string{}, "Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.")
ParseTabletURLTemplateFromFlag()
}
@@ -423,8 +429,29 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) {
hc.mu.Lock()
defer hc.mu.Unlock()
- key := KeyFromTablet(tablet)
tabletAlias := tabletAliasString(topoproto.TabletAliasString(tablet.Alias))
+ defer func() {
+ // We want to be sure the tablet is gone from the secondary
+ // maps even if it's already gone from the authoritative map.
+ // The tablet's type also may have recently changed as well,
+ // so ensure that the tablet we're removing is removed from
+ // any possible secondary map keys:
+ // key: keyspace.shard.tabletType -> val: map[tabletAlias]tabletHealth
+ for _, tabletType := range topoproto.AllTabletTypes {
+ key := KeyspaceShardTabletType(fmt.Sprintf("%s.%s.%s", tablet.Keyspace, tablet.Shard, topoproto.TabletTypeLString(tabletType)))
+ // delete from map by keyspace.shard.tabletType
+ ths, ok := hc.healthData[key]
+ if !ok {
+ continue
+ }
+ delete(ths, tabletAlias)
+ // delete from healthy list
+ healthy, ok := hc.healthy[key]
+ if ok && len(healthy) > 0 {
+ hc.recomputeHealthy(key)
+ }
+ }
+ }()
// delete from authoritative map
th, ok := hc.healthByAlias[tabletAlias]
if !ok {
@@ -435,18 +462,6 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) {
// which will call finalizeConn, which will close the connection.
th.cancelFunc()
delete(hc.healthByAlias, tabletAlias)
- // delete from map by keyspace.shard.tabletType
- ths, ok := hc.healthData[key]
- if !ok {
- log.Warningf("We have no health data for target: %v", key)
- return
- }
- delete(ths, tabletAlias)
- // delete from healthy list
- healthy, ok := hc.healthy[key]
- if ok && len(healthy) > 0 {
- hc.recomputeHealthy(key)
- }
}
func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Target, trivialUpdate bool, up bool) {
diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go
index 5ba47f69025..e61915a9b85 100644
--- a/go/vt/discovery/healthcheck_test.go
+++ b/go/vt/discovery/healthcheck_test.go
@@ -684,7 +684,7 @@ func TestRemoveTablet(t *testing.T) {
// there will be a first result, get and discard it
<-resultChan
- shr := &querypb.StreamHealthResponse{
+ shrReplica := &querypb.StreamHealthResponse{
TabletAlias: tablet.Alias,
Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
Serving: true,
@@ -698,7 +698,7 @@ func TestRemoveTablet(t *testing.T) {
Stats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2},
PrimaryTermStartTime: 0,
}}
- input <- shr
+ input <- shrReplica
<-resultChan
// check it's there
a := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
@@ -708,6 +708,71 @@ func TestRemoveTablet(t *testing.T) {
hc.RemoveTablet(tablet)
a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
assert.Empty(t, a, "wrong result, expected empty list")
+
+ // Now confirm that when a tablet's type changes between when it's added to the
+ // cache and when it's removed, that the tablet is entirely removed from the
+ // cache since in the secondary maps it's keyed in part by tablet type.
+ // Note: we are using GetTabletStats here to check the healthData map (rather
+ // than the healthy map that we checked above) because that is the data
+ // structure that is used when printing the contents of the healthcheck cache
+ // in the /debug/status endpoint and in the SHOW VITESS_TABLETS; SQL command
+ // output.
+
+ // Add the tablet back.
+ hc.AddTablet(tablet)
+ // Receive and discard the initial result as we have not yet sent the first
+ // StreamHealthResponse with the dynamic serving and stats information.
+ <-resultChan
+ // Send the first StreamHealthResponse with the dynamic serving and stats
+ // information.
+ input <- shrReplica
+ <-resultChan
+ // Confirm it's there in the cache.
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
+ mustMatch(t, want, a, "unexpected result")
+
+ // Change the tablet type to RDONLY.
+ tablet.Type = topodatapb.TabletType_RDONLY
+ shrRdonly := &querypb.StreamHealthResponse{
+ TabletAlias: tablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY},
+ Serving: true,
+ TabletExternallyReparentedTimestamp: 0,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4},
+ }
+
+ // Now Replace it, which does a Remove and Add. The tablet should be removed
+ // from the cache and all its maps even though the tablet type had changed
+ // in-between the initial Add and Remove.
+ hc.ReplaceTablet(tablet, tablet)
+ // Receive and discard the initial result as we have not yet sent the first
+ // StreamHealthResponse with the dynamic serving and stats information.
+ <-resultChan
+ // Confirm that the old entry is gone.
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
+ assert.Empty(t, a, "wrong result, expected empty list")
+ // Send the first StreamHealthResponse with the dynamic serving and stats
+ // information.
+ input <- shrRdonly
+ <-resultChan
+ // Confirm that the new entry is there in the cache.
+ want = []*TabletHealth{{
+ Tablet: tablet,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY},
+ Serving: true,
+ Stats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4},
+ PrimaryTermStartTime: 0,
+ }}
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY})
+ mustMatch(t, want, a, "unexpected result")
+
+ // Delete the tablet, confirm again that it's gone in both tablet type
+ // forms.
+ hc.RemoveTablet(tablet)
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
+ assert.Empty(t, a, "wrong result, expected empty list")
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY})
+ assert.Empty(t, a, "wrong result, expected empty list")
}
// TestGetHealthyTablets tests the functionality of GetHealthyTabletStats.
diff --git a/go/vt/discovery/topology_watcher.go b/go/vt/discovery/topology_watcher.go
index a2a70a1f2d0..5e5c54ab57a 100644
--- a/go/vt/discovery/topology_watcher.go
+++ b/go/vt/discovery/topology_watcher.go
@@ -199,9 +199,6 @@ func (tw *TopologyWatcher) loadTablets() {
log.Errorf("cannot get tablet for alias %v: %v", alias, err)
return
}
- if !(tw.tabletFilter == nil || tw.tabletFilter.IsIncluded(tablet.Tablet)) {
- return
- }
tw.mu.Lock()
aliasStr := topoproto.TabletAliasString(alias)
newTablets[aliasStr] = &tabletInfo{
@@ -217,6 +214,10 @@ func (tw *TopologyWatcher) loadTablets() {
tw.mu.Lock()
for alias, newVal := range newTablets {
+ if tw.tabletFilter != nil && !tw.tabletFilter.IsIncluded(newVal.tablet) {
+ continue
+ }
+
// trust the alias from topo and add it if it doesn't exist
if val, ok := tw.tablets[alias]; ok {
// check if the host and port have changed. If yes, replace tablet.
@@ -236,6 +237,10 @@ func (tw *TopologyWatcher) loadTablets() {
}
for _, val := range tw.tablets {
+ if tw.tabletFilter != nil && !tw.tabletFilter.IsIncluded(val.tablet) {
+ continue
+ }
+
if _, ok := newTablets[val.alias]; !ok {
tw.healthcheck.RemoveTablet(val.tablet)
topologyWatcherOperations.Add(topologyWatcherOpRemoveTablet, 1)
diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go
index 9d0876cf424..dff8ba720c7 100644
--- a/go/vt/discovery/topology_watcher_test.go
+++ b/go/vt/discovery/topology_watcher_test.go
@@ -17,12 +17,13 @@ limitations under the License.
package discovery
import (
+ "context"
"math/rand"
"testing"
"time"
- "context"
-
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"vitess.io/vitess/go/vt/logutil"
@@ -44,19 +45,14 @@ func checkOpCounts(t *testing.T, prevCounts, deltas map[string]int64) map[string
newVal = 0
}
- if newVal != prevVal+delta {
- t.Errorf("expected %v to increase by %v, got %v -> %v", key, delta, prevVal, newVal)
- }
+ assert.Equal(t, newVal, prevVal+delta, "expected %v to increase by %v, got %v -> %v", key, delta, prevVal, newVal)
}
return newCounts
}
func checkChecksum(t *testing.T, tw *TopologyWatcher, want uint32) {
t.Helper()
- got := tw.TopoChecksum()
- if want != got {
- t.Errorf("want checksum %v got %v", want, got)
- }
+ assert.Equal(t, want, tw.TopoChecksum())
}
func TestStartAndCloseTopoWatcher(t *testing.T) {
@@ -506,3 +502,115 @@ func TestFilterByKeyspace(t *testing.T) {
}
}
}
+
+// TestFilterByKeypsaceSkipsIgnoredTablets confirms a bug fix for the case when a TopologyWatcher
+// has a FilterByKeyspace TabletFilter configured along with refreshKnownTablets turned off. We want
+// to ensure that the TopologyWatcher:
+// - does not continuosly call GetTablets for tablets that do not satisfy the filter
+// - does not add or remove these filtered out tablets from the its healtcheck
+func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) {
+ ts := memorytopo.NewServer("aa")
+ fhc := NewFakeHealthCheck(nil)
+ topologyWatcherOperations.ZeroAll()
+ counts := topologyWatcherOperations.Counts()
+ f := NewFilterByKeyspace(testKeyspacesToWatch)
+ tw := NewCellTabletsWatcher(context.Background(), ts, fhc, f, "aa", 10*time.Minute, false /*refreshKnownTablets*/, 5)
+
+ counts = checkOpCounts(t, counts, map[string]int64{})
+ checkChecksum(t, tw, 0)
+
+ // Add a tablet from a tracked keyspace to the topology.
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "aa",
+ Uid: 0,
+ },
+ Hostname: "host1",
+ PortMap: map[string]int32{
+ "vt": 123,
+ },
+ Keyspace: "ks1",
+ Shard: "shard",
+ }
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet))
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "AddTablet": 1})
+ checkChecksum(t, tw, 3238442862)
+
+ // Check tablet is reported by HealthCheck
+ allTablets := fhc.GetAllTablets()
+ key := TabletToMapKey(tablet)
+ assert.Contains(t, allTablets, key)
+ assert.True(t, proto.Equal(tablet, allTablets[key]))
+
+ // Add a second tablet to the topology that should get filtered out by the keyspace filter
+ tablet2 := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "aa",
+ Uid: 2,
+ },
+ Hostname: "host2",
+ PortMap: map[string]int32{
+ "vt": 789,
+ },
+ Keyspace: "ks3",
+ Shard: "shard",
+ }
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet2))
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1})
+ checkChecksum(t, tw, 2762153755)
+
+ // Check the new tablet is NOT reported by HealthCheck.
+ allTablets = fhc.GetAllTablets()
+ assert.Len(t, allTablets, 1)
+ key = TabletToMapKey(tablet2)
+ assert.NotContains(t, allTablets, key)
+
+ // Load the tablets again to show that when refreshKnownTablets is disabled,
+ // only the list is read from the topo and the checksum doesn't change
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1})
+ checkChecksum(t, tw, 2762153755)
+
+ // With refreshKnownTablets set to false, changes to the port map for the same tablet alias
+ // should not be reflected in the HealtCheck state
+ _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
+ t.PortMap["vt"] = 456
+ return nil
+ })
+ require.NoError(t, err)
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1})
+ checkChecksum(t, tw, 2762153755)
+
+ allTablets = fhc.GetAllTablets()
+ assert.Len(t, allTablets, 1)
+ origKey := TabletToMapKey(tablet)
+ tabletWithNewPort := proto.Clone(tablet).(*topodatapb.Tablet)
+ tabletWithNewPort.PortMap["vt"] = 456
+ keyWithNewPort := TabletToMapKey(tabletWithNewPort)
+ assert.Contains(t, allTablets, origKey)
+ assert.NotContains(t, allTablets, keyWithNewPort)
+
+ // Remove the tracked tablet from the topo and check that it is detected as being gone.
+ require.NoError(t, ts.DeleteTablet(context.Background(), tablet.Alias))
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "RemoveTablet": 1})
+ checkChecksum(t, tw, 789108290)
+ assert.Empty(t, fhc.GetAllTablets())
+
+ // Remove ignored tablet and check that we didn't try to remove it from the health check
+ require.NoError(t, ts.DeleteTablet(context.Background(), tablet2.Alias))
+
+ tw.loadTablets()
+ checkOpCounts(t, counts, map[string]int64{"ListTablets": 1})
+ checkChecksum(t, tw, 0)
+ assert.Empty(t, fhc.GetAllTablets())
+
+ tw.Stop()
+}
diff --git a/go/vt/vtgr/external/golib/sqlutils/dialect.go b/go/vt/external/golib/sqlutils/dialect.go
similarity index 100%
rename from go/vt/vtgr/external/golib/sqlutils/dialect.go
rename to go/vt/external/golib/sqlutils/dialect.go
diff --git a/go/vt/vtgr/external/golib/sqlutils/sqlite_dialect.go b/go/vt/external/golib/sqlutils/sqlite_dialect.go
similarity index 100%
rename from go/vt/vtgr/external/golib/sqlutils/sqlite_dialect.go
rename to go/vt/external/golib/sqlutils/sqlite_dialect.go
diff --git a/go/vt/vtgr/external/golib/sqlutils/sqlite_dialect_test.go b/go/vt/external/golib/sqlutils/sqlite_dialect_test.go
similarity index 100%
rename from go/vt/vtgr/external/golib/sqlutils/sqlite_dialect_test.go
rename to go/vt/external/golib/sqlutils/sqlite_dialect_test.go
diff --git a/go/vt/vtgr/external/golib/sqlutils/sqlutils.go b/go/vt/external/golib/sqlutils/sqlutils.go
similarity index 96%
rename from go/vt/vtgr/external/golib/sqlutils/sqlutils.go
rename to go/vt/external/golib/sqlutils/sqlutils.go
index f89d96229a3..91e83f0a4e4 100644
--- a/go/vt/vtgr/external/golib/sqlutils/sqlutils.go
+++ b/go/vt/external/golib/sqlutils/sqlutils.go
@@ -38,7 +38,7 @@ const DateTimeFormat = "2006-01-02 15:04:05.999999"
// for easy, typed getters by column name.
type RowMap map[string]CellData
-// Cell data is the result of a single (atomic) column in a single row
+// CellData is the result of a single (atomic) column in a single row
type CellData sql.NullString
func (this *CellData) MarshalJSON() ([]byte, error) {
@@ -200,22 +200,22 @@ func GetDB(mysql_uri string) (*sql.DB, bool, error) {
return GetGenericDB("mysql", mysql_uri)
}
-// GetDB returns a SQLite DB instance based on DB file name.
+// GetSQLiteDB returns a SQLite DB instance based on DB file name.
// bool result indicates whether the DB was returned from cache; err
func GetSQLiteDB(dbFile string) (*sql.DB, bool, error) {
- return GetGenericDB("sqlite3", dbFile)
+ return GetGenericDB("sqlite", dbFile)
}
// RowToArray is a convenience function, typically not called directly, which maps a
// single read database row into a NullString
-func RowToArray(rows *sql.Rows, columns []string) []CellData {
+func RowToArray(rows *sql.Rows, columns []string) ([]CellData, error) {
buff := make([]any, len(columns))
data := make([]CellData, len(columns))
for i := range buff {
buff[i] = data[i].NullString()
}
- rows.Scan(buff...)
- return data
+ err := rows.Scan(buff...)
+ return data, err
}
// ScanRowsToArrays is a convenience function, typically not called directly, which maps rows
@@ -223,8 +223,11 @@ func RowToArray(rows *sql.Rows, columns []string) []CellData {
func ScanRowsToArrays(rows *sql.Rows, on_row func([]CellData) error) error {
columns, _ := rows.Columns()
for rows.Next() {
- arr := RowToArray(rows, columns)
- err := on_row(arr)
+ arr, err := RowToArray(rows, columns)
+ if err != nil {
+ return err
+ }
+ err = on_row(arr)
if err != nil {
return err
}
diff --git a/go/vt/grpcclient/client.go b/go/vt/grpcclient/client.go
index 8ad995721da..d3865c88c84 100644
--- a/go/vt/grpcclient/client.go
+++ b/go/vt/grpcclient/client.go
@@ -46,7 +46,6 @@ var (
// every vitess binary that makes grpc client-side calls.
grpcclientBinaries = []string{
- "mysqlctl",
"mysqlctld",
"vtadmin",
"vtbackup",
@@ -105,7 +104,6 @@ func Dial(target string, failFast FailFast, opts ...grpc.DialOption) (*grpc.Clie
// failFast is a non-optional parameter because callers are required to specify
// what that should be.
func DialContext(ctx context.Context, target string, failFast FailFast, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
- grpccommon.EnableTracingOpt()
msgSize := grpccommon.MaxMessageSize()
newopts := []grpc.DialOption{
grpc.WithDefaultCallOptions(
diff --git a/go/vt/grpccommon/options.go b/go/vt/grpccommon/options.go
index ae9bea0172d..7013b95b95a 100644
--- a/go/vt/grpccommon/options.go
+++ b/go/vt/grpccommon/options.go
@@ -17,8 +17,6 @@ limitations under the License.
package grpccommon
import (
- "sync"
-
"github.com/spf13/pflag"
"google.golang.org/grpc"
@@ -30,8 +28,6 @@ var (
// accept. Larger messages will be rejected.
// Note: We're using 16 MiB as default value because that's the default in MySQL
maxMessageSize = 16 * 1024 * 1024
- // enableTracing sets a flag to enable grpc client/server tracing.
- enableTracing bool
// enablePrometheus sets a flag to enable grpc client/server grpc monitoring.
enablePrometheus bool
)
@@ -43,23 +39,10 @@ var (
// command-line arguments.
func RegisterFlags(fs *pflag.FlagSet) {
fs.IntVar(&maxMessageSize, "grpc_max_message_size", maxMessageSize, "Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'.")
- fs.BoolVar(&enableTracing, "grpc_enable_tracing", enableTracing, "Enable gRPC tracing.")
+ fs.BoolVar(&grpc.EnableTracing, "grpc_enable_tracing", grpc.EnableTracing, "Enable gRPC tracing.")
fs.BoolVar(&enablePrometheus, "grpc_prometheus", enablePrometheus, "Enable gRPC monitoring with Prometheus.")
}
-var (
- enableTracingOnce sync.Once
-)
-
-// EnableTracingOpt enables grpc tracing if requested.
-// It must be called before any grpc server or client is created but is safe
-// to be called multiple times.
-func EnableTracingOpt() {
- enableTracingOnce.Do(func() {
- grpc.EnableTracing = enableTracing
- })
-}
-
// EnableGRPCPrometheus returns the value of the --grpc_prometheus flag.
func EnableGRPCPrometheus() bool {
return enablePrometheus
diff --git a/go/vt/log/log.go b/go/vt/log/log.go
index 339b80fef02..79be1da464c 100644
--- a/go/vt/log/log.go
+++ b/go/vt/log/log.go
@@ -22,6 +22,10 @@ limitations under the License.
package log
import (
+ "fmt"
+ "strconv"
+ "sync/atomic"
+
"github.com/golang/glog"
"github.com/spf13/pflag"
)
@@ -78,5 +82,32 @@ var (
// calls this function, or call this function directly before parsing
// command-line arguments.
func RegisterFlags(fs *pflag.FlagSet) {
- fs.Uint64Var(&glog.MaxSize, "log_rotate_max_size", glog.MaxSize, "size in bytes at which logs are rotated (glog.MaxSize)")
+ flagVal := logRotateMaxSize{
+ val: fmt.Sprintf("%d", atomic.LoadUint64(&glog.MaxSize)),
+ }
+ fs.Var(&flagVal, "log_rotate_max_size", "size in bytes at which logs are rotated (glog.MaxSize)")
+}
+
+// logRotateMaxSize implements pflag.Value and is used to
+// try and provide thread-safe access to glog.MaxSize.
+type logRotateMaxSize struct {
+ val string
+}
+
+func (lrms *logRotateMaxSize) Set(s string) error {
+ maxSize, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return err
+ }
+ atomic.StoreUint64(&glog.MaxSize, maxSize)
+ lrms.val = s
+ return nil
+}
+
+func (lrms *logRotateMaxSize) String() string {
+ return lrms.val
+}
+
+func (lrms *logRotateMaxSize) Type() string {
+ return "uint64"
}
diff --git a/go/vt/logutil/purge.go b/go/vt/logutil/purge.go
index 20f028d7187..8d85e15c5c9 100644
--- a/go/vt/logutil/purge.go
+++ b/go/vt/logutil/purge.go
@@ -127,7 +127,10 @@ func PurgeLogs() {
logDir := f.Value.String()
program := filepath.Base(os.Args[0])
ticker := time.NewTicker(purgeLogsInterval)
- for range ticker.C {
- purgeLogsOnce(time.Now(), logDir, program, keepLogsByCtime, keepLogsByMtime)
- }
+
+ go func() {
+ for range ticker.C {
+ purgeLogsOnce(time.Now(), logDir, program, keepLogsByCtime, keepLogsByMtime)
+ }
+ }()
}
diff --git a/go/vt/mysqlctl/azblobbackupstorage/azblob.go b/go/vt/mysqlctl/azblobbackupstorage/azblob.go
index beddc33333c..144b9435437 100644
--- a/go/vt/mysqlctl/azblobbackupstorage/azblob.go
+++ b/go/vt/mysqlctl/azblobbackupstorage/azblob.go
@@ -52,6 +52,7 @@ var (
storageRoot string
azBlobParallelism int
+ azBlobBufferSize = 100 << (10 * 2) // 100 MiB
)
func registerFlags(fs *pflag.FlagSet) {
@@ -59,6 +60,7 @@ func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&accountKeyFile, "azblob_backup_account_key_file", "", "Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).")
fs.StringVar(&containerName, "azblob_backup_container_name", "", "Azure Blob Container Name.")
fs.StringVar(&storageRoot, "azblob_backup_storage_root", "", "Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').")
+ fs.IntVar(&azBlobBufferSize, "azblob_backup_buffer_size", azBlobBufferSize, "The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service.")
fs.IntVar(&azBlobParallelism, "azblob_backup_parallelism", 1, "Azure Blob operation parallelism (requires extra memory when increased).")
}
@@ -218,7 +220,7 @@ func (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, file
go func() {
defer bh.waitGroup.Done()
_, err := azblob.UploadStreamToBlockBlob(bh.ctx, reader, blockBlobURL, azblob.UploadStreamToBlockBlobOptions{
- BufferSize: azblob.BlockBlobMaxStageBlockBytes,
+ BufferSize: azBlobBufferSize,
MaxBuffers: azBlobParallelism,
})
if err != nil {
@@ -264,7 +266,7 @@ func (bh *AZBlobBackupHandle) ReadFile(ctx context.Context, filename string) (io
}
blobURL := containerURL.NewBlobURL(obj)
- resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
+ resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
if err != nil {
return nil, err
}
diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go
index 63868bf48ac..4a7781c1be9 100644
--- a/go/vt/mysqlctl/backup.go
+++ b/go/vt/mysqlctl/backup.go
@@ -48,6 +48,7 @@ const (
// the three bases for files to restore
backupInnodbDataHomeDir = "InnoDBData"
backupInnodbLogGroupHomeDir = "InnoDBLog"
+ backupBinlogDir = "BinLog"
backupData = "Data"
// backupManifestFileName is the MANIFEST file name within a backup.
@@ -72,16 +73,8 @@ var (
// but none of them are complete.
ErrNoCompleteBackup = errors.New("backup(s) found but none are complete")
- // backupStorageHook contains the hook name to use to process
- // backup files. If not set, we will not process the files. It is
- // only used at backup time. Then it is put in the manifest,
- // and when decoding a backup, it is read from the manifest,
- // and used as the transform hook name again.
- backupStorageHook string
-
// backupStorageCompress can be set to false to not use gzip
- // on the backups. Usually would be set if a hook is used, and
- // the hook compresses the data.
+ // on the backups.
backupStorageCompress = true
// backupCompressBlockSize is the splitting size for each
@@ -97,14 +90,13 @@ var (
)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtbackup", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtbackup", "vtctld"} {
servenv.OnParseFor(cmd, registerBackupFlags)
}
}
func registerBackupFlags(fs *pflag.FlagSet) {
- fs.StringVar(&backupStorageHook, "backup_storage_hook", backupStorageHook, "if set, we send the contents of the backup files through this hook.")
- fs.BoolVar(&backupStorageCompress, "backup_storage_compress", backupStorageCompress, "if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data.")
+ fs.BoolVar(&backupStorageCompress, "backup_storage_compress", backupStorageCompress, "if set, the backup files will be compressed.")
fs.IntVar(&backupCompressBlockSize, "backup_storage_block_size", backupCompressBlockSize, "if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000).")
fs.IntVar(&backupCompressBlocks, "backup_storage_number_blocks", backupCompressBlocks, "if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression.")
}
@@ -299,8 +291,6 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error)
return nil, vterrors.Wrap(err, "ListBackups failed")
}
- metadataManager := &MetadataManager{}
-
if len(bhs) == 0 {
// There are no backups (not even broken/incomplete ones).
params.Logger.Errorf("no backup to restore on BackupStorage for directory %v. Starting up empty.", backupDir)
@@ -314,24 +304,27 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error)
params.Logger.Errorf("error resetting replication: %v. Continuing", err)
}
- if err := metadataManager.PopulateMetadataTables(params.Mysqld, params.LocalMetadata, params.DbName); err != nil {
- params.Logger.Errorf("error populating metadata tables: %v. Continuing", err)
-
- }
// Always return ErrNoBackup
return nil, ErrNoBackup
}
- bh, err := FindBackupToRestore(ctx, params, bhs)
+ restorePath, err := FindBackupToRestore(ctx, params, bhs)
if err != nil {
return nil, err
}
-
+ if restorePath.IsEmpty() {
+ // This condition should not happen; but we validate for sanity
+ return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "empty restore path")
+ }
+ bh := restorePath.FullBackupHandle()
re, err := GetRestoreEngine(ctx, bh)
if err != nil {
return nil, vterrors.Wrap(err, "Failed to find restore engine")
}
-
+ params.Logger.Infof("Restore: %v", restorePath.String())
+ if params.DryRun {
+ return nil, nil
+ }
manifest, err := re.ExecuteRestore(ctx, params, bh)
if err != nil {
return nil, err
@@ -371,18 +364,6 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error)
return nil, vterrors.Wrap(err, "mysql_upgrade failed")
}
- // Add backupTime and restorePosition to LocalMetadata
- params.LocalMetadata["RestoredBackupTime"] = manifest.BackupTime
- params.LocalMetadata["RestorePosition"] = mysql.EncodePosition(manifest.Position)
-
- // Populate local_metadata before starting without --skip-networking,
- // so it's there before we start announcing ourselves.
- params.Logger.Infof("Restore: populating local_metadata")
- err = metadataManager.PopulateMetadataTables(params.Mysqld, params.LocalMetadata, params.DbName)
- if err != nil {
- return nil, err
- }
-
// The MySQL manual recommends restarting mysqld after running mysql_upgrade,
// so that any changes made to system tables take effect.
params.Logger.Infof("Restore: restarting mysqld after mysql_upgrade")
@@ -395,10 +376,24 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error)
return nil, err
}
+ if handles := restorePath.IncrementalBackupHandles(); len(handles) > 0 {
+ params.Logger.Infof("Restore: applying %v incremental backups", len(handles))
+ for _, bh := range handles {
+ manifest, err := re.ExecuteRestore(ctx, params, bh)
+ if err != nil {
+ return nil, err
+ }
+ params.Logger.Infof("Restore: applied incremental backup: %v", manifest.Position)
+ }
+ params.Logger.Infof("Restore: done applying incremental backups")
+ }
+
+ params.Logger.Infof("Restore: removing state file")
if err = removeStateFile(params.Cnf); err != nil {
return nil, err
}
restoreDuration.Set(int64(time.Since(startTs).Seconds()))
+ params.Logger.Infof("Restore: complete")
return manifest, nil
}
diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go
index 16db1a72f8a..08d5e31a116 100644
--- a/go/vt/mysqlctl/backup_test.go
+++ b/go/vt/mysqlctl/backup_test.go
@@ -23,21 +23,12 @@ import (
"sort"
"testing"
- "github.com/stretchr/testify/require"
-
"vitess.io/vitess/go/mysql"
)
-func TestFindFilesToBackup(t *testing.T) {
+func TestFindFilesToBackupWithoutRedoLog(t *testing.T) {
root := t.TempDir()
- // get the flavor and version to deal with any behavioral differences
- versionStr, err := GetVersionString()
- require.NoError(t, err)
- flavor, version, err := ParseVersionString(versionStr)
- require.NoError(t, err)
- features := newCapabilitySet(flavor, version)
-
// Initialize the fake mysql root directories
innodbDataDir := path.Join(root, "innodb_data")
innodbLogDir := path.Join(root, "innodb_log")
@@ -54,10 +45,6 @@ func TestFindFilesToBackup(t *testing.T) {
}
innodbLogFile := "innodb_log_1"
- if features.hasDynamicRedoLogCapacity() {
- os.Mkdir(path.Join(innodbLogDir, mysql.DynamicRedoLogSubdir), os.ModePerm)
- innodbLogFile = path.Join(mysql.DynamicRedoLogSubdir, "#ib_redo1")
- }
if err := os.WriteFile(path.Join(innodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm); err != nil {
t.Fatalf("failed to write file innodb_data_1: %v", err)
@@ -130,6 +117,98 @@ func TestFindFilesToBackup(t *testing.T) {
}
}
+func TestFindFilesToBackupWithRedoLog(t *testing.T) {
+ root := t.TempDir()
+
+ // Initialize the fake mysql root directories
+ innodbDataDir := path.Join(root, "innodb_data")
+ innodbLogDir := path.Join(root, "innodb_log")
+ dataDir := path.Join(root, "data")
+ dataDbDir := path.Join(dataDir, "vt_db")
+ extraDir := path.Join(dataDir, "extra_dir")
+ outsideDbDir := path.Join(root, "outside_db")
+ rocksdbDir := path.Join(dataDir, ".rocksdb")
+ sdiOnlyDir := path.Join(dataDir, "sdi_dir")
+ for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir, rocksdbDir, sdiOnlyDir} {
+ if err := os.MkdirAll(s, os.ModePerm); err != nil {
+ t.Fatalf("failed to create directory %v: %v", s, err)
+ }
+ }
+
+ cnf := &Mycnf{
+ InnodbDataHomeDir: innodbDataDir,
+ InnodbLogGroupHomeDir: innodbLogDir,
+ DataDir: dataDir,
+ }
+
+ os.Mkdir(path.Join(innodbLogDir, mysql.DynamicRedoLogSubdir), os.ModePerm)
+ innodbLogFile := path.Join(mysql.DynamicRedoLogSubdir, "#ib_redo1")
+
+ if err := os.WriteFile(path.Join(innodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file innodb_data_1: %v", err)
+ }
+ if err := os.WriteFile(path.Join(innodbLogDir, innodbLogFile), []byte("innodb log 1 contents"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file %s: %v", innodbLogFile, err)
+ }
+ if err := os.WriteFile(path.Join(dataDbDir, "db.opt"), []byte("db opt file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file db.opt: %v", err)
+ }
+ if err := os.WriteFile(path.Join(extraDir, "extra.stuff"), []byte("extra file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file extra.stuff: %v", err)
+ }
+ if err := os.WriteFile(path.Join(outsideDbDir, "table1.frm"), []byte("frm file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file table1.opt: %v", err)
+ }
+ if err := os.Symlink(outsideDbDir, path.Join(dataDir, "vt_symlink")); err != nil {
+ t.Fatalf("failed to symlink vt_symlink: %v", err)
+ }
+ if err := os.WriteFile(path.Join(rocksdbDir, "000011.sst"), []byte("rocksdb file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file 000011.sst: %v", err)
+ }
+ if err := os.WriteFile(path.Join(sdiOnlyDir, "table1.sdi"), []byte("sdi file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file table1.sdi: %v", err)
+ }
+
+ result, totalSize, err := findFilesToBackup(cnf)
+ if err != nil {
+ t.Fatalf("findFilesToBackup failed: %v", err)
+ }
+ sort.Sort(forTest(result))
+ t.Logf("findFilesToBackup returned: %v", result)
+ expected := []FileEntry{
+ {
+ Base: "Data",
+ Name: ".rocksdb/000011.sst",
+ },
+ {
+ Base: "Data",
+ Name: "sdi_dir/table1.sdi",
+ },
+ {
+ Base: "Data",
+ Name: "vt_db/db.opt",
+ },
+ {
+ Base: "Data",
+ Name: "vt_symlink/table1.frm",
+ },
+ {
+ Base: "InnoDBData",
+ Name: "innodb_data_1",
+ },
+ {
+ Base: "InnoDBLog",
+ Name: innodbLogFile,
+ },
+ }
+ if !reflect.DeepEqual(result, expected) {
+ t.Fatalf("got wrong list of FileEntry %v, expected %v", result, expected)
+ }
+ if totalSize <= 0 {
+ t.Fatalf("backup size should be > 0, got %v", totalSize)
+ }
+}
+
type forTest []FileEntry
func (f forTest) Len() int { return len(f) }
diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go
index dceb34e3d40..7473edd5dba 100644
--- a/go/vt/mysqlctl/backupengine.go
+++ b/go/vt/mysqlctl/backupengine.go
@@ -56,7 +56,7 @@ type BackupParams struct {
// Concurrency is the value of -concurrency flag given to Backup command
// It determines how many files are processed in parallel
Concurrency int
- // Extra env variables for pre-backup and post-backup transform hooks
+ // Extra env variables used while stopping and starting mysqld
HookExtraEnv map[string]string
// TopoServer, Keyspace and Shard are used to discover primary tablet
TopoServer *topo.Server
@@ -67,6 +67,9 @@ type BackupParams struct {
TabletAlias string
// BackupTime is the time at which the backup is being started
BackupTime time.Time
+ // Position of last known backup. If non empty, then this value indicates the backup should be incremental
+ // and as of this position
+ IncrementalFromPos string
}
// RestoreParams is the struct that holds all params passed to ExecuteRestore
@@ -79,8 +82,6 @@ type RestoreParams struct {
Concurrency int
// Extra env variables for pre-restore and post-restore transform hooks
HookExtraEnv map[string]string
- // Metadata to write into database after restore. See PopulateMetadataTables
- LocalMetadata map[string]string
// DeleteBeforeRestore tells us whether existing data should be deleted before
// restoring. This is always set to false when starting a tablet with -restore_from_backup,
// but is set to true when executing a RestoreFromBackup command on an already running vttablet
@@ -93,6 +94,15 @@ type RestoreParams struct {
// StartTime: if non-zero, look for a backup that was taken at or before this time
// Otherwise, find the most recent backup
StartTime time.Time
+ // RestoreToPos hints that a point in time recovery is requested, to recover up to the specific given pos.
+ // When empty, the restore is a normal from full backup
+ RestoreToPos mysql.Position
+ // When DryRun is set, no restore actually takes place; but some of its steps are validated.
+ DryRun bool
+}
+
+func (p *RestoreParams) IsIncrementalRecovery() bool {
+ return !p.RestoreToPos.IsZero()
}
// RestoreEngine is the interface to restore a backup with a given engine.
@@ -112,7 +122,7 @@ type BackupRestoreEngine interface {
var BackupRestoreEngineMap = make(map[string]BackupRestoreEngine)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain", "vtbackup"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtctld", "vtbackup"} {
servenv.OnParseFor(cmd, registerBackupEngineFlags)
}
}
@@ -193,51 +203,198 @@ type BackupManifest struct {
// Position is the replication position at which the backup was taken.
Position mysql.Position
+ // PurgedPosition stands for purged GTIDs, information that is necessary for PITR recovery. This is specific to MySQL56
+ PurgedPosition mysql.Position
+
+ // FromPosition is only applicable to incremental backups, and stands for the position from
+ // which incremental changes are backed up.
+ FromPosition mysql.Position
+
+ // Incremental indicates whether this is an incremental backup
+ Incremental bool
+
// BackupTime is when the backup was taken in UTC time (RFC 3339 format)
BackupTime string
// FinishedTime is the time (in RFC 3339 format, UTC) at which the backup finished, if known.
// Some backups may not set this field if they were created before the field was added.
FinishedTime string
+
+ // ServerUUID identifies the server from which backup was taken
+ ServerUUID string
+
+ TabletAlias string
+
+ Keyspace string
+
+ Shard string
}
-// FindBackupToRestore returns a selected candidate backup to be restored.
-// It returns the most recent backup that is complete, meaning it has a valid
-// MANIFEST file.
-func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (backupstorage.BackupHandle, error) {
- var bh backupstorage.BackupHandle
- var index int
- // if a StartTime is provided in params, then find a backup that was taken at or before that time
- checkBackupTime := !params.StartTime.IsZero()
- backupDir := GetBackupDir(params.Keyspace, params.Shard)
+func (m *BackupManifest) HashKey() string {
+ return fmt.Sprintf("%v/%v/%v/%t/%v", m.BackupMethod, m.Position, m.FromPosition, m.Incremental, m.BackupTime)
+}
+
+// ManifestHandleMap is a utility container to map manifests to handles, making it possible to search for, and iterate, handles based on manifests.
+type ManifestHandleMap struct {
+ mp map[string]backupstorage.BackupHandle
+}
+
+func NewManifestHandleMap() *ManifestHandleMap {
+ return &ManifestHandleMap{
+ mp: map[string]backupstorage.BackupHandle{},
+ }
+}
+
+// Map assigns a handle to a manifest
+func (m *ManifestHandleMap) Map(manifest *BackupManifest, handle backupstorage.BackupHandle) {
+ if manifest == nil {
+ return
+ }
+ m.mp[manifest.HashKey()] = handle
+}
+
+// Handle returns the backup handles assigned to given manifest
+func (m *ManifestHandleMap) Handle(manifest *BackupManifest) (handle backupstorage.BackupHandle) {
+ return m.mp[manifest.HashKey()]
+}
+
+// Handles returns an ordered list of handles, by given list of manifests
+func (m *ManifestHandleMap) Handles(manifests []*BackupManifest) (handles []backupstorage.BackupHandle) {
+ handles = make([]backupstorage.BackupHandle, 0, len(manifests))
+ for _, manifest := range manifests {
+ handles = append(handles, m.mp[manifest.HashKey()])
+ }
+ return handles
+}
+
+// RestorePath is an ordered sequence of backup handles & manifests, that can be used to restore from backup.
+// The path could be empty, in which case it's invalid, there's no way to restore. Otherwise, the path
+// consists of exactly one full backup, followed by zero or more incremental backups.
+type RestorePath struct {
+ manifests []*BackupManifest
+ manifestHandleMap *ManifestHandleMap
+}
+
+func (p *RestorePath) IsEmpty() bool {
+ return len(p.manifests) == 0
+}
+
+func (p *RestorePath) Len() int {
+ return len(p.manifests)
+}
+
+func (p *RestorePath) Add(m *BackupManifest) {
+ p.manifests = append(p.manifests, m)
+}
- for index = len(bhs) - 1; index >= 0; index-- {
- bh = bhs[index]
+// FullBackupHandle returns the single (if any) full backup handle, which is always the first handle in the sequence
+func (p *RestorePath) FullBackupHandle() backupstorage.BackupHandle {
+ if p.IsEmpty() {
+ return nil
+ }
+ return p.manifestHandleMap.Handle(p.manifests[0])
+}
+
+// IncrementalBackupHandles returns an ordered list of backup handles comprising of the incremental (non-full) path
+func (p *RestorePath) IncrementalBackupHandles() []backupstorage.BackupHandle {
+ if p.IsEmpty() {
+ return nil
+ }
+ return p.manifestHandleMap.Handles(p.manifests[1:])
+}
+
+func (p *RestorePath) String() string {
+ var sb strings.Builder
+ sb.WriteString("RestorePath: [")
+ for i, m := range p.manifests {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ if m.Incremental {
+ sb.WriteString("incremental:")
+ } else {
+ sb.WriteString("full:")
+ }
+ sb.WriteString(p.manifestHandleMap.Handle(m).Name())
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// FindLatestSuccessfulBackup returns the handle and manifest for the last good backup,
+// which can be either full or increment
+func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs []backupstorage.BackupHandle) (backupstorage.BackupHandle, *BackupManifest, error) {
+ for index := len(bhs) - 1; index >= 0; index-- {
+ bh := bhs[index]
// Check that the backup MANIFEST exists and can be successfully decoded.
bm, err := GetBackupManifest(ctx, bh)
if err != nil {
- params.Logger.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), backupDir, err)
+ logger.Warningf("Possibly incomplete backup %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), err)
continue
}
+ return bh, bm, nil
+ }
+ return nil, nil, ErrNoCompleteBackup
+}
- var backupTime time.Time
- if checkBackupTime {
- backupTime, err = time.Parse(time.RFC3339, bm.BackupTime)
+// FindBackupToRestore returns a path, a sequence of backup handles, to be restored.
+// The returned handles stand for valid backups with complete manifests.
+func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (*RestorePath, error) {
+ // if a StartTime is provided in params, then find a backup that was taken at or before that time
+ checkBackupTime := !params.StartTime.IsZero()
+ backupDir := GetBackupDir(params.Keyspace, params.Shard)
+
+ manifests := make([]*BackupManifest, len(bhs))
+ manifestHandleMap := NewManifestHandleMap()
+
+ fullBackupIndex := func() int {
+ for index := len(bhs) - 1; index >= 0; index-- {
+ bh := bhs[index]
+ // Check that the backup MANIFEST exists and can be successfully decoded.
+ bm, err := GetBackupManifest(ctx, bh)
if err != nil {
- params.Logger.Warningf("Restore: skipping backup %v/%v with invalid time %v: %v", backupDir, bh.Name(), bm.BackupTime, err)
+ params.Logger.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), backupDir, err)
continue
}
- }
- if !checkBackupTime || backupTime.Equal(params.StartTime) || backupTime.Before(params.StartTime) {
- if !checkBackupTime {
+ // the manifest is valid
+ manifests[index] = bm // manifests's order is insignificant, it will be sorted later on
+ manifestHandleMap.Map(bm, bh)
+ if bm.Incremental {
+ // We're looking for a full backup
+ continue
+ }
+
+ var backupTime time.Time
+ if checkBackupTime {
+ backupTime, err = time.Parse(time.RFC3339, bm.BackupTime)
+ if err != nil {
+ params.Logger.Warningf("Restore: skipping backup %v/%v with invalid time %v: %v", backupDir, bh.Name(), bm.BackupTime, err)
+ continue
+ }
+ }
+
+ switch {
+ case checkBackupTime:
+ // restore to specific time
+ if backupTime.Equal(params.StartTime) || backupTime.Before(params.StartTime) {
+ params.Logger.Infof("Restore: found backup %v %v to restore using the specified timestamp of '%v'", bh.Directory(), bh.Name(), params.StartTime.Format(BackupTimestampFormat))
+ return index
+ }
+ case !params.RestoreToPos.IsZero():
+ // restore to specific pos
+ if params.RestoreToPos.GTIDSet.Contains(bm.Position.GTIDSet) {
+ // this is the most recent backup which is <= desired position
+ return index
+ }
+ default:
+ // restore latest full backup
params.Logger.Infof("Restore: found latest backup %v %v to restore", bh.Directory(), bh.Name())
- } else {
- params.Logger.Infof("Restore: found backup %v %v to restore using the specified timestamp of '%v'", bh.Directory(), bh.Name(), params.StartTime.Format(BackupTimestampFormat))
+ return index
}
- break
}
- }
- if index < 0 {
+ return -1
+ }()
+ if fullBackupIndex < 0 {
if checkBackupTime {
params.Logger.Errorf("No valid backup found before time %v", params.StartTime.Format(BackupTimestampFormat))
}
@@ -246,8 +403,25 @@ func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backup
// up empty.
return nil, ErrNoCompleteBackup
}
-
- return bh, nil
+ // Anything taken before the full backup that we picked, is not of interest:
+ manifests = manifests[fullBackupIndex:]
+ restorePath := &RestorePath{
+ manifestHandleMap: manifestHandleMap,
+ }
+ if params.RestoreToPos.IsZero() {
+ // restoring from a single full backup:
+ restorePath.Add(manifests[0])
+ return restorePath, nil
+ }
+ // restore to a position (using incremental backups):
+ // we calculate a possible restore path based on the manifests. The resulting manifests are
+ // a sorted subsequence, with the full backup first, and zero or more incremental backups to follow.
+ manifests, err := FindPITRPath(params.RestoreToPos.GTIDSet, manifests)
+ if err != nil {
+ return nil, err
+ }
+ restorePath.manifests = manifests
+ return restorePath, nil
}
func prepareToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger) error {
@@ -390,31 +564,24 @@ func addMySQL8DataDictionary(fes []FileEntry, base string, baseDir string) ([]Fi
return fes, fi.Size(), nil
}
+func hasDynamicRedoLog(cnf *Mycnf) bool {
+ dynamicRedoLogPath := path.Join(cnf.InnodbLogGroupHomeDir, mysql.DynamicRedoLogSubdir)
+ info, err := os.Stat(dynamicRedoLogPath)
+ return !os.IsNotExist(err) && info.IsDir()
+}
+
func findFilesToBackup(cnf *Mycnf) ([]FileEntry, int64, error) {
var err error
var result []FileEntry
var size, totalSize int64
- var flavor MySQLFlavor
- var version ServerVersion
- var features capabilitySet
-
- // get the flavor and version to deal with any behavioral differences
- versionStr, err := GetVersionString()
- if err != nil {
- return nil, 0, err
- }
- flavor, version, err = ParseVersionString(versionStr)
- if err != nil {
- return nil, 0, err
- }
- features = newCapabilitySet(flavor, version)
// first add innodb files
result, totalSize, err = addDirectory(result, backupInnodbDataHomeDir, cnf.InnodbDataHomeDir, "")
if err != nil {
return nil, 0, err
}
- if features.hasDynamicRedoLogCapacity() {
+
+ if hasDynamicRedoLog(cnf) {
result, size, err = addDirectory(result, backupInnodbLogGroupHomeDir, cnf.InnodbLogGroupHomeDir, mysql.DynamicRedoLogSubdir)
} else {
result, size, err = addDirectory(result, backupInnodbLogGroupHomeDir, cnf.InnodbLogGroupHomeDir, "")
@@ -449,3 +616,33 @@ func findFilesToBackup(cnf *Mycnf) ([]FileEntry, int64, error) {
return result, totalSize, nil
}
+
+// binlogFilesToBackup returns the file entries for given binlog files (identified by file name, no path)
+func binlogFilesToBackup(cnf *Mycnf, binlogFiles []string) (result []FileEntry, totalSize int64, err error) {
+ binlogsDirectory := filepath.Dir(cnf.BinLogPath)
+ entries, err := os.ReadDir(binlogsDirectory)
+ if err != nil {
+ return nil, 0, err
+ }
+ binlogFilesMap := map[string]bool{}
+ for _, b := range binlogFiles {
+ binlogFilesMap[b] = true
+ }
+ for _, entry := range entries {
+ if !binlogFilesMap[entry.Name()] {
+ // not a file we're looking for
+ continue
+ }
+ fi, err := entry.Info()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ result = append(result, FileEntry{
+ Base: backupBinlogDir,
+ Name: fi.Name(),
+ })
+ totalSize = totalSize + fi.Size()
+ }
+ return result, totalSize, nil
+}
diff --git a/go/vt/mysqlctl/binlogs_gtid.go b/go/vt/mysqlctl/binlogs_gtid.go
new file mode 100644
index 00000000000..73a69feda0b
--- /dev/null
+++ b/go/vt/mysqlctl/binlogs_gtid.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlctl
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/vterrors"
+)
+
+type BackupManifestPath []*BackupManifest
+
+func (p *BackupManifestPath) String() string {
+ var sb strings.Builder
+ sb.WriteString("BackupManifestPath: [")
+ for i, m := range *p {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ if m.Incremental {
+ sb.WriteString("incremental:")
+ } else {
+ sb.WriteString("full:")
+ }
+ sb.WriteString(fmt.Sprintf("%v...%v", m.FromPosition, m.Position))
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// ChooseBinlogsForIncrementalBackup chooses which binary logs need to be backed up in an incremental backup,
+// given a list of known binary logs, a function that returns the "Previous GTIDs" per binary log, and a
+// position from which to backup (normally the position of last known backup)
+// The function returns an error if the request could not be fulfilled: whether backup is not at all
+// possible, or is empty.
+func ChooseBinlogsForIncrementalBackup(
+ ctx context.Context,
+ lookFromGTIDSet mysql.GTIDSet,
+ binaryLogs []string,
+ pgtids func(ctx context.Context, binlog string) (gtids string, err error),
+ unionPreviousGTIDs bool,
+) (
+ binaryLogsToBackup []string,
+ incrementalBackupFromGTID string,
+ incrementalBackupToGTID string,
+ err error,
+) {
+
+ var prevGTIDsUnion mysql.GTIDSet
+ for i, binlog := range binaryLogs {
+ previousGtids, err := pgtids(ctx, binlog)
+ if err != nil {
+ return nil, "", "", vterrors.Wrapf(err, "cannot get previous gtids for binlog %v", binlog)
+ }
+ prevPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, previousGtids)
+ if err != nil {
+ return nil, "", "", vterrors.Wrapf(err, "cannot decode binlog %s position in incremental backup: %v", binlog, prevPos)
+ }
+ if prevGTIDsUnion == nil {
+ prevGTIDsUnion = prevPos.GTIDSet
+ } else {
+ prevGTIDsUnion = prevGTIDsUnion.Union(prevPos.GTIDSet)
+ }
+
+ containedInFromPos := lookFromGTIDSet.Contains(prevPos.GTIDSet)
+ // The binary logs are read in-order. They are build one on top of the other: we know
+ // the PreviousGTIDs of once binary log fully cover the previous binary log's.
+ if containedInFromPos {
+ // All previous binary logs are fully contained by backupPos. Carry on
+ continue
+ }
+ // We look for the first binary log whose "PreviousGTIDs" isn't already fully covered
+ // by "backupPos" (the position from which we want to create the inreemental backup).
+ // That means the *previous* binary log is the first binary log to introduce GTID events on top
+ // of "backupPos"
+ if i == 0 {
+ return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "the very first binlog file %v has PreviousGTIDs %s that exceed given incremental backup pos. There are GTID entries that are missing and this backup cannot run", binlog, prevPos)
+ }
+ if unionPreviousGTIDs {
+ prevPos.GTIDSet = prevGTIDsUnion
+ }
+ if !prevPos.GTIDSet.Contains(lookFromGTIDSet) {
+ return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "binary log %v with previous GTIDS %s neither contains requested GTID %s nor contains it. Backup cannot take place", binlog, prevPos.GTIDSet, lookFromGTIDSet)
+ }
+ // We begin with the previous binary log, and we ignore the last binary log, because it's still open and being written to.
+ binaryLogsToBackup = binaryLogs[i-1 : len(binaryLogs)-1]
+ incrementalBackupFromGTID, err := pgtids(ctx, binaryLogsToBackup[0])
+ if err != nil {
+ return nil, "", "", vterrors.Wrapf(err, "cannot evaluate incremental backup from pos")
+ }
+ // The "previous GTIDs" of the binary logs that _follows_ our binary-logs-to-backup indicates
+ // the backup's position.
+ incrementalBackupToGTID, err := pgtids(ctx, binaryLogs[len(binaryLogs)-1])
+ if err != nil {
+ return nil, "", "", vterrors.Wrapf(err, "cannot evaluate incremental backup to pos")
+ }
+ return binaryLogsToBackup, incrementalBackupFromGTID, incrementalBackupToGTID, nil
+ }
+ return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no binary logs to backup (increment is empty)")
+}
+
+// IsValidIncrementalBakcup determines whether the given manifest can be used to extend a backup
+// based on baseGTIDSet. The manifest must be able to pick up from baseGTIDSet, and must extend it by at least
+// one entry.
+func IsValidIncrementalBakcup(baseGTIDSet mysql.GTIDSet, purgedGTIDSet mysql.GTIDSet, manifest *BackupManifest) bool {
+ if manifest == nil {
+ return false
+ }
+ if !manifest.Incremental {
+ return false
+ }
+ // We want to validate:
+ // manifest.FromPosition <= baseGTID < manifest.Position
+ if !baseGTIDSet.Contains(manifest.FromPosition.GTIDSet) {
+ // the incremental backup has a gap from the base set.
+ return false
+ }
+ if baseGTIDSet.Contains(manifest.Position.GTIDSet) {
+ // the incremental backup adds nothing; it's already contained in the base set
+ return false
+ }
+ if !manifest.Position.GTIDSet.Union(purgedGTIDSet).Contains(baseGTIDSet) {
+ // the base set seems to have extra entries?
+ return false
+ }
+ return true
+}
+
+// FindPITRPath evaluates the shortest path to recover a restoreToGTIDSet. The past is composed of:
+// - a full backup, followed by:
+// - zero or more incremental backups
+// The path ends with restoreToGTIDSet or goes beyond it. No shorter path will do the same.
+// The function returns an error when a path cannot be found.
+func FindPITRPath(restoreToGTIDSet mysql.GTIDSet, manifests [](*BackupManifest)) (shortestPath [](*BackupManifest), err error) {
+ sortedManifests := make([](*BackupManifest), 0, len(manifests))
+ for _, m := range manifests {
+ if m != nil {
+ sortedManifests = append(sortedManifests, m)
+ }
+ }
+ sort.SliceStable(sortedManifests, func(i, j int) bool {
+ return sortedManifests[j].Position.GTIDSet.Union(sortedManifests[i].PurgedPosition.GTIDSet).Contains(sortedManifests[i].Position.GTIDSet)
+ })
+ mostRelevantFullBackupIndex := -1 // an invalid value
+ for i, manifest := range sortedManifests {
+ if manifest.Incremental {
+ continue
+ }
+ if restoreToGTIDSet.Contains(manifest.Position.GTIDSet) {
+ // This backup is <= desired restore point, therefore it's valid
+ mostRelevantFullBackupIndex = i
+ }
+ }
+
+ if mostRelevantFullBackupIndex < 0 {
+ // No full backup prior to desired restore point...
+ return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no full backup found before GTID %v", restoreToGTIDSet)
+ }
+ // All that interests us starts with mostRelevantFullBackupIndex: that's where the full backup is,
+ // and any relevant incremental backups follow that point (because manifests are sorted by backup pos, ascending)
+ sortedManifests = sortedManifests[mostRelevantFullBackupIndex:]
+ // Of all relevant backups, we take the most recent one.
+ fullBackup := sortedManifests[0]
+ if restoreToGTIDSet.Equal(fullBackup.Position.GTIDSet) {
+ // Perfect match, we don't need to look for incremental backups.
+ // We just skip the complexity of the followup section.
+ // The result path is a single full backup.
+ return append(shortestPath, fullBackup), nil
+ }
+ purgedGTIDSet := fullBackup.PurgedPosition.GTIDSet
+
+ var validRestorePaths []BackupManifestPath
+ // recursive function that searches for all possible paths:
+ var findPaths func(baseGTIDSet mysql.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest)
+ findPaths = func(baseGTIDSet mysql.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) {
+ // The algorithm was first designed to find all possible paths. But then we recognized that it will be
+ // doing excessive work. At this time we choose to end the search once we find the first valid path, even if
+ // it's not the most optimal. The next "if" statement is the addition to the algorithm, where we suffice with
+ // a single result.
+ if len(validRestorePaths) > 0 {
+ return
+ }
+ // remove the above if you wish to explore all paths.
+ if baseGTIDSet.Contains(restoreToGTIDSet) {
+ // successful end of path. Update list of successful paths
+ validRestorePaths = append(validRestorePaths, pathManifests)
+ return
+ }
+ if len(remainingManifests) == 0 {
+ // end of the road. No possibilities from here.
+ return
+ }
+ // if the next manifest is eligible to be part of the path, try it out
+ if IsValidIncrementalBakcup(baseGTIDSet, purgedGTIDSet, remainingManifests[0]) {
+ nextGTIDSet := baseGTIDSet.Union(remainingManifests[0].Position.GTIDSet)
+ findPaths(nextGTIDSet, append(pathManifests, remainingManifests[0]), remainingManifests[1:])
+ }
+ // also, try without the next manifest
+ findPaths(baseGTIDSet, pathManifests, remainingManifests[1:])
+ }
+ // find all paths, entry point
+ findPaths(fullBackup.Position.GTIDSet, sortedManifests[0:1], sortedManifests[1:])
+ if len(validRestorePaths) == 0 {
+ return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no path found that leads to GTID %v", restoreToGTIDSet)
+ }
+ // Now find a shortest path
+ for i := range validRestorePaths {
+ path := validRestorePaths[i]
+ if shortestPath == nil {
+ shortestPath = path
+ continue
+ }
+ if len(path) < len(shortestPath) {
+ shortestPath = path
+ }
+ }
+ return shortestPath, nil
+}
diff --git a/go/vt/mysqlctl/binlogs_gtid_test.go b/go/vt/mysqlctl/binlogs_gtid_test.go
new file mode 100644
index 00000000000..f09d88c6544
--- /dev/null
+++ b/go/vt/mysqlctl/binlogs_gtid_test.go
@@ -0,0 +1,451 @@
+// Package mysqlctl_test is the blackbox tests for package mysqlctl.
+// Tests that need to use fakemysqldaemon must be written as blackbox tests;
+// since fakemysqldaemon imports mysqlctl, importing fakemysqldaemon in
+// a `package mysqlctl` test would cause a circular import.
+package mysqlctl
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+)
+
+func TestChooseBinlogsForIncrementalBackup(t *testing.T) {
+ binlogs := []string{
+ "vt-bin.000001",
+ "vt-bin.000002",
+ "vt-bin.000003",
+ "vt-bin.000004",
+ "vt-bin.000005",
+ "vt-bin.000006",
+ }
+ basePreviousGTIDs := map[string]string{
+ "vt-bin.000001": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-50",
+ "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60",
+ "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60",
+ "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78",
+ "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243",
+ "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331",
+ }
+ tt := []struct {
+ previousGTIDs map[string]string
+ backupPos string
+ expectBinlogs []string
+ expectError string
+ }{
+ {
+ previousGTIDs: basePreviousGTIDs,
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78",
+ expectBinlogs: []string{"vt-bin.000004", "vt-bin.000005"},
+ },
+ {
+ previousGTIDs: basePreviousGTIDs,
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60",
+ expectBinlogs: []string{"vt-bin.000003", "vt-bin.000004", "vt-bin.000005"},
+ },
+ {
+ previousGTIDs: basePreviousGTIDs,
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-63",
+ expectBinlogs: []string{"vt-bin.000003", "vt-bin.000004", "vt-bin.000005"},
+ },
+ {
+ previousGTIDs: basePreviousGTIDs,
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243",
+ expectBinlogs: []string{"vt-bin.000005"},
+ },
+ {
+ previousGTIDs: basePreviousGTIDs,
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331",
+ expectError: "no binary logs to backup",
+ },
+ {
+ previousGTIDs: basePreviousGTIDs,
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-630000",
+ expectError: "no binary logs to backup",
+ },
+ {
+ previousGTIDs: basePreviousGTIDs,
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f0000:1-63",
+ expectError: "There are GTID entries that are missing",
+ },
+ {
+ previousGTIDs: map[string]string{
+ "vt-bin.000001": "",
+ "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60",
+ "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60",
+ "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78",
+ "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243",
+ "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331",
+ },
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f0000:1-63",
+ expectError: "neither contains requested GTID",
+ },
+ {
+ previousGTIDs: map[string]string{
+ "vt-bin.000001": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-50",
+ "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60",
+ "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60",
+ "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-78",
+ "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:20-243",
+ "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:200-331",
+ },
+ backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-63",
+ expectBinlogs: []string{"vt-bin.000003", "vt-bin.000004", "vt-bin.000005"},
+ },
+ }
+ for i, tc := range tt {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ backupPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.backupPos)
+ require.NoError(t, err)
+ require.NoError(t, err)
+ binlogsToBackup, fromGTID, toGTID, err := ChooseBinlogsForIncrementalBackup(
+ context.Background(),
+ backupPos.GTIDSet,
+ binlogs,
+ func(ctx context.Context, binlog string) (gtids string, err error) {
+ gtids, ok := tc.previousGTIDs[binlog]
+ if !ok {
+ return "", fmt.Errorf("previous gtids not found for binary log %v", binlog)
+ }
+ return gtids, nil
+ },
+ true,
+ )
+ if tc.expectError != "" {
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), tc.expectError)
+ return
+ }
+ require.NoError(t, err)
+ require.NotEmpty(t, binlogsToBackup)
+ assert.Equal(t, tc.expectBinlogs, binlogsToBackup)
+ assert.Equal(t, tc.previousGTIDs[binlogsToBackup[0]], fromGTID)
+ assert.Equal(t, tc.previousGTIDs[binlogs[len(binlogs)-1]], toGTID)
+ assert.NotEqual(t, fromGTID, toGTID)
+ })
+ }
+}
+
+func TestIsValidIncrementalBakcup(t *testing.T) {
+ incrementalManifest := func(backupPos string, backupFromPos string) *BackupManifest {
+ return &BackupManifest{
+ Position: mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupPos)),
+ FromPosition: mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupFromPos)),
+ Incremental: true,
+ }
+ }
+ tt := []struct {
+ baseGTID string
+ purgedGTID string
+ backupFromPos string
+ backupPos string
+ expectIsValid bool
+ }{
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "1-58",
+ backupPos: "1-70",
+ expectIsValid: true,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "1-51",
+ backupPos: "1-70",
+ expectIsValid: true,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "1-51",
+ backupPos: "1-58",
+ expectIsValid: false,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "1-58",
+ backupPos: "1-58",
+ expectIsValid: false,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "1-51",
+ backupPos: "1-55",
+ expectIsValid: false,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "1-59",
+ backupPos: "1-70",
+ expectIsValid: false,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "1-60",
+ backupPos: "1-70",
+ expectIsValid: false,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ backupFromPos: "3-51",
+ backupPos: "3-70",
+ expectIsValid: false,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ purgedGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-2",
+ backupFromPos: "3-51",
+ backupPos: "3-70",
+ expectIsValid: true,
+ },
+ {
+ baseGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ purgedGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-2",
+ backupFromPos: "4-51",
+ backupPos: "4-70",
+ expectIsValid: false,
+ },
+ }
+ for i, tc := range tt {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ basePos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.baseGTID)
+ require.NoError(t, err)
+ purgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.purgedGTID)
+ require.NoError(t, err)
+ isValid := IsValidIncrementalBakcup(basePos.GTIDSet, purgedPos.GTIDSet, incrementalManifest(tc.backupPos, tc.backupFromPos))
+ assert.Equal(t, tc.expectIsValid, isValid)
+ })
+ }
+}
+
+func TestFindPITRPath(t *testing.T) {
+ generatePosition := func(posRange string) mysql.Position {
+ return mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", posRange))
+ }
+ fullManifest := func(backupPos string) *BackupManifest {
+ return &BackupManifest{
+ Position: generatePosition(backupPos),
+ }
+ }
+ incrementalManifest := func(backupPos string, backupFromPos string) *BackupManifest {
+ return &BackupManifest{
+ Position: generatePosition(backupPos),
+ FromPosition: generatePosition(backupFromPos),
+ Incremental: true,
+ }
+ }
+ fullBackups := []*BackupManifest{
+ fullManifest("1-50"),
+ fullManifest("1-5"),
+ fullManifest("1-80"),
+ fullManifest("1-70"),
+ fullManifest("1-70"),
+ }
+ incrementalBackups := []*BackupManifest{
+ incrementalManifest("1-34", "1-5"),
+ incrementalManifest("1-38", "1-34"),
+ incrementalManifest("1-52", "1-35"),
+ incrementalManifest("1-60", "1-50"),
+ incrementalManifest("1-70", "1-60"),
+ incrementalManifest("1-82", "1-70"),
+ incrementalManifest("1-92", "1-79"),
+ incrementalManifest("1-95", "1-89"),
+ }
+ tt := []struct {
+ name string
+ restoreGTID string
+ purgedGTID string
+ incrementalBackups []*BackupManifest
+ expectFullManifest *BackupManifest
+ expectIncrementalManifests []*BackupManifest
+ expectError string
+ }{
+ {
+ name: "1-58",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-58",
+ expectFullManifest: fullManifest("1-50"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-52", "1-35"),
+ incrementalManifest("1-60", "1-50"),
+ },
+ },
+ {
+ name: "1-50",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-50",
+ expectFullManifest: fullManifest("1-50"),
+ },
+ {
+ name: "1-78",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78",
+ expectFullManifest: fullManifest("1-70"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-82", "1-70"),
+ },
+ },
+ {
+ name: "1-45",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-45",
+ expectFullManifest: fullManifest("1-5"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-34", "1-5"),
+ incrementalManifest("1-38", "1-34"),
+ incrementalManifest("1-52", "1-35"),
+ },
+ },
+ {
+ name: "1-28",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-28",
+ expectFullManifest: fullManifest("1-5"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-34", "1-5"),
+ },
+ },
+ {
+ name: "1-88",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-88",
+ expectFullManifest: fullManifest("1-80"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-82", "1-70"),
+ incrementalManifest("1-92", "1-79"),
+ },
+ },
+ {
+ name: "fail 1-2",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-2",
+ expectError: "no full backup",
+ },
+ {
+ name: "fail unknown UUID",
+ restoreGTID: "00000000-0000-0000-0000-0a43f95f28a3:1-50",
+ expectError: "no full backup",
+ },
+ {
+ name: "fail 1-99",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-99",
+ expectError: "no path found",
+ },
+ {
+ name: "1-94",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-94",
+ expectFullManifest: fullManifest("1-80"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-82", "1-70"),
+ incrementalManifest("1-92", "1-79"),
+ incrementalManifest("1-95", "1-89"),
+ },
+ },
+ {
+ name: "1-95",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-95",
+ expectFullManifest: fullManifest("1-80"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-82", "1-70"),
+ incrementalManifest("1-92", "1-79"),
+ incrementalManifest("1-95", "1-89"),
+ },
+ },
+ {
+ name: "fail 1-88 with gaps",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-88",
+ incrementalBackups: []*BackupManifest{
+ incrementalManifest("1-34", "1-5"),
+ incrementalManifest("1-38", "1-34"),
+ incrementalManifest("1-52", "1-35"),
+ incrementalManifest("1-60", "1-50"),
+ incrementalManifest("1-70", "1-60"),
+ incrementalManifest("1-82", "1-70"),
+ incrementalManifest("1-92", "1-84"),
+ incrementalManifest("1-95", "1-89"),
+ },
+ expectError: "no path found",
+ },
+ {
+ name: "1-45 first solution even when shorter exists",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-45",
+ incrementalBackups: append(
+ incrementalBackups,
+ incrementalManifest("1-99", "1-5"),
+ ),
+ expectFullManifest: fullManifest("1-5"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("1-34", "1-5"),
+ incrementalManifest("1-38", "1-34"),
+ incrementalManifest("1-52", "1-35"),
+ },
+ },
+ {
+ name: "fail incomplete binlog previous GTIDs",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-92",
+ incrementalBackups: []*BackupManifest{
+ incrementalManifest("3-90", "3-75"),
+ incrementalManifest("3-95", "3-90"),
+ },
+ expectFullManifest: fullManifest("1-80"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("3-90", "3-75"),
+ incrementalManifest("3-95", "3-90"),
+ },
+ expectError: "no path found",
+ },
+ {
+ name: "incomplete binlog previous GTIDs",
+ restoreGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-92",
+ purgedGTID: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-2",
+ incrementalBackups: []*BackupManifest{
+ incrementalManifest("3-90", "3-75"),
+ incrementalManifest("3-95", "3-90"),
+ },
+ expectFullManifest: fullManifest("1-80"),
+ expectIncrementalManifests: []*BackupManifest{
+ incrementalManifest("3-90", "3-75"),
+ incrementalManifest("3-95", "3-90"),
+ },
+ },
+ }
+ for _, tc := range tt {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.incrementalBackups == nil {
+ tc.incrementalBackups = incrementalBackups
+ }
+ for i := range fullBackups {
+ var err error
+ fullBackup := fullBackups[i]
+ fullBackup.PurgedPosition, err = mysql.ParsePosition(mysql.Mysql56FlavorID, tc.purgedGTID)
+ require.NoError(t, err)
+ defer func() {
+ fullBackup.PurgedPosition = mysql.Position{}
+ }()
+ }
+ var manifests []*BackupManifest
+ manifests = append(manifests, fullBackups...)
+ manifests = append(manifests, tc.incrementalBackups...)
+
+ restorePos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.restoreGTID)
+ require.NoErrorf(t, err, "%v", err)
+ path, err := FindPITRPath(restorePos.GTIDSet, manifests)
+ if tc.expectError != "" {
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), tc.expectError)
+ return
+ }
+ require.NoErrorf(t, err, "%v", err)
+ require.NotEmpty(t, path)
+ // the path always consists of one full backup and zero or more incremental backups
+ fullBackup := path[0]
+ require.False(t, fullBackup.Incremental)
+ for _, manifest := range path[1:] {
+ require.True(t, manifest.Incremental)
+ }
+ assert.Equal(t, tc.expectFullManifest.Position.GTIDSet, fullBackup.Position.GTIDSet)
+ if tc.expectIncrementalManifests == nil {
+ tc.expectIncrementalManifests = []*BackupManifest{}
+ }
+ expected := BackupManifestPath(tc.expectIncrementalManifests)
+ got := BackupManifestPath(path[1:])
+ assert.Equal(t, expected, got, "expected: %s, got: %s", expected.String(), got.String())
+ })
+ }
+}
diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go
index 09323c9387b..56fabc1dd99 100644
--- a/go/vt/mysqlctl/builtinbackupengine.go
+++ b/go/vt/mysqlctl/builtinbackupengine.go
@@ -27,6 +27,7 @@ import (
"io"
"os"
"path"
+ "path/filepath"
"sync"
"sync/atomic"
"time"
@@ -36,7 +37,6 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/concurrency"
- "vitess.io/vitess/go/vt/hook"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
@@ -50,6 +50,7 @@ import (
const (
builtinBackupEngineName = "builtin"
+ autoIncrementalFromPos = "auto"
writerBufferSize = 2 * 1024 * 1024
dataDictionaryFile = "mysql.ibd"
)
@@ -71,8 +72,7 @@ type BuiltinBackupEngine struct {
}
// builtinBackupManifest represents the backup. It lists all the files, the
-// Position that the backup was taken at, and the transform hook used,
-// if any.
+// Position that the backup was taken at, the compression engine used, etc.
type builtinBackupManifest struct {
// BackupManifest is an anonymous embedding of the base manifest struct.
BackupManifest
@@ -86,9 +86,6 @@ type builtinBackupManifest struct {
// FileEntries contains all the files in the backup
FileEntries []FileEntry
- // TransformHook that was used on the files, if any.
- TransformHook string
-
// SkipCompress is true if the backup files were NOT run through gzip.
// The field is expressed as a negative because it will come through as
// false for backups that were created before the field existed, and those
@@ -101,6 +98,7 @@ type FileEntry struct {
// Base is one of:
// - backupInnodbDataHomeDir for files that go into Mycnf.InnodbDataHomeDir
// - backupInnodbLogGroupHomeDir for files that go into Mycnf.InnodbLogGroupHomeDir
+ // - binLogDir for files that go in the binlog dir (base path of Mycnf.BinLogPath)
// - backupData for files that go into Mycnf.DataDir
Base string
@@ -110,10 +108,14 @@ type FileEntry struct {
// Hash is the hash of the final data (transformed and
// compressed if specified) stored in the BackupStorage.
Hash string
+
+ // ParentPath is an optional prefix to the Base path. If empty, it is ignored. Useful
+ // for writing files in a temporary directory
+ ParentPath string
}
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
servenv.OnParseFor(cmd, registerBuiltinBackupEngineFlags)
}
}
@@ -123,7 +125,13 @@ func registerBuiltinBackupEngineFlags(fs *pflag.FlagSet) {
fs.DurationVar(&builtinBackupProgress, "builtinbackup_progress", builtinBackupProgress, "how often to send progress updates when backing up large files.")
}
-func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) {
+// isIncrementalBackup is a convenience function to check whether the params indicate an incremental backup request
+func isIncrementalBackup(params BackupParams) bool {
+ return params.IncrementalFromPos != ""
+}
+
+// fullPath returns the full path of the entry, based on its type
+func (fe *FileEntry) fullPath(cnf *Mycnf) (string, error) {
// find the root to use
var root string
switch fe.Base {
@@ -133,14 +141,22 @@ func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) {
root = cnf.InnodbLogGroupHomeDir
case backupData:
root = cnf.DataDir
+ case backupBinlogDir:
+ root = filepath.Dir(cnf.BinLogPath)
default:
- return nil, vterrors.Errorf(vtrpc.Code_UNKNOWN, "unknown base: %v", fe.Base)
+ return "", vterrors.Errorf(vtrpc.Code_UNKNOWN, "unknown base: %v", fe.Base)
}
- // and open the file
- name := path.Join(root, fe.Name)
+ return path.Join(fe.ParentPath, root, fe.Name), nil
+}
+
+// open attempts t oopen the file
+func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) {
+ name, err := fe.fullPath(cnf)
+ if err != nil {
+ return nil, vterrors.Wrapf(err, "cannot evaluate full name for %v", fe.Name)
+ }
var fd *os.File
- var err error
if readOnly {
if fd, err = os.Open(name); err != nil {
return nil, vterrors.Wrapf(err, "cannot open source file %v", name)
@@ -157,11 +173,133 @@ func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) {
return fd, nil
}
-// ExecuteBackup returns a boolean that indicates if the backup is usable,
-// and an overall error.
+// ExecuteBackup runs a backup based on given params. This could be a full or incremental backup.
+// The function returns a boolean that indicates if the backup is usable, and an overall error.
func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) {
+ params.Logger.Infof("Executing Backup at %v for keyspace/shard %v/%v on tablet %v, concurrency: %v, compress: %v, incrementalFromPos: %v",
+ params.BackupTime, params.Keyspace, params.Shard, params.TabletAlias, params.Concurrency, backupStorageCompress, params.IncrementalFromPos)
- params.Logger.Infof("Hook: %v, Compress: %v", backupStorageHook, backupStorageCompress)
+ if isIncrementalBackup(params) {
+ return be.executeIncrementalBackup(ctx, params, bh)
+ }
+ return be.executeFullBackup(ctx, params, bh)
+}
+
+// executeIncrementalBackup runs an incremental backup, based on given 'incremental_from_pos', which can be:
+// - A valid position
+// - "auto", indicating the incremental backup should begin with last successful backup end position.
+func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) {
+ if params.IncrementalFromPos == autoIncrementalFromPos {
+ params.Logger.Infof("auto evaluating incremental_from_pos")
+ bs, err := backupstorage.GetBackupStorage()
+ if err != nil {
+ return false, err
+ }
+ defer bs.Close()
+
+ // Backups are stored in a directory structure that starts with
+ // /
+ backupDir := GetBackupDir(params.Keyspace, params.Shard)
+ bhs, err := bs.ListBackups(ctx, backupDir)
+ if err != nil {
+ return false, vterrors.Wrap(err, "ListBackups failed")
+ }
+ _, manifest, err := FindLatestSuccessfulBackup(ctx, params.Logger, bhs)
+ if err != nil {
+ return false, vterrors.Wrap(err, "FindLatestSuccessfulBackup failed")
+ }
+ params.IncrementalFromPos = mysql.EncodePosition(manifest.Position)
+ params.Logger.Infof("auto evaluated incremental_from_pos: %s", params.IncrementalFromPos)
+ }
+
+ rp, err := mysql.DecodePosition(params.IncrementalFromPos)
+ if err != nil {
+ return false, vterrors.Wrapf(err, "cannot decode position in incremental backup: %v", params.IncrementalFromPos)
+ }
+ if !rp.MatchesFlavor(mysql.Mysql56FlavorID) {
+ // incrementalFromGtidSet, ok := rp.GTIDSet.(mysql.Mysql56GTIDSet)
+ // if !ok {
+ return false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "incremental backup only supports MySQL GTID positions. Got: %v", params.IncrementalFromPos)
+ }
+ serverUUID, err := params.Mysqld.GetServerUUID(ctx)
+ if err != nil {
+ return false, vterrors.Wrap(err, "can't get server uuid")
+ }
+ gtidPurged, err := params.Mysqld.GetGTIDPurged(ctx)
+ if err != nil {
+ return false, vterrors.Wrap(err, "can't get gtid_purged")
+ }
+ rpGTID, ok := rp.GTIDSet.(mysql.Mysql56GTIDSet)
+ if !ok {
+ return false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID value: %v", rpGTID)
+ }
+ purgedGTID, ok := gtidPurged.GTIDSet.(mysql.Mysql56GTIDSet)
+ if !ok {
+ return false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID purged value: %v", rpGTID)
+ }
+ // binlogs may not contain information about purged GTIDs. e.g. some binlog.000003 may have
+ // previous GTIDs like 00021324-1111-1111-1111-111111111111:30-60, ie 1-29 range is missing. This can happen
+ // when a server is restored from backup and set with gtid_purged != "".
+ // This is fine!
+ // Shortly we will compare a binlog's "Previous GTIDs" with the backup's position. For the purpose of comparison, we
+ // ignore the purged GTIDs:
+ binlogCompareGTID := rpGTID.Difference(purgedGTID)
+
+ if err := params.Mysqld.FlushBinaryLogs(ctx); err != nil {
+ return false, vterrors.Wrapf(err, "cannot flush binary logs in incremental backup")
+ }
+ binaryLogs, err := params.Mysqld.GetBinaryLogs(ctx)
+ if err != nil {
+ return false, vterrors.Wrapf(err, "cannot get binary logs in incremental backup")
+ }
+ previousGTIDs := map[string]string{}
+ getPreviousGTIDs := func(ctx context.Context, binlog string) (gtids string, err error) {
+ gtids, ok := previousGTIDs[binlog]
+ if ok {
+ // Found a cached entry! No need to query again
+ return gtids, nil
+ }
+ gtids, err = params.Mysqld.GetPreviousGTIDs(ctx, binlog)
+ if err != nil {
+ return gtids, err
+ }
+ previousGTIDs[binlog] = gtids
+ return gtids, nil
+ }
+ binaryLogsToBackup, incrementalBackupFromGTID, incrementalBackupToGTID, err := ChooseBinlogsForIncrementalBackup(ctx, binlogCompareGTID, binaryLogs, getPreviousGTIDs, true)
+ if err != nil {
+ return false, vterrors.Wrapf(err, "cannot get binary logs to backup in incremental backup")
+ }
+ incrementalBackupFromPosition, err := mysql.ParsePosition(mysql.Mysql56FlavorID, incrementalBackupFromGTID)
+ if err != nil {
+ return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupFromGTID)
+ }
+ incrementalBackupToPosition, err := mysql.ParsePosition(mysql.Mysql56FlavorID, incrementalBackupToGTID)
+ if err != nil {
+ return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupToGTID)
+ }
+ // It's worthwhile we explain the difference between params.IncrementalFromPos and incrementalBackupFromPosition.
+ // params.IncrementalFromPos is supplied by the user. They want an incremental backup that covers that position.
+ // However, we implement incremental backups by copying complete binlog files. That position could potentially
+ // be somewhere in the middle of some binlog. So we look at the earliest binlog file that covers the user's position.
+ // The backup we take either starts exactly at the user's position or at some prior position, depending where in the
+ // binlog file the user's requested position is found.
+ // incrementalBackupFromGTID is the "previous GTIDs" of the first binlog file we back up.
+ // It is a fact that incrementalBackupFromGTID is earlier or equal to params.IncrementalFromPos.
+ // In the backup manifest file, we document incrementalBackupFromGTID, not the user's requested position.
+ if err := be.backupFiles(ctx, params, bh, incrementalBackupToPosition, mysql.Position{}, incrementalBackupFromPosition, binaryLogsToBackup, serverUUID); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// executeFullBackup returns a boolean that indicates if the backup is usable,
+// and an overall error.
+func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) {
+
+ if params.IncrementalFromPos != "" {
+ return be.executeIncrementalBackup(ctx, params, bh)
+ }
// Save initial state so we can restore.
replicaStartRequired := false
@@ -202,11 +340,11 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP
return false, vterrors.Wrap(err, "can't get position on primary")
}
} else {
- if err = params.Mysqld.StopReplication(params.HookExtraEnv); err != nil {
+ // This is a replica
+ if err := params.Mysqld.StopReplication(params.HookExtraEnv); err != nil {
return false, vterrors.Wrapf(err, "can't stop replica")
}
- var replicaStatus mysql.ReplicationStatus
- replicaStatus, err = params.Mysqld.ReplicationStatus()
+ replicaStatus, err := params.Mysqld.ReplicationStatus()
if err != nil {
return false, vterrors.Wrap(err, "can't get replica status")
}
@@ -214,6 +352,20 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP
}
params.Logger.Infof("using replication position: %v", replicationPosition)
+ gtidPurgedPosition, err := params.Mysqld.GetGTIDPurged(ctx)
+ if err != nil {
+ return false, vterrors.Wrap(err, "can't get gtid_purged")
+ }
+
+ if err != nil {
+ return false, vterrors.Wrap(err, "can't get purged position")
+ }
+
+ serverUUID, err := params.Mysqld.GetServerUUID(ctx)
+ if err != nil {
+ return false, vterrors.Wrap(err, "can't get server uuid")
+ }
+
// shutdown mysqld
shutdownCtx, cancel := context.WithTimeout(ctx, BuiltinBackupMysqldTimeout)
err = params.Mysqld.Shutdown(shutdownCtx, params.Cnf, true)
@@ -223,7 +375,7 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP
}
// Backup everything, capture the error.
- backupErr := be.backupFiles(ctx, params, bh, replicationPosition)
+ backupErr := be.backupFiles(ctx, params, bh, replicationPosition, gtidPurgedPosition, mysql.Position{}, nil, serverUUID)
usable := backupErr == nil
// Try to restart mysqld, use background context in case we timed out the original context
@@ -299,11 +451,26 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP
}
// backupFiles finds the list of files to backup, and creates the backup.
-func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, replicationPosition mysql.Position) (finalErr error) {
+func (be *BuiltinBackupEngine) backupFiles(
+ ctx context.Context,
+ params BackupParams,
+ bh backupstorage.BackupHandle,
+ replicationPosition mysql.Position,
+ purgedPosition mysql.Position,
+ fromPosition mysql.Position,
+ binlogFiles []string,
+ serverUUID string,
+) (finalErr error) {
// Get the files to backup.
// We don't care about totalSize because we add each file separately.
- fes, _, err := findFilesToBackup(params.Cnf)
+ var fes []FileEntry
+ var err error
+ if isIncrementalBackup(params) {
+ fes, _, err = binlogFilesToBackup(params.Cnf, binlogFiles)
+ } else {
+ fes, _, err = findFilesToBackup(params.Cnf)
+ }
if err != nil {
return vterrors.Wrap(err, "can't find files to backup")
}
@@ -360,15 +527,21 @@ func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, params BackupPar
bm := &builtinBackupManifest{
// Common base fields
BackupManifest: BackupManifest{
- BackupMethod: builtinBackupEngineName,
- Position: replicationPosition,
- BackupTime: params.BackupTime.UTC().Format(time.RFC3339),
- FinishedTime: time.Now().UTC().Format(time.RFC3339),
+ BackupMethod: builtinBackupEngineName,
+ Position: replicationPosition,
+ PurgedPosition: purgedPosition,
+ FromPosition: fromPosition,
+ Incremental: !fromPosition.IsZero(),
+ ServerUUID: serverUUID,
+ TabletAlias: params.TabletAlias,
+ Keyspace: params.Keyspace,
+ Shard: params.Shard,
+ BackupTime: params.BackupTime.UTC().Format(time.RFC3339),
+ FinishedTime: time.Now().UTC().Format(time.RFC3339),
},
// Builtin-specific fields
FileEntries: fes,
- TransformHook: backupStorageHook,
SkipCompress: !backupStorageCompress,
CompressionEngine: CompressionEngineName,
}
@@ -503,19 +676,6 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara
var writer io.Writer = bw
- // Create the external write pipe, if any.
- var pipe io.WriteCloser
- var wait hook.WaitFunc
- if backupStorageHook != "" {
- h := hook.NewHook(backupStorageHook, []string{"-operation", "write"})
- h.ExtraEnv = params.HookExtraEnv
- pipe, wait, _, err = h.ExecuteAsWritePipe(writer)
- if err != nil {
- return vterrors.Wrapf(err, "'%v' hook returned error", backupStorageHook)
- }
- writer = pipe
- }
-
// Create the gzip compression pipe, if necessary.
var compressor io.WriteCloser
if backupStorageCompress {
@@ -544,20 +704,6 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara
}
}
- // Close the hook pipe if necessary.
- if pipe != nil {
- if err := pipe.Close(); err != nil {
- return vterrors.Wrap(err, "cannot close hook pipe")
- }
- stderr, err := wait()
- if stderr != "" {
- params.Logger.Infof("'%v' hook returned stderr: %v", backupStorageHook, stderr)
- }
- if err != nil {
- return vterrors.Wrapf(err, "'%v' returned error", backupStorageHook)
- }
- }
-
// Close the backupPipe to finish writing on destination.
if err = bw.Close(); err != nil {
return vterrors.Wrapf(err, "cannot flush destination: %v", name)
@@ -572,6 +718,55 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara
return nil
}
+// executeRestoreFullBackup restores the files from a full backup. The underlying mysql database service is expected to be stopped.
+func (be *BuiltinBackupEngine) executeRestoreFullBackup(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error {
+ if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil {
+ return err
+ }
+
+ params.Logger.Infof("Restore: copying %v files", len(bm.FileEntries))
+
+ if _, err := be.restoreFiles(context.Background(), params, bh, bm); err != nil {
+ // don't delete the file here because that is how we detect an interrupted restore
+ return vterrors.Wrap(err, "failed to restore files")
+ }
+ return nil
+}
+
+// executeRestoreIncrementalBackup executes a restore of an incremental backup, and expect to run on top of a full backup's restore.
+// It restores any (zero or more) binary log files and applies them onto the underlying database one at a time, but only applies those transactions
+// that fall within params.RestoreToPos.GTIDSet. The rest (typically a suffix of the last binary log) are discarded.
+// The underlying mysql database is expected to be up and running.
+func (be *BuiltinBackupEngine) executeRestoreIncrementalBackup(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error {
+ params.Logger.Infof("Restoring incremental backup to position: %v", bm.Position)
+
+ createdDir, err := be.restoreFiles(context.Background(), params, bh, bm)
+ defer os.RemoveAll(createdDir)
+ mysqld, ok := params.Mysqld.(*Mysqld)
+ if !ok {
+ return vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "expected: Mysqld")
+ }
+ for _, fe := range bm.FileEntries {
+ fe.ParentPath = createdDir
+ binlogFile, err := fe.fullPath(params.Cnf)
+ if err != nil {
+ return vterrors.Wrap(err, "failed to restore file")
+ }
+ if err := mysqld.applyBinlogFile(binlogFile, params.RestoreToPos.GTIDSet); err != nil {
+ return vterrors.Wrap(err, "failed to extract binlog file")
+ }
+ defer os.Remove(binlogFile)
+ params.Logger.Infof("Applied binlog file: %v", binlogFile)
+ }
+ if err != nil {
+ // don't delete the file here because that is how we detect an interrupted restore
+ return vterrors.Wrap(err, "failed to restore files")
+ }
+ params.Logger.Infof("Restored incremental backup files to: %v", createdDir)
+
+ return nil
+}
+
// ExecuteRestore restores from a backup. If the restore is successful
// we return the position from which replication should start
// otherwise an error is returned
@@ -588,24 +783,38 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor
return nil, err
}
- if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil {
- return nil, err
+ var err error
+ if bm.Incremental {
+ err = be.executeRestoreIncrementalBackup(ctx, params, bh, bm)
+ } else {
+ err = be.executeRestoreFullBackup(ctx, params, bh, bm)
}
-
- params.Logger.Infof("Restore: copying %v files", len(bm.FileEntries))
-
- if err := be.restoreFiles(context.Background(), params, bh, bm); err != nil {
- // don't delete the file here because that is how we detect an interrupted restore
- return nil, vterrors.Wrap(err, "failed to restore files")
+ if err != nil {
+ return nil, err
}
-
params.Logger.Infof("Restore: returning replication position %v", bm.Position)
return &bm.BackupManifest, nil
}
// restoreFiles will copy all the files from the BackupStorage to the
// right place.
-func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error {
+func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) (createdDir string, err error) {
+ // For optimization, we are replacing pargzip with pgzip, so newBuiltinDecompressor doesn't have to compare and print warning for every file
+ // since newBuiltinDecompressor is helper method and does not hold any state, it was hard to do it in that method itself.
+ if bm.CompressionEngine == PargzipCompressor {
+ params.Logger.Warningf(`engine "pargzip" doesn't support decompression, using "pgzip" instead`)
+ bm.CompressionEngine = PgzipCompressor
+ defer func() {
+ bm.CompressionEngine = PargzipCompressor
+ }()
+ }
+
+ if bm.Incremental {
+ createdDir, err = os.MkdirTemp("", "restore-incremental-*")
+ if err != nil {
+ return "", err
+ }
+ }
fes := bm.FileEntries
sema := sync2.NewSemaphore(params.Concurrency, 0)
rec := concurrency.AllErrorRecorder{}
@@ -623,17 +832,19 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP
return
}
+ fe := &fes[i]
+ fe.ParentPath = createdDir
// And restore the file.
name := fmt.Sprintf("%v", i)
- params.Logger.Infof("Copying file %v: %v", name, fes[i].Name)
- err := be.restoreFile(ctx, params, bh, &fes[i], bm, name)
+ params.Logger.Infof("Copying file %v: %v", name, fe.Name)
+ err := be.restoreFile(ctx, params, bh, fe, bm, name)
if err != nil {
- rec.RecordError(vterrors.Wrapf(err, "can't restore file %v to %v", name, fes[i].Name))
+ rec.RecordError(vterrors.Wrapf(err, "can't restore file %v to %v", name, fe.Name))
}
}(i)
}
wg.Wait()
- return rec.Error()
+ return createdDir, rec.Error()
}
// restoreFile restores an individual file.
@@ -667,17 +878,6 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa
dst := bufio.NewWriterSize(dstFile, writerBufferSize)
var reader io.Reader = bp
- // Create the external read pipe, if any.
- var wait hook.WaitFunc
- if bm.TransformHook != "" {
- h := hook.NewHook(bm.TransformHook, []string{"-operation", "read"})
- h.ExtraEnv = params.HookExtraEnv
- reader, wait, _, err = h.ExecuteAsReadPipe(reader)
- if err != nil {
- return vterrors.Wrapf(err, "'%v' hook returned error", bm.TransformHook)
- }
- }
-
// Create the uncompresser if needed.
if !bm.SkipCompress {
var decompressor io.ReadCloser
@@ -722,17 +922,6 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa
return vterrors.Wrap(err, "failed to copy file contents")
}
- // Close the Pipe.
- if wait != nil {
- stderr, err := wait()
- if stderr != "" {
- log.Infof("'%v' hook returned stderr: %v", bm.TransformHook, stderr)
- }
- if err != nil {
- return vterrors.Wrapf(err, "'%v' returned error", bm.TransformHook)
- }
- }
-
// Check the hash.
hash := bp.HashString()
if hash != fe.Hash {
diff --git a/go/vt/mysqlctl/capabilityset.go b/go/vt/mysqlctl/capabilityset.go
index 331592a76bc..a9d655c2bc4 100644
--- a/go/vt/mysqlctl/capabilityset.go
+++ b/go/vt/mysqlctl/capabilityset.go
@@ -51,15 +51,16 @@ func (c *capabilitySet) hasMaria104InstallDb() bool {
return c.isMariaDB() && c.version.atLeast(ServerVersion{Major: 10, Minor: 4, Patch: 0})
}
-// hasDynamicRedoLogCapacity tells you if the version of MySQL in use supports dynamic redo log
-// capacity.
-// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the
-// (/. by default) called "#innodb_redo" and you can
-// dynamically adjust the capacity of redo log space in the running server. See:
+// hasDisableRedoLog tells you if the version of MySQL in use can disable redo logging.
//
-// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity
-func (c *capabilitySet) hasDynamicRedoLogCapacity() bool {
- return c.isMySQLLike() && c.version.atLeast(ServerVersion{Major: 8, Minor: 0, Patch: 30})
+// As of MySQL 8.0.21, you can disable redo logging using the ALTER INSTANCE
+// DISABLE INNODB REDO_LOG statement. This functionality is intended for
+// loading data into a new MySQL instance. Disabling redo logging speeds up
+// data loading by avoiding redo log writes and doublewrite buffering.
+//
+// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-disable-redo-logging
+func (c *capabilitySet) hasDisableRedoLog() bool {
+ return c.isMySQLLike() && c.version.atLeast(ServerVersion{Major: 8, Minor: 0, Patch: 21})
}
// IsMySQLLike tests if the server is either MySQL
diff --git a/go/vt/mysqlctl/compression.go b/go/vt/mysqlctl/compression.go
index 40c4dc344a3..ea8f96cc100 100644
--- a/go/vt/mysqlctl/compression.go
+++ b/go/vt/mysqlctl/compression.go
@@ -65,7 +65,7 @@ var (
)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtbackup", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
servenv.OnParseFor(cmd, registerBackupCompressionFlags)
}
}
@@ -193,7 +193,7 @@ func newExternalDecompressor(ctx context.Context, cmdStr string, reader io.Reade
// This returns a reader that will decompress the underlying provided reader and will use the specified supported engine.
func newBuiltinDecompressor(engine string, reader io.Reader, logger logutil.Logger) (decompressor io.ReadCloser, err error) {
if engine == PargzipCompressor {
- logger.Warningf("engine \"pargzip\" doesn't support decompression, using \"pgzip\" instead")
+ logger.Warningf(`engine "pargzip" doesn't support decompression, using "pgzip" instead`)
engine = PgzipCompressor
}
diff --git a/go/vt/mysqlctl/compression_benchmark_test.go b/go/vt/mysqlctl/compression_benchmark_test.go
new file mode 100644
index 00000000000..73cd684c719
--- /dev/null
+++ b/go/vt/mysqlctl/compression_benchmark_test.go
@@ -0,0 +1,461 @@
+package mysqlctl
+
+import (
+ "bufio"
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/logutil"
+)
+
+type (
+ benchmarkCompressArgs struct {
+ b *testing.B
+ builtin string
+ external string
+ }
+
+ benchmarkCompressEnv struct {
+ benchmarkCompressArgs
+ }
+
+ fnReadCloser struct {
+ io.Reader
+ closer func() error
+ }
+
+ meteredReader struct {
+ count int64
+ r io.Reader
+ }
+
+ meteredWriter struct {
+ count int64
+ w io.Writer
+ }
+
+ timedWriter struct {
+ duration time.Duration
+ w io.Writer
+ }
+)
+
+const (
+ // This is the default file which will be downloaded, decompressed, and
+ // used by the compression benchmarks in this suite. It's a ~1.5 GiB
+ // compressed tar file containing 3 InnoDB files. The InnoDB files were
+ // built from this Wikipedia dataset:
+ //
+ // https://dumps.wikimedia.org/archive/enwiki/20080103/enwiki-20080103-pages-articles.xml.bz2
+ defaultDataURL = "https://github.com/vitessio/vitess-resources/releases/download/testdata-v1.0/enwiki-20080103-pages-articles.ibd.tar.zst"
+
+ // By default, don't limit how many bytes we input into compression.
+ defaultMaxBytes int64 = 0
+
+ // By default the benchmarks will remove any downloaded data after all
+ // benchmarks are run, unless the data URL is a local path, in which case
+ // it will be left alone.
+ //
+ // Users may override this behavior. This option is
+ // intended purely for debugging purposes.
+ //
+ // export VT_MYSQLCTL_COMPRESSION_BENCHMARK_CLEANUP=false
+ envVarCleanup = "VT_MYSQLCTL_COMPRESSION_BENCHMARK_CLEANUP"
+
+ // Users may specify an alternate gzipped URL. This option is intended
+ // purely for development and debugging purposes. For example:
+ //
+ // export VT_MYSQLCTL_COMPRESSION_BENCHMARK_DATA_URL=https://wiki.mozilla.org/images/f/ff/Example.json.gz
+ //
+ // A local path can also be specified:
+ //
+ // export VT_MYSQLCTL_COMPRESSION_BENCHMARK_DATA_URL=file:///tmp/custom.dat
+ envVarDataURL = "VT_MYSQLCTL_COMPRESSION_BENCHMARK_DATA_URL"
+
+ // Users may override how many bytes are downloaded. This option is
+ // intended purely for development and debugging purposes. For example:
+ //
+ // export VT_MYSQLCTL_COMPRESSION_BENCHMARK_MAX_BYTES=256
+ envVarMaxBytes = "VT_MYSQLCTL_COMPRESSION_BENCHMARK_MAX_BYTES"
+)
+
+func (frc *fnReadCloser) Close() error {
+ return frc.closer()
+}
+
+func dataLocalPath(u *url.URL) string {
+ if isLocal(u) {
+ return u.Path
+ }
+ // Compute a local path for a file by hashing the URL.
+ return path.Join(os.TempDir(), fmt.Sprintf("%x.dat", md5.Sum([]byte(u.String()))))
+}
+
+func dataURL() (*url.URL, error) {
+ u := defaultDataURL
+
+ // Use user-defined URL, if specified.
+ if udURL := os.Getenv(envVarDataURL); udURL != "" {
+ u = udURL
+ }
+
+ return url.Parse(u)
+}
+
+func downloadData(url, localPath string, maxBytes int64) error {
+ var err error
+ var rdr io.Reader
+
+ // If the local path does not exist, download the file from the URL.
+ httpClient := http.Client{
+ CheckRedirect: func(r *http.Request, via []*http.Request) error {
+ r.URL.Opaque = r.URL.Path
+ return nil
+ },
+ }
+
+ resp, err := httpClient.Get(url)
+ if err != nil {
+ return fmt.Errorf("failed to get data at URL %q: %v", url, err)
+ }
+ defer resp.Body.Close()
+ rdr = resp.Body
+
+ // Assume the data we're downloading is compressed with zstd.
+ zr, err := zstd.NewReader(rdr)
+ if err != nil {
+ return fmt.Errorf("failed to decompress data at URL %q: %v", url, err)
+ }
+ defer zr.Close()
+ rdr = zr
+
+ if maxBytes > 0 {
+ rdr = io.LimitReader(rdr, maxBytes)
+ }
+
+ // Create a local file to write the HTTP response to.
+ file, err := os.OpenFile(localPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ // Write the decompressed data to local path.
+ if _, err := io.Copy(file, rdr); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func isHTTP(u *url.URL) bool {
+ return u.Scheme == "http" || u.Scheme == "https"
+}
+
+func isLocal(u *url.URL) bool {
+ return u.Scheme == "file" || (u.Scheme == "" && u.Hostname() == "")
+}
+
+func maxBytes() (int64, error) {
+ // Limit how many bytes we unpack from the archive.
+ mb := defaultMaxBytes
+
+ // Use user-defined max bytes, if specified and valid.
+ if udMaxBytes := os.Getenv(envVarMaxBytes); udMaxBytes != "" {
+ udmb, err := strconv.ParseInt(udMaxBytes, 10, 64)
+ if err != nil {
+ return mb, err
+ }
+ mb = udmb
+ }
+
+ return mb, nil
+}
+
+func newBenchmarkCompressEnv(args benchmarkCompressArgs) benchmarkCompressEnv {
+ bce := benchmarkCompressEnv{
+ benchmarkCompressArgs: args,
+ }
+ bce.validate()
+ bce.prepare()
+ return bce
+}
+
+func shouldCleanup(u *url.URL) (bool, error) {
+ c := true
+
+ // Don't cleanup local paths provided by the user.
+ if isLocal(u) {
+ c = false
+ }
+
+ // Use user-defined cleanup, if specified and valid.
+ if udCleanup := os.Getenv(envVarCleanup); udCleanup != "" {
+ udc, err := strconv.ParseBool(udCleanup)
+ if err != nil {
+ return c, err
+ }
+ c = udc
+ }
+
+ return c, nil
+}
+
+func (bce *benchmarkCompressEnv) compress() {
+ var durCompressed time.Duration
+ var numUncompressedBytes, numCompressedBytes int64
+
+ // The Benchmark, Reader and Writer interfaces make it difficult to time
+ // compression without frequent calls to {Start,Stop}Timer or including
+ // disk read/write times the measurement. Instead we'll use ReportMetric
+ // after all loops are completed.
+ bce.b.StopTimer()
+ bce.b.ResetTimer()
+
+ for i := 0; i < bce.b.N; i++ {
+ logger := logutil.NewMemoryLogger()
+
+ // Don't write anywhere. We're just interested in compression time.
+ w := io.Discard
+
+ // Keep track of how many compressed bytes come through.
+ mw := &meteredWriter{w: w}
+
+ // Create compressor.
+ c := bce.compressor(logger, mw)
+
+ // Time how long we spend on c.Write.
+ tc := &timedWriter{w: c}
+
+ r, err := bce.reader()
+ require.Nil(bce.b, err, "Failed to get data reader.")
+
+ // Track how many bytes we read.
+ mr := &meteredReader{r: r}
+
+ // It makes sense to use {Start,Stop}Timer here, but we're not
+ // interested in how long it takes to read from disk.
+ _, err = io.Copy(tc, mr)
+
+ // Don't defer closing things, otherwise we can exhaust open file limit.
+ r.Close()
+ c.Close()
+
+ require.Nil(bce.b, err, logger.Events)
+
+ // Record how many bytes compressed so we can report these later.
+ durCompressed += tc.duration
+ numCompressedBytes += mw.count
+ numUncompressedBytes += mr.count
+ }
+
+ bce.b.ReportMetric(
+ float64(durCompressed.Nanoseconds()/int64(bce.b.N)),
+ "ns/op",
+ )
+
+ mbOut := numUncompressedBytes / 1024 / 1024
+ bce.b.ReportMetric(
+ float64(mbOut)/durCompressed.Seconds(),
+ "MB/s",
+ )
+
+ bce.b.ReportMetric(
+ float64(numUncompressedBytes)/float64(numCompressedBytes),
+ "compression-ratio",
+ )
+}
+
+func (bce *benchmarkCompressEnv) compressor(logger logutil.Logger, writer io.Writer) io.WriteCloser {
+ var compressor io.WriteCloser
+ var err error
+
+ if bce.builtin != "" {
+ compressor, err = newBuiltinCompressor(bce.builtin, writer, logger)
+ } else if bce.external != "" {
+ compressor, err = newExternalCompressor(context.Background(), bce.external, writer, logger)
+ }
+
+ require.Nil(bce.b, err, "failed to create compressor.")
+ return compressor
+}
+
+func (bce *benchmarkCompressEnv) prepare() {
+ u, err := dataURL()
+ require.NoError(bce.b, err, "failed to get data url")
+
+ localPath := dataLocalPath(u)
+
+ if isLocal(u) {
+ if _, err := os.Stat(localPath); errors.Is(err, os.ErrNotExist) {
+ require.Failf(bce.b, "local path does not exist", localPath)
+ }
+ } else if isHTTP(u) {
+ if _, err := os.Stat(localPath); errors.Is(err, os.ErrNotExist) {
+ mb, _ := maxBytes()
+ bce.b.Logf("downloading data from %s", u.String())
+ if err := downloadData(u.String(), localPath, mb); err != nil {
+ require.Failf(bce.b, "failed to download data", err.Error())
+ }
+ }
+ } else {
+ require.Failf(bce.b, "don't know how to get data from url", u.String())
+ }
+}
+
+func (bce *benchmarkCompressEnv) reader() (io.ReadCloser, error) {
+ var r io.Reader
+
+ u, _ := dataURL()
+
+ f, err := os.Open(dataLocalPath(u))
+ if err != nil {
+ return nil, err
+ }
+ r = f
+
+ mb, _ := maxBytes()
+ if mb > 0 {
+ r = io.LimitReader(f, mb)
+ }
+
+ buf := bufio.NewReaderSize(r, 2*1024*1024)
+ return &fnReadCloser{buf, f.Close}, nil
+}
+
+func (bce *benchmarkCompressEnv) validate() {
+ if bce.external != "" {
+ cmdArgs := strings.Split(bce.external, " ")
+
+ _, err := validateExternalCmd(cmdArgs[0])
+ if err != nil {
+ bce.b.Skipf("command %q not available in this host: %v; skipping...", cmdArgs[0], err)
+ }
+ }
+
+ if bce.builtin == "" && bce.external == "" {
+ require.Fail(bce.b, "either builtin or external compressor must be specified.")
+ }
+}
+
+func (mr *meteredReader) Read(p []byte) (nbytes int, err error) {
+ nbytes, err = mr.r.Read(p)
+ mr.count += int64(nbytes)
+ return
+}
+
+func (mw *meteredWriter) Write(p []byte) (nbytes int, err error) {
+ nbytes, err = mw.w.Write(p)
+ mw.count += int64(nbytes)
+ return
+}
+
+func (tw *timedWriter) Write(p []byte) (nbytes int, err error) {
+ start := time.Now()
+ nbytes, err = tw.w.Write(p)
+ tw.duration += time.Since(start)
+ return
+}
+
+func TestMain(m *testing.M) {
+ code := m.Run()
+
+ u, _ := dataURL()
+ localPath := dataLocalPath(u)
+
+ cleanup, err := shouldCleanup(u)
+ if cleanup {
+ msg := "cleaning up %q"
+ args := []any{localPath}
+
+ if err != nil {
+ args = append(args, err)
+ msg = msg + "; %v"
+ }
+
+ fmt.Printf(msg+"\n", args...)
+ if _, err := os.Stat(localPath); !errors.Is(err, os.ErrNotExist) {
+ os.Remove(localPath)
+ }
+ }
+
+ os.Exit(code)
+}
+
+func BenchmarkCompressLz4Builtin(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ builtin: Lz4Compressor,
+ })
+ env.compress()
+}
+
+func BenchmarkCompressPargzipBuiltin(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ builtin: PargzipCompressor,
+ })
+ env.compress()
+}
+
+func BenchmarkCompressPgzipBuiltin(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ builtin: PgzipCompressor,
+ })
+ env.compress()
+}
+
+func BenchmarkCompressZstdBuiltin(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ builtin: ZstdCompressor,
+ })
+ env.compress()
+}
+
+func BenchmarkCompressZstdExternal(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ external: fmt.Sprintf("zstd -%d -c", compressionLevel),
+ })
+ env.compress()
+}
+
+func BenchmarkCompressZstdExternalFast4(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ external: fmt.Sprintf("zstd -%d --fast=4 -c", compressionLevel),
+ })
+ env.compress()
+}
+
+func BenchmarkCompressZstdExternalT0(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ external: fmt.Sprintf("zstd -%d -T0 -c", compressionLevel),
+ })
+ env.compress()
+}
+
+func BenchmarkCompressZstdExternalT4(b *testing.B) {
+ env := newBenchmarkCompressEnv(benchmarkCompressArgs{
+ b: b,
+ external: fmt.Sprintf("zstd -%d -T4 -c", compressionLevel),
+ })
+ env.compress()
+}
diff --git a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go
index 5402be5e540..dfa24555f25 100644
--- a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go
+++ b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go
@@ -183,7 +183,7 @@ func NewFakeMysqlDaemon(db *fakesqldb.DB) *FakeMysqlDaemon {
IOThreadRunning: true,
}
if db != nil {
- result.appPool = dbconnpool.NewConnectionPool("AppConnPool", 5, time.Minute, 0)
+ result.appPool = dbconnpool.NewConnectionPool("AppConnPool", 5, time.Minute, 0, 0)
result.appPool.Open(db.ConnParams())
}
return result
@@ -335,6 +335,27 @@ func (fmd *FakeMysqlDaemon) GetGTIDMode(ctx context.Context) (gtidMode string, e
})
}
+// FlushBinaryLogs is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) FlushBinaryLogs(ctx context.Context) (err error) {
+ return fmd.ExecuteSuperQueryList(ctx, []string{
+ "FAKE FLUSH BINARY LOGS",
+ })
+}
+
+// GetBinaryLogs is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) GetBinaryLogs(ctx context.Context) (binaryLogs []string, err error) {
+ return []string{}, fmd.ExecuteSuperQueryList(ctx, []string{
+ "FAKE SHOW BINARY LOGS",
+ })
+}
+
+// GetPreviousGTIDs is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) GetPreviousGTIDs(ctx context.Context, binlog string) (previousGtids string, err error) {
+ return "", fmd.ExecuteSuperQueryList(ctx, []string{
+ fmt.Sprintf("FAKE SHOW BINLOG EVENTS IN '%s' LIMIT 2", binlog),
+ })
+}
+
// PrimaryPosition is part of the MysqlDaemon interface
func (fmd *FakeMysqlDaemon) PrimaryPosition() (mysql.Position, error) {
return fmd.CurrentPrimaryPosition, nil
@@ -431,7 +452,6 @@ func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host strin
if stopReplicationBefore {
cmds = append(cmds, "STOP SLAVE")
}
- cmds = append(cmds, "RESET SLAVE ALL")
cmds = append(cmds, "FAKE SET MASTER")
if startReplicationAfter {
cmds = append(cmds, "START SLAVE")
diff --git a/go/vt/mysqlctl/metadata_tables.go b/go/vt/mysqlctl/metadata_tables.go
deleted file mode 100644
index 5ed7bb9cfe1..00000000000
--- a/go/vt/mysqlctl/metadata_tables.go
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package mysqlctl
-
-import (
- "bytes"
- "fmt"
-
- "context"
-
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/sqltypes"
- "vitess.io/vitess/go/vt/dbconnpool"
- "vitess.io/vitess/go/vt/log"
-)
-
-// Note that definitions of local_metadata and shard_metadata should be the same
-// as in testing which is defined in config/init_db.sql.
-const (
- sqlCreateLocalMetadataTable = `CREATE TABLE IF NOT EXISTS _vt.local_metadata (
- name VARCHAR(255) NOT NULL,
- value MEDIUMBLOB NOT NULL,
- PRIMARY KEY (name)
- ) ENGINE=InnoDB`
- sqlCreateShardMetadataTable = `CREATE TABLE IF NOT EXISTS _vt.shard_metadata (
- name VARCHAR(255) NOT NULL,
- value MEDIUMBLOB NOT NULL,
- PRIMARY KEY (name)
- ) ENGINE=InnoDB`
- sqlUpdateLocalMetadataTable = "UPDATE _vt.local_metadata SET db_name='%s' WHERE db_name=''"
- sqlUpdateShardMetadataTable = "UPDATE _vt.shard_metadata SET db_name='%s' WHERE db_name=''"
-)
-
-var (
- sqlAlterLocalMetadataTable = []string{
- `ALTER TABLE _vt.local_metadata ADD COLUMN db_name VARBINARY(255) NOT NULL DEFAULT ''`,
- `ALTER TABLE _vt.local_metadata DROP PRIMARY KEY, ADD PRIMARY KEY(name, db_name)`,
- // VARCHAR(255) is not long enough to hold replication positions, hence changing to
- // MEDIUMBLOB.
- `ALTER TABLE _vt.local_metadata CHANGE value value MEDIUMBLOB NOT NULL`,
- }
- sqlAlterShardMetadataTable = []string{
- `ALTER TABLE _vt.shard_metadata ADD COLUMN db_name VARBINARY(255) NOT NULL DEFAULT ''`,
- `ALTER TABLE _vt.shard_metadata DROP PRIMARY KEY, ADD PRIMARY KEY(name, db_name)`,
- }
-)
-
-// MetadataManager manages the creation and filling of the _vt.local_metadata
-// and _vt.shard_metadata tables.
-type MetadataManager struct{}
-
-// PopulateMetadataTables creates and fills the _vt.local_metadata table and
-// creates the _vt.shard_metadata table.
-//
-// _vt.local_metadata table is a per-tablet table that is never replicated.
-// This allows queries against local_metadata to return different values on
-// different tablets, which is used for communicating between Vitess and
-// MySQL-level tools like Orchestrator (https://github.com/openark/orchestrator).
-//
-// _vt.shard_metadata is a replicated table with per-shard information, but it's
-// created here to make it easier to create it on databases that were running
-// old version of Vitess, or databases that are getting converted to run under
-// Vitess.
-//
-// This function is semantically equivalent to calling createMetadataTables
-// followed immediately by upsertLocalMetadata.
-func (m *MetadataManager) PopulateMetadataTables(mysqld MysqlDaemon, localMetadata map[string]string, dbName string) error {
- log.Infof("Populating _vt.local_metadata table...")
-
- // Get a non-pooled DBA connection.
- conn, err := mysqld.GetDbaConnection(context.TODO())
- if err != nil {
- return err
- }
- defer conn.Close()
-
- // Disable replication on this session. We close the connection after using
- // it, so there's no need to re-enable replication when we're done.
- if _, err := conn.ExecuteFetch("SET @@session.sql_log_bin = 0", 0, false); err != nil {
- return err
- }
-
- // Create the database and table if necessary.
- if err := createMetadataTables(conn, dbName); err != nil {
- return err
- }
-
- // Populate local_metadata from the passed list of values.
- return upsertLocalMetadata(conn, localMetadata, dbName)
-}
-
-// UpsertLocalMetadata adds the given metadata map to the _vt.local_metadata
-// table, updating any rows that exist for a given `_vt.local_metadata.name`
-// with the map value. The session that performs these upserts sets
-// sql_log_bin=0, as the _vt.local_metadata table is meant to never be
-// replicated.
-//
-// Callers are responsible for ensuring the _vt.local_metadata table exists
-// before calling this function, usually by calling CreateMetadataTables at
-// least once prior.
-func (m *MetadataManager) UpsertLocalMetadata(mysqld MysqlDaemon, localMetadata map[string]string, dbName string) error {
- log.Infof("Upserting _vt.local_metadata ...")
-
- conn, err := mysqld.GetDbaConnection(context.TODO())
- if err != nil {
- return err
- }
- defer conn.Close()
-
- // Disable replication on this session. We close the connection after using
- // it, so there's no need to re-enable replication when we're done.
- if _, err := conn.ExecuteFetch("SET @@session.sql_log_bin = 0", 0, false); err != nil {
- return err
- }
-
- return upsertLocalMetadata(conn, localMetadata, dbName)
-}
-
-func createMetadataTables(conn *dbconnpool.DBConnection, dbName string) error {
- if _, err := conn.ExecuteFetch("CREATE DATABASE IF NOT EXISTS _vt", 0, false); err != nil {
- return err
- }
-
- if err := createLocalMetadataTable(conn, dbName); err != nil {
- return err
- }
-
- if err := createShardMetadataTable(conn, dbName); err != nil {
- return err
- }
-
- return nil
-}
-
-func createLocalMetadataTable(conn *dbconnpool.DBConnection, dbName string) error {
- if _, err := conn.ExecuteFetch(sqlCreateLocalMetadataTable, 0, false); err != nil {
- return err
- }
-
- for _, sql := range sqlAlterLocalMetadataTable {
- if _, err := conn.ExecuteFetch(sql, 0, false); err != nil {
- // Ignore "Duplicate column name 'db_name'" errors which can happen on every restart.
- if merr, ok := err.(*mysql.SQLError); !ok || merr.Num != mysql.ERDupFieldName {
- log.Errorf("Error executing %v: %v", sql, err)
- return err
- }
- }
- }
-
- sql := fmt.Sprintf(sqlUpdateLocalMetadataTable, dbName)
- if _, err := conn.ExecuteFetch(sql, 0, false); err != nil {
- log.Errorf("Error executing %v: %v, continuing. Please check the data in _vt.local_metadata and take corrective action.", sql, err)
- }
-
- return nil
-}
-
-func createShardMetadataTable(conn *dbconnpool.DBConnection, dbName string) error {
- if _, err := conn.ExecuteFetch(sqlCreateShardMetadataTable, 0, false); err != nil {
- return err
- }
-
- for _, sql := range sqlAlterShardMetadataTable {
- if _, err := conn.ExecuteFetch(sql, 0, false); err != nil {
- // Ignore "Duplicate column name 'db_name'" errors which can happen on every restart.
- if merr, ok := err.(*mysql.SQLError); !ok || merr.Num != mysql.ERDupFieldName {
- log.Errorf("Error executing %v: %v", sql, err)
- return err
- }
- }
- }
-
- sql := fmt.Sprintf(sqlUpdateShardMetadataTable, dbName)
- if _, err := conn.ExecuteFetch(sql, 0, false); err != nil {
- log.Errorf("Error executing %v: %v, continuing. Please check the data in _vt.shard_metadata and take corrective action.", sql, err)
- }
-
- return nil
-}
-
-// upsertLocalMetadata adds the given metadata map to the _vt.local_metadata
-// table, updating any rows that exist for a given `_vt.local_metadata.name`
-// with the map value. The session that performs these upserts sets
-// sql_log_bin=0, as the _vt.local_metadata table is meant to never be
-// replicated.
-//
-// Callers are responsible for ensuring the _vt.local_metadata table exists
-// before calling this function, usually by calling CreateMetadataTables at
-// least once prior.
-func upsertLocalMetadata(conn *dbconnpool.DBConnection, localMetadata map[string]string, dbName string) error {
- // Populate local_metadata from the passed list of values.
- if _, err := conn.ExecuteFetch("BEGIN", 0, false); err != nil {
- return err
- }
- for name, val := range localMetadata {
- nameValue := sqltypes.NewVarChar(name)
- valValue := sqltypes.NewVarChar(val)
- dbNameValue := sqltypes.NewVarBinary(dbName)
-
- queryBuf := bytes.Buffer{}
- queryBuf.WriteString("INSERT INTO _vt.local_metadata (name,value, db_name) VALUES (")
- nameValue.EncodeSQL(&queryBuf)
- queryBuf.WriteByte(',')
- valValue.EncodeSQL(&queryBuf)
- queryBuf.WriteByte(',')
- dbNameValue.EncodeSQL(&queryBuf)
- queryBuf.WriteString(") ON DUPLICATE KEY UPDATE value = ")
- valValue.EncodeSQL(&queryBuf)
-
- if _, err := conn.ExecuteFetch(queryBuf.String(), 0, false); err != nil {
- return err
- }
- }
-
- if _, err := conn.ExecuteFetch("COMMIT", 0, false); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/go/vt/mysqlctl/mycnf_gen.go b/go/vt/mysqlctl/mycnf_gen.go
index 3024b3bb5e6..2ac47fe617b 100644
--- a/go/vt/mysqlctl/mycnf_gen.go
+++ b/go/vt/mysqlctl/mycnf_gen.go
@@ -59,7 +59,7 @@ var (
)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
servenv.OnParseFor(cmd, registerMyCnfFlags)
}
}
diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go
index c4c76224b3b..ac0aede5614 100644
--- a/go/vt/mysqlctl/mysql_daemon.go
+++ b/go/vt/mysqlctl/mysql_daemon.go
@@ -64,6 +64,9 @@ type MysqlDaemon interface {
ResetReplicationParameters(ctx context.Context) error
GetBinlogInformation(ctx context.Context) (binlogFormat string, logEnabled bool, logReplicaUpdate bool, binlogRowImage string, err error)
GetGTIDMode(ctx context.Context) (gtidMode string, err error)
+ FlushBinaryLogs(ctx context.Context) (err error)
+ GetBinaryLogs(ctx context.Context) (binaryLogs []string, err error)
+ GetPreviousGTIDs(ctx context.Context, binlog string) (previousGtids string, err error)
// reparenting related methods
ResetReplication(ctx context.Context) error
diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go
index 2738caed2bd..f91d880bc5f 100644
--- a/go/vt/mysqlctl/mysqld.go
+++ b/go/vt/mysqlctl/mysqld.go
@@ -100,23 +100,35 @@ type Mysqld struct {
}
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver"} {
servenv.OnParseFor(cmd, registerMySQLDFlags)
}
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
+ servenv.OnParseFor(cmd, registerReparentFlags)
+ }
+ for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver"} {
+ servenv.OnParseFor(cmd, registerPoolFlags)
+ }
}
func registerMySQLDFlags(fs *pflag.FlagSet) {
- fs.BoolVar(&DisableActiveReparents, "disable_active_reparents", DisableActiveReparents, "if set, do not allow active reparents. Use this to protect a cluster using external reparents.")
- fs.IntVar(&dbaPoolSize, "dba_pool_size", dbaPoolSize, "Size of the connection pool for dba connections")
- fs.DurationVar(&DbaIdleTimeout, "dba_idle_timeout", DbaIdleTimeout, "Idle timeout for dba connections")
- fs.IntVar(&appPoolSize, "app_pool_size", appPoolSize, "Size of the connection pool for app connections")
- fs.DurationVar(&appIdleTimeout, "app_idle_timeout", appIdleTimeout, "Idle timeout for app connections")
fs.DurationVar(&PoolDynamicHostnameResolution, "pool_hostname_resolve_interval", PoolDynamicHostnameResolution, "if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)")
fs.StringVar(&mycnfTemplateFile, "mysqlctl_mycnf_template", mycnfTemplateFile, "template file to use for generating the my.cnf file during server init")
fs.StringVar(&socketFile, "mysqlctl_socket", socketFile, "socket file to use for remote mysqlctl actions (empty for local actions)")
fs.DurationVar(&replicationConnectRetry, "replication_connect_retry", replicationConnectRetry, "how long to wait in between replica reconnect attempts. Only precise to the second.")
}
+func registerReparentFlags(fs *pflag.FlagSet) {
+ fs.BoolVar(&DisableActiveReparents, "disable_active_reparents", DisableActiveReparents, "if set, do not allow active reparents. Use this to protect a cluster using external reparents.")
+}
+
+func registerPoolFlags(fs *pflag.FlagSet) {
+ fs.IntVar(&dbaPoolSize, "dba_pool_size", dbaPoolSize, "Size of the connection pool for dba connections")
+ fs.DurationVar(&DbaIdleTimeout, "dba_idle_timeout", DbaIdleTimeout, "Idle timeout for dba connections")
+ fs.DurationVar(&appIdleTimeout, "app_idle_timeout", appIdleTimeout, "Idle timeout for app connections")
+ fs.IntVar(&appPoolSize, "app_pool_size", appPoolSize, "Size of the connection pool for app connections")
+}
+
// NewMysqld creates a Mysqld object based on the provided configuration
// and connection parameters.
func NewMysqld(dbcfgs *dbconfigs.DBConfigs) *Mysqld {
@@ -125,11 +137,11 @@ func NewMysqld(dbcfgs *dbconfigs.DBConfigs) *Mysqld {
}
// Create and open the connection pool for dba access.
- result.dbaPool = dbconnpool.NewConnectionPool("DbaConnPool", dbaPoolSize, DbaIdleTimeout, PoolDynamicHostnameResolution)
+ result.dbaPool = dbconnpool.NewConnectionPool("DbaConnPool", dbaPoolSize, DbaIdleTimeout, 0, PoolDynamicHostnameResolution)
result.dbaPool.Open(dbcfgs.DbaWithDB())
// Create and open the connection pool for app access.
- result.appPool = dbconnpool.NewConnectionPool("AppConnPool", appPoolSize, appIdleTimeout, PoolDynamicHostnameResolution)
+ result.appPool = dbconnpool.NewConnectionPool("AppConnPool", appPoolSize, appIdleTimeout, 0, PoolDynamicHostnameResolution)
result.appPool.Open(dbcfgs.AppWithDB())
/*
@@ -205,9 +217,7 @@ func GetVersionFromEnv() (flavor MySQLFlavor, ver ServerVersion, err error) {
env := os.Getenv("MYSQL_FLAVOR")
switch env {
case "MariaDB":
- return FlavorMariaDB, ServerVersion{10, 0, 10}, nil
- case "MariaDB103":
- return FlavorMariaDB, ServerVersion{10, 3, 7}, nil
+ return FlavorMariaDB, ServerVersion{10, 6, 11}, nil
case "MySQL80":
return FlavorMySQL, ServerVersion{8, 0, 11}, nil
case "MySQL56":
@@ -842,17 +852,9 @@ func (mysqld *Mysqld) getMycnfTemplate() string {
log.Infof("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version)
}
case FlavorMariaDB:
- switch mysqld.capabilities.version.Minor {
- case 0:
- versionConfig = config.MycnfMariaDB100
- case 1:
- versionConfig = config.MycnfMariaDB101
- case 2:
- versionConfig = config.MycnfMariaDB102
- case 3:
- versionConfig = config.MycnfMariaDB103
- case 4:
- versionConfig = config.MycnfMariaDB104
+ switch mysqld.capabilities.version.Major {
+ case 10:
+ versionConfig = config.MycnfMariaDB10
default:
log.Infof("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version)
}
@@ -1193,3 +1195,80 @@ func (mysqld *Mysqld) GetVersionComment(ctx context.Context) string {
versionComment, _ := res.ToString("@@global.version_comment")
return versionComment
}
+
+// applyBinlogFile extracts a binary log file and applies it to MySQL. It is the equivalent of:
+// $ mysqlbinlog --include-gtids binlog.file | mysql
+func (mysqld *Mysqld) applyBinlogFile(binlogFile string, includeGTIDs mysql.GTIDSet) error {
+ var pipe io.ReadCloser
+ var mysqlbinlogCmd *exec.Cmd
+ var mysqlCmd *exec.Cmd
+
+ dir, err := vtenv.VtMysqlRoot()
+ if err != nil {
+ return err
+ }
+ env, err := buildLdPaths()
+ if err != nil {
+ return err
+ }
+ {
+ name, err := binaryPath(dir, "mysqlbinlog")
+ if err != nil {
+ return err
+ }
+ args := []string{}
+ if gtids := includeGTIDs.String(); gtids != "" {
+ args = append(args,
+ "--include-gtids",
+ gtids,
+ )
+ }
+ args = append(args, binlogFile)
+
+ mysqlbinlogCmd = exec.Command(name, args...)
+ mysqlbinlogCmd.Dir = dir
+ mysqlbinlogCmd.Env = env
+ log.Infof("applyBinlogFile: running %#v", mysqlbinlogCmd)
+ pipe, err = mysqlbinlogCmd.StdoutPipe() // to be piped into mysql
+ if err != nil {
+ return err
+ }
+ }
+ {
+ name, err := binaryPath(dir, "mysql")
+ if err != nil {
+ return err
+ }
+ params, err := mysqld.dbcfgs.DbaConnector().MysqlParams()
+ if err != nil {
+ return err
+ }
+ cnf, err := mysqld.defaultsExtraFile(params)
+ if err != nil {
+ return err
+ }
+ defer os.Remove(cnf)
+ args := []string{
+ "--defaults-extra-file=" + cnf,
+ }
+ mysqlCmd = exec.Command(name, args...)
+ mysqlCmd.Dir = dir
+ mysqlCmd.Env = env
+ mysqlCmd.Stdin = pipe // piped from mysqlbinlog
+ }
+ // Run both processes, piped:
+ if err := mysqlbinlogCmd.Start(); err != nil {
+ return err
+ }
+ if err := mysqlCmd.Start(); err != nil {
+ return err
+ }
+ // Wait for both to complete:
+ if err := mysqlbinlogCmd.Wait(); err != nil {
+ return err
+ }
+ if err := mysqlCmd.Wait(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/go/vt/mysqlctl/mysqld_test.go b/go/vt/mysqlctl/mysqld_test.go
index 713b3c7077d..33fe00727e4 100644
--- a/go/vt/mysqlctl/mysqld_test.go
+++ b/go/vt/mysqlctl/mysqld_test.go
@@ -125,12 +125,7 @@ func TestAssumeVersionString(t *testing.T) {
},
{
versionString: "MariaDB",
- version: ServerVersion{10, 0, 10},
- flavor: FlavorMariaDB,
- },
- {
- versionString: "MariaDB103",
- version: ServerVersion{10, 3, 7},
+ version: ServerVersion{10, 6, 11},
flavor: FlavorMariaDB,
},
}
diff --git a/go/vt/mysqlctl/redo_log.go b/go/vt/mysqlctl/redo_log.go
new file mode 100644
index 00000000000..e29a28ae49f
--- /dev/null
+++ b/go/vt/mysqlctl/redo_log.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlctl
+
+import (
+ "context"
+ "fmt"
+)
+
+func (mysqld *Mysqld) BinaryHasDisableRedoLog() bool {
+ return mysqld.capabilities.hasDisableRedoLog()
+}
+
+func (mysqld *Mysqld) DisableRedoLog(ctx context.Context) error {
+ return mysqld.ExecuteSuperQuery(ctx, "ALTER INSTANCE DISABLE INNODB REDO_LOG")
+}
+
+func (mysqld *Mysqld) EnableRedoLog(ctx context.Context) error {
+ return mysqld.ExecuteSuperQuery(ctx, "ALTER INSTANCE ENABLE INNODB REDO_LOG")
+}
+
+func (mysqld *Mysqld) ProcessCanDisableRedoLog(ctx context.Context) (bool, error) {
+ qr, err := mysqld.FetchSuperQuery(ctx, "SELECT variable_value FROM performance_schema.global_status WHERE variable_name = 'innodb_redo_log_enabled'")
+ if err != nil {
+ // It's possible that the MySQL process can disable redo logging, but
+ // we were unable to connect in order to verify. Let's assume not and
+ // let the caller decide if they want to retry.
+ return false, err
+ }
+ if len(qr.Rows) == 0 {
+ return false, fmt.Errorf("mysqld >= 8.0.21 required to disable the redo log")
+ }
+ return true, nil
+}
diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go
index d8a4640bc7a..5a25cf8d7e3 100644
--- a/go/vt/mysqlctl/reparent.go
+++ b/go/vt/mysqlctl/reparent.go
@@ -24,37 +24,18 @@ import (
"fmt"
"time"
+ "vitess.io/vitess/go/vt/sidecardb"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/log"
"context"
)
-// CreateReparentJournal returns the commands to execute to create
-// the _vt.reparent_journal table. It is safe to run these commands
-// even if the table already exists.
-func CreateReparentJournal() []string {
- return []string{
- "CREATE DATABASE IF NOT EXISTS _vt",
- fmt.Sprintf(`CREATE TABLE IF NOT EXISTS _vt.reparent_journal (
- time_created_ns BIGINT UNSIGNED NOT NULL,
- action_name VARBINARY(250) NOT NULL,
- primary_alias VARBINARY(32) NOT NULL,
- replication_position VARBINARY(%v) DEFAULT NULL,
- PRIMARY KEY (time_created_ns))
-ENGINE=InnoDB`, mysql.MaximumPositionSize)}
-}
-
-// AlterReparentJournal returns the commands to execute to change
-// column master_alias -> primary_alias or the other way
-// In 13.0.0 we introduced renaming of primary_alias -> master_alias.
-// This was to support in-place downgrade from a later version.
-// In 14.0.0 we replace that with renaming of master_alias -> primary_alias.
-// This is to support in-place upgrades from 13.0.x to 14.0.x
-func AlterReparentJournal() []string {
- return []string{
- "ALTER TABLE _vt.reparent_journal CHANGE COLUMN master_alias primary_alias VARBINARY(32) NOT NULL",
- }
+// GenerateInitialBinlogEntry is used to create a binlog entry when a primary comes up and we need to get a
+// MySQL position so that we can set it as the starting position for replicas to do MySQL Replication from.
+func GenerateInitialBinlogEntry() string {
+ return sidecardb.CreateSidecarDatabaseQuery
}
// PopulateReparentJournal returns the SQL command to use to populate
@@ -82,6 +63,9 @@ func queryReparentJournal(timeCreatedNS int64) string {
func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error {
for {
qr, err := mysqld.FetchSuperQuery(ctx, queryReparentJournal(timeCreatedNS))
+ if err != nil {
+ log.Infof("Error querying reparent journal: %v", err)
+ }
if err == nil && len(qr.Rows) == 1 {
// we have the row, we're done
return nil
diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go
index 4aba50b8903..bcad45012d1 100644
--- a/go/vt/mysqlctl/replication.go
+++ b/go/vt/mysqlctl/replication.go
@@ -233,6 +233,17 @@ func (mysqld *Mysqld) IsReadOnly() (bool, error) {
// SetReadOnly set/unset the read_only flag
func (mysqld *Mysqld) SetReadOnly(on bool) error {
+ // temp logging, to be removed in v17
+ var newState string
+ switch on {
+ case false:
+ newState = "ReadWrite"
+ case true:
+ newState = "ReadOnly"
+ }
+ log.Infof("SetReadOnly setting connection setting of %s:%d to : %s",
+ mysqld.dbcfgs.Host, mysqld.dbcfgs.Port, newState)
+
query := "SET GLOBAL read_only = "
if on {
query += "ON"
@@ -396,14 +407,6 @@ func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, host string, por
if replicationStopBefore {
cmds = append(cmds, conn.StopReplicationCommand())
}
- // Reset replication parameters commands makes the instance forget the source host port
- // This is required because sometimes MySQL gets stuck due to improper initialization of
- // master info structure or related failures and throws errors like
- // ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log
- // These errors can only be resolved by resetting the replication parameters, otherwise START SLAVE fails.
- // Therefore, we have elected to always reset the replication parameters whenever we try to set the source host port
- // Since there is no real overhead, but it makes this function robust enough to also handle failures like these.
- cmds = append(cmds, conn.ResetReplicationParametersCommands()...)
smc := conn.SetReplicationSourceCommand(params, host, port, int(replicationConnectRetry.Seconds()))
cmds = append(cmds, smc)
if replicationStartAfter {
@@ -579,6 +582,44 @@ func (mysqld *Mysqld) GetGTIDMode(ctx context.Context) (string, error) {
return conn.GetGTIDMode()
}
+// FlushBinaryLogs is part of the MysqlDaemon interface.
+func (mysqld *Mysqld) FlushBinaryLogs(ctx context.Context) (err error) {
+ _, err = mysqld.FetchSuperQuery(ctx, "FLUSH BINARY LOGS")
+ return err
+}
+
+// GetBinaryLogs is part of the MysqlDaemon interface.
+func (mysqld *Mysqld) GetBinaryLogs(ctx context.Context) (binaryLogs []string, err error) {
+ qr, err := mysqld.FetchSuperQuery(ctx, "SHOW BINARY LOGS")
+ if err != nil {
+ return binaryLogs, err
+ }
+ for _, row := range qr.Rows {
+ binaryLogs = append(binaryLogs, row[0].ToString())
+ }
+ return binaryLogs, err
+}
+
+// GetPreviousGTIDs is part of the MysqlDaemon interface.
+func (mysqld *Mysqld) GetPreviousGTIDs(ctx context.Context, binlog string) (previousGtids string, err error) {
+ query := fmt.Sprintf("SHOW BINLOG EVENTS IN '%s' LIMIT 2", binlog)
+ qr, err := mysqld.FetchSuperQuery(ctx, query)
+ if err != nil {
+ return previousGtids, err
+ }
+ previousGtidsFound := false
+ for _, row := range qr.Named().Rows {
+ if row.AsString("Event_type", "") == "Previous_gtids" {
+ previousGtids = row.AsString("Info", "")
+ previousGtidsFound = true
+ }
+ }
+ if !previousGtidsFound {
+ return previousGtids, fmt.Errorf("GetPreviousGTIDs: previous GTIDs not found")
+ }
+ return previousGtids, nil
+}
+
// SetSemiSyncEnabled enables or disables semi-sync replication for
// primary and/or replica mode.
func (mysqld *Mysqld) SetSemiSyncEnabled(primary, replica bool) error {
diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go
index 9c7ed553a25..406b5c59499 100644
--- a/go/vt/mysqlctl/schema.go
+++ b/go/vt/mysqlctl/schema.go
@@ -24,6 +24,7 @@ import (
"strings"
"sync"
+ "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/vterrors"
@@ -40,6 +41,14 @@ import (
var autoIncr = regexp.MustCompile(` AUTO_INCREMENT=\d+`)
+type EmptyColumnsErr struct {
+ dbName, tableName, query string
+}
+
+func (e EmptyColumnsErr) Error() string {
+ return fmt.Sprintf("unable to get columns for table %s.%s using query %s", e.dbName, e.tableName, e.query)
+}
+
// executeSchemaCommands executes some SQL commands, using the mysql
// command line tool. It uses the dba connection parameters, with credentials.
func (mysqld *Mysqld) executeSchemaCommands(sql string) error {
@@ -51,9 +60,9 @@ func (mysqld *Mysqld) executeSchemaCommands(sql string) error {
return mysqld.executeMysqlScript(params, strings.NewReader(sql))
}
-func encodeTableName(tableName string) string {
+func encodeEntityName(name string) string {
var buf strings.Builder
- sqltypes.NewVarChar(tableName).EncodeSQL(&buf)
+ sqltypes.NewVarChar(name).EncodeSQL(&buf)
return buf.String()
}
@@ -65,7 +74,7 @@ func tableListSQL(tables []string) (string, error) {
encodedTables := make([]string, len(tables))
for i, tableName := range tables {
- encodedTables[i] = encodeTableName(tableName)
+ encodedTables[i] = encodeEntityName(tableName)
}
return "(" + strings.Join(encodedTables, ", ") + ")", nil
@@ -109,6 +118,15 @@ func (mysqld *Mysqld) GetSchema(ctx context.Context, dbName string, request *tab
fields, columns, schema, err := mysqld.collectSchema(ctx, dbName, td.Name, td.Type, request.TableSchemaOnly)
if err != nil {
+ // There's a possible race condition: it could happen that a table was dropped in between reading
+ // the list of tables (collectBasicTableData(), earlier) and the point above where we investigate
+ // the table.
+ // This is fine. We identify the situation and keep the table without any fields/columns/key information
+ sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError)
+ if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNoSuchTable {
+ return
+ }
+
allErrors.RecordError(err)
cancel()
return
@@ -121,6 +139,8 @@ func (mysqld *Mysqld) GetSchema(ctx context.Context, dbName string, request *tab
}
// Get primary columns concurrently.
+ // The below runs a single query on `INFORMATION_SCHEMA` and does not interact with the actual tables.
+ // It is therefore safe to run even if some tables are dropped in the interim.
colMap := map[string][]string{}
if len(tableNames) > 0 {
wg.Add(1)
@@ -232,7 +252,7 @@ func (mysqld *Mysqld) normalizedSchema(ctx context.Context, dbName, tableName, t
backtickDBName := sqlescape.EscapeID(dbName)
qr, fetchErr := mysqld.FetchSuperQuery(ctx, fmt.Sprintf("SHOW CREATE TABLE %s.%s", backtickDBName, sqlescape.EscapeID(tableName)))
if fetchErr != nil {
- return "", fetchErr
+ return "", vterrors.Wrapf(fetchErr, "in Mysqld.normalizedSchema()")
}
if len(qr.Rows) == 0 {
return "", fmt.Errorf("empty create table statement for %v", tableName)
@@ -270,26 +290,30 @@ func ResolveTables(ctx context.Context, mysqld MysqlDaemon, dbName string, table
const (
GetColumnNamesQuery = `SELECT COLUMN_NAME as column_name
FROM INFORMATION_SCHEMA.COLUMNS
- WHERE TABLE_SCHEMA = %s AND TABLE_NAME = '%s'
+ WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s
ORDER BY ORDINAL_POSITION`
GetFieldsQuery = "SELECT %s FROM %s WHERE 1 != 1"
)
+// GetColumnsList returns the column names for a given table/view, using a query generating function.
+// Returned values:
+// - selectColumns: a string of comma delimited qualified names to be used in a SELECT query. e.g. "`id`, `name`, `val`"
+// - err: error
func GetColumnsList(dbName, tableName string, exec func(string, int, bool) (*sqltypes.Result, error)) (string, error) {
var dbName2 string
if dbName == "" {
dbName2 = "database()"
} else {
- dbName2 = fmt.Sprintf("'%s'", dbName)
+ dbName2 = encodeEntityName(dbName)
}
- query := fmt.Sprintf(GetColumnNamesQuery, dbName2, sqlescape.UnescapeID(tableName))
+ query := fmt.Sprintf(GetColumnNamesQuery, dbName2, encodeEntityName(sqlescape.UnescapeID(tableName)))
qr, err := exec(query, -1, true)
if err != nil {
return "", err
}
if qr == nil || len(qr.Rows) == 0 {
- err = fmt.Errorf("unable to get columns for table %s.%s using query %s", dbName, tableName, query)
- log.Errorf("%s", fmt.Errorf("unable to get columns for table %s.%s using query %s", dbName, tableName, query))
+ err := &EmptyColumnsErr{dbName: dbName, tableName: tableName, query: query}
+ log.Error(err.Error())
return "", err
}
selectColumns := ""
@@ -322,7 +346,7 @@ func GetColumns(dbName, table string, exec func(string, int, bool) (*sqltypes.Re
query := fmt.Sprintf(GetFieldsQuery, selectColumns, tableSpec)
qr, err := exec(query, 0, true)
if err != nil {
- return nil, nil, err
+ return nil, nil, vterrors.Wrapf(err, "in Mysqld.GetColumns()")
}
columns := make([]string, len(qr.Fields))
@@ -367,9 +391,9 @@ func (mysqld *Mysqld) getPrimaryKeyColumns(ctx context.Context, dbName string, t
sql := `
SELECT TABLE_NAME as table_name, COLUMN_NAME as column_name
FROM information_schema.STATISTICS
- WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary'
+ WHERE TABLE_SCHEMA = %s AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary'
ORDER BY table_name, SEQ_IN_INDEX`
- sql = fmt.Sprintf(sql, dbName, tableList)
+ sql = fmt.Sprintf(sql, encodeEntityName(dbName), tableList)
qr, err := conn.ExecuteFetch(sql, len(tables)*100, true)
if err != nil {
return nil, err
@@ -589,16 +613,18 @@ func (mysqld *Mysqld) GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName
END
) AS type_cost, COUNT(stats.COLUMN_NAME) AS col_count FROM information_schema.STATISTICS AS stats INNER JOIN
information_schema.COLUMNS AS cols ON stats.TABLE_SCHEMA = cols.TABLE_SCHEMA AND stats.TABLE_NAME = cols.TABLE_NAME AND stats.COLUMN_NAME = cols.COLUMN_NAME
- WHERE stats.TABLE_SCHEMA = '%s' AND stats.TABLE_NAME = '%s' AND stats.INDEX_NAME NOT IN
+ WHERE stats.TABLE_SCHEMA = %s AND stats.TABLE_NAME = %s AND stats.INDEX_NAME NOT IN
(
SELECT DISTINCT INDEX_NAME FROM information_schema.STATISTICS
- WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s' AND (NON_UNIQUE = 1 OR NULLABLE = 'YES')
+ WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s AND (NON_UNIQUE = 1 OR NULLABLE = 'YES')
)
GROUP BY INDEX_NAME ORDER BY type_cost ASC, col_count ASC LIMIT 1
) AS pke ON index_cols.INDEX_NAME = pke.INDEX_NAME
- WHERE index_cols.TABLE_SCHEMA = '%s' AND index_cols.TABLE_NAME = '%s' AND NON_UNIQUE = 0 AND NULLABLE != 'YES'
+ WHERE index_cols.TABLE_SCHEMA = %s AND index_cols.TABLE_NAME = %s AND NON_UNIQUE = 0 AND NULLABLE != 'YES'
ORDER BY SEQ_IN_INDEX ASC`
- sql = fmt.Sprintf(sql, dbName, table, dbName, table, dbName, table)
+ encodedDbName := encodeEntityName(dbName)
+ encodedTable := encodeEntityName(table)
+ sql = fmt.Sprintf(sql, encodedDbName, encodedTable, encodedDbName, encodedTable, encodedDbName, encodedTable)
qr, err := conn.ExecuteFetch(sql, 1000, true)
if err != nil {
return nil, err
diff --git a/go/vt/mysqlctl/schema_test.go b/go/vt/mysqlctl/schema_test.go
index 5ec02be9960..fb64f8ca8ee 100644
--- a/go/vt/mysqlctl/schema_test.go
+++ b/go/vt/mysqlctl/schema_test.go
@@ -15,7 +15,7 @@ var queryMap map[string]*sqltypes.Result
func mockExec(query string, maxRows int, wantFields bool) (*sqltypes.Result, error) {
queryMap = make(map[string]*sqltypes.Result)
- getColsQuery := fmt.Sprintf(GetColumnNamesQuery, "'test'", "t1")
+ getColsQuery := fmt.Sprintf(GetColumnNamesQuery, "'test'", "'t1'")
queryMap[getColsQuery] = &sqltypes.Result{
Fields: []*querypb.Field{{
Name: "column_name",
@@ -40,7 +40,7 @@ func mockExec(query string, maxRows int, wantFields bool) (*sqltypes.Result, err
Type: sqltypes.VarBinary,
}},
}
- getColsQuery = fmt.Sprintf(GetColumnNamesQuery, "database()", "t2")
+ getColsQuery = fmt.Sprintf(GetColumnNamesQuery, "database()", "'t2'")
queryMap[getColsQuery] = &sqltypes.Result{
Fields: []*querypb.Field{{
Name: "column_name",
@@ -61,6 +61,29 @@ func mockExec(query string, maxRows int, wantFields bool) (*sqltypes.Result, err
if ok {
return result, nil
}
+
+ getColsQuery = fmt.Sprintf(GetColumnNamesQuery, "database()", "'with \\' quote'")
+ queryMap[getColsQuery] = &sqltypes.Result{
+ Fields: []*querypb.Field{{
+ Name: "column_name",
+ Type: sqltypes.VarChar,
+ }},
+ Rows: [][]sqltypes.Value{
+ {sqltypes.NewVarChar("col1")},
+ },
+ }
+
+ queryMap["SELECT `col1` FROM `with ' quote` WHERE 1 != 1"] = &sqltypes.Result{
+ Fields: []*querypb.Field{{
+ Name: "col1",
+ Type: sqltypes.VarChar,
+ }},
+ }
+ result, ok = queryMap[query]
+ if ok {
+ return result, nil
+ }
+
return nil, fmt.Errorf("query %s not found in mock setup", query)
}
@@ -74,4 +97,9 @@ func TestColumnList(t *testing.T) {
fields, _, err = GetColumns("", "t2", mockExec)
require.NoError(t, err)
require.Equal(t, `[name:"col1" type:VARCHAR]`, fmt.Sprintf("%+v", fields))
+
+ fields, _, err = GetColumns("", "with ' quote", mockExec)
+ require.NoError(t, err)
+ require.Equal(t, `[name:"col1" type:VARCHAR]`, fmt.Sprintf("%+v", fields))
+
}
diff --git a/go/vt/mysqlctl/tmutils/schema.go b/go/vt/mysqlctl/tmutils/schema.go
index a723e58a17d..41842d40c07 100644
--- a/go/vt/mysqlctl/tmutils/schema.go
+++ b/go/vt/mysqlctl/tmutils/schema.go
@@ -119,7 +119,7 @@ func NewTableFilter(tables, excludeTables []string, includeViews bool) (*TableFi
return nil, fmt.Errorf("cannot compile regexp %v for excludeTable: %v", table, err)
}
- f.excludeTableREs = append(f.tableREs, re)
+ f.excludeTableREs = append(f.excludeTableREs, re)
} else {
f.excludeTableNames = append(f.excludeTableNames, table)
}
diff --git a/go/vt/mysqlctl/tmutils/schema_test.go b/go/vt/mysqlctl/tmutils/schema_test.go
index b355206ff7f..472093cb869 100644
--- a/go/vt/mysqlctl/tmutils/schema_test.go
+++ b/go/vt/mysqlctl/tmutils/schema_test.go
@@ -19,9 +19,10 @@ package tmutils
import (
"errors"
"fmt"
- "reflect"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
@@ -135,9 +136,7 @@ func TestToSQLStrings(t *testing.T) {
for _, tc := range testcases {
got := SchemaDefinitionToSQLStrings(tc.input)
- if !reflect.DeepEqual(got, tc.want) {
- t.Errorf("ToSQLStrings() on SchemaDefinition %v returned %v; want %v", tc.input, got, tc.want)
- }
+ assert.Equal(t, tc.want, got)
}
}
@@ -156,12 +155,7 @@ func testDiff(t *testing.T, left, right *tabletmanagerdatapb.SchemaDefinition, l
}
}
}
-
- if !equal {
- t.Logf("Expected: %v", expected)
- t.Logf("Actual: %v", actual)
- t.Fail()
- }
+ assert.Truef(t, equal, "expected: %v, actual: %v", expected, actual)
}
func TestSchemaDiff(t *testing.T) {
@@ -433,6 +427,26 @@ func TestTableFilter(t *testing.T) {
included: false,
},
+ {
+ desc: "exclude table list does not list table",
+ excludeTables: []string{"nomatch1", "nomatch2", "/nomatch3/", "/nomatch4/", "/nomatch5/"},
+ includeViews: true,
+
+ tableName: excludedTable,
+ tableType: TableBaseTable,
+
+ included: true,
+ },
+ {
+ desc: "exclude table list with re match",
+ excludeTables: []string{"nomatch1", "nomatch2", "/nomatch3/", "/" + excludedTable + "/", "/nomatch5/"},
+ includeViews: true,
+
+ tableName: excludedTable,
+ tableType: TableBaseTable,
+
+ included: false,
+ },
{
desc: "bad table regexp",
tables: []string{"/*/"},
@@ -450,18 +464,16 @@ func TestTableFilter(t *testing.T) {
for _, tc := range tcs {
t.Run(tc.desc, func(t *testing.T) {
f, err := NewTableFilter(tc.tables, tc.excludeTables, tc.includeViews)
- if tc.hasErr != (err != nil) {
- t.Fatalf("hasErr not right: %v, tc: %+v", err, tc)
- }
-
if tc.hasErr {
+ assert.Error(t, err)
return
}
+ assert.NoError(t, err)
+ assert.Equal(t, len(tc.tables), len(f.tableNames)+len(f.tableREs))
+ assert.Equal(t, len(tc.excludeTables), len(f.excludeTableNames)+len(f.excludeTableREs))
included := f.Includes(tc.tableName, tc.tableType)
- if tc.included != included {
- t.Fatalf("included is not right: %v\nfilter: %+v\ntc: %+v", included, f, tc)
- }
+ assert.Equalf(t, tc.included, included, "filter: %v", f)
})
}
}
@@ -638,21 +650,15 @@ func TestFilterTables(t *testing.T) {
}
for _, tc := range testcases {
- got, err := FilterTables(tc.input, tc.tables, tc.excludeTables, tc.includeViews)
- if tc.wantError != nil {
- if err == nil {
- t.Fatalf("FilterTables() test '%v' on SchemaDefinition %v did not return an error (result: %v), but should have, wantError %v", tc.desc, tc.input, got, tc.wantError)
- }
- if err.Error() != tc.wantError.Error() {
- t.Errorf("FilterTables() test '%v' on SchemaDefinition %v returned wrong error '%v'; wanted error '%v'", tc.desc, tc.input, err, tc.wantError)
- }
- } else {
- if err != nil {
- t.Errorf("FilterTables() test '%v' on SchemaDefinition %v failed with error %v, want %v", tc.desc, tc.input, err, tc.want)
- }
- if !proto.Equal(got, tc.want) {
- t.Errorf("FilterTables() test '%v' on SchemaDefinition %v returned %v; want %v", tc.desc, tc.input, got, tc.want)
+ t.Run(tc.desc, func(t *testing.T) {
+ got, err := FilterTables(tc.input, tc.tables, tc.excludeTables, tc.includeViews)
+ if tc.wantError != nil {
+ require.Error(t, err)
+ require.Equal(t, tc.wantError, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Truef(t, proto.Equal(tc.want, got), "wanted: %v, got: %v", tc.want, got)
}
- }
+ })
}
}
diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go
index d0d131e3cee..2831f682e29 100644
--- a/go/vt/mysqlctl/xtrabackupengine.go
+++ b/go/vt/mysqlctl/xtrabackupengine.go
@@ -105,17 +105,17 @@ type xtraBackupManifest struct {
}
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vtbackup", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vtbackup", "vttestserver", "vtctldclient"} {
servenv.OnParseFor(cmd, registerXtraBackupEngineFlags)
}
}
func registerXtraBackupEngineFlags(fs *pflag.FlagSet) {
- fs.StringVar(&xtrabackupEnginePath, "xtrabackup_root_path", xtrabackupEnginePath, "directory location of the xtrabackup and xbstream executables, e.g., /usr/bin")
- fs.StringVar(&xtrabackupBackupFlags, "xtrabackup_backup_flags", xtrabackupBackupFlags, "flags to pass to backup command. These should be space separated and will be added to the end of the command")
- fs.StringVar(&xtrabackupPrepareFlags, "xtrabackup_prepare_flags", xtrabackupPrepareFlags, "flags to pass to prepare command. These should be space separated and will be added to the end of the command")
- fs.StringVar(&xbstreamRestoreFlags, "xbstream_restore_flags", xbstreamRestoreFlags, "flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt")
- fs.StringVar(&xtrabackupStreamMode, "xtrabackup_stream_mode", xtrabackupStreamMode, "which mode to use if streaming, valid values are tar and xbstream")
+ fs.StringVar(&xtrabackupEnginePath, "xtrabackup_root_path", xtrabackupEnginePath, "Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin")
+ fs.StringVar(&xtrabackupBackupFlags, "xtrabackup_backup_flags", xtrabackupBackupFlags, "Flags to pass to backup command. These should be space separated and will be added to the end of the command")
+ fs.StringVar(&xtrabackupPrepareFlags, "xtrabackup_prepare_flags", xtrabackupPrepareFlags, "Flags to pass to prepare command. These should be space separated and will be added to the end of the command")
+ fs.StringVar(&xbstreamRestoreFlags, "xbstream_restore_flags", xbstreamRestoreFlags, "Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt")
+ fs.StringVar(&xtrabackupStreamMode, "xtrabackup_stream_mode", xtrabackupStreamMode, "Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0")
fs.StringVar(&xtrabackupUser, "xtrabackup_user", xtrabackupUser, "User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.")
fs.UintVar(&xtrabackupStripes, "xtrabackup_stripes", xtrabackupStripes, "If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression")
fs.UintVar(&xtrabackupStripeBlockSize, "xtrabackup_stripe_block_size", xtrabackupStripeBlockSize, "Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe")
@@ -156,6 +156,9 @@ func closeFile(wc io.WriteCloser, fileName string, logger logutil.Logger, finalE
// and an overall error.
func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (complete bool, finalErr error) {
+ if params.IncrementalFromPos != "" {
+ return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "incremental backups not supported in xtrabackup engine.")
+ }
if xtrabackupUser == "" {
return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "xtrabackupUser must be specified.")
}
@@ -179,6 +182,11 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara
if err != nil {
return false, vterrors.Wrap(err, "unable to obtain primary position")
}
+ serverUUID, err := conn.GetServerUUID()
+ if err != nil {
+ return false, vterrors.Wrap(err, "can't get server uuid")
+ }
+
flavor := pos.GTIDSet.Flavor()
params.Logger.Infof("Detected MySQL flavor: %v", flavor)
@@ -211,6 +219,10 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara
BackupManifest: BackupManifest{
BackupMethod: xtrabackupEngineName,
Position: replicationPosition,
+ ServerUUID: serverUUID,
+ TabletAlias: params.TabletAlias,
+ Keyspace: params.Keyspace,
+ Shard: params.Shard,
BackupTime: params.BackupTime.UTC().Format(time.RFC3339),
FinishedTime: time.Now().UTC().Format(time.RFC3339),
},
@@ -451,7 +463,6 @@ func (be *XtrabackupEngine) ExecuteRestore(ctx context.Context, params RestorePa
func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, bm xtraBackupManifest, logger logutil.Logger) error {
// first download the file into a tmp dir
// and extract all the files
-
tempDir := fmt.Sprintf("%v/%v", cnf.TmpDir, time.Now().UTC().Format("xtrabackup-2006-01-02.150405"))
// create tempDir
if err := os.MkdirAll(tempDir, os.ModePerm); err != nil {
@@ -465,6 +476,16 @@ func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, b
}
}(tempDir, logger)
+ // For optimization, we are replacing pargzip with pgzip, so newBuiltinDecompressor doesn't have to compare and print warning for every file
+ // since newBuiltinDecompressor is helper method and does not hold any state, it was hard to do it in that method itself.
+ if bm.CompressionEngine == PargzipCompressor {
+ logger.Warningf(`engine "pargzip" doesn't support decompression, using "pgzip" instead`)
+ bm.CompressionEngine = PgzipCompressor
+ defer func() {
+ bm.CompressionEngine = PargzipCompressor
+ }()
+ }
+
if err := be.extractFiles(ctx, logger, bh, bm, tempDir); err != nil {
logger.Errorf("error extracting backup files: %v", err)
return err
diff --git a/go/vt/proto/automation/automation.pb.go b/go/vt/proto/automation/automation.pb.go
index 5282daee098..45a2d192e46 100644
--- a/go/vt/proto/automation/automation.pb.go
+++ b/go/vt/proto/automation/automation.pb.go
@@ -20,7 +20,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: automation.proto
diff --git a/go/vt/proto/automation/automation_vtproto.pb.go b/go/vt/proto/automation/automation_vtproto.pb.go
index 6ace649a486..ec4d0d7f1bf 100644
--- a/go/vt/proto/automation/automation_vtproto.pb.go
+++ b/go/vt/proto/automation/automation_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: automation.proto
package automation
@@ -511,9 +511,7 @@ func (m *ClusterOperation) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -532,9 +530,7 @@ func (m *TaskContainer) SizeVT() (n int) {
if m.Concurrency != 0 {
n += 1 + sov(uint64(m.Concurrency))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -571,9 +567,7 @@ func (m *Task) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -595,9 +589,7 @@ func (m *EnqueueClusterOperationRequest) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -611,9 +603,7 @@ func (m *EnqueueClusterOperationResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -627,9 +617,7 @@ func (m *GetClusterOperationStateRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -642,9 +630,7 @@ func (m *GetClusterOperationStateResponse) SizeVT() (n int) {
if m.State != 0 {
n += 1 + sov(uint64(m.State))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -658,9 +644,7 @@ func (m *GetClusterOperationDetailsRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -674,9 +658,7 @@ func (m *GetClusterOperationDetailsResponse) SizeVT() (n int) {
l = m.ClusterOp.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1899,6 +1881,7 @@ func (m *GetClusterOperationDetailsResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/automationservice/automationservice.pb.go b/go/vt/proto/automationservice/automationservice.pb.go
index a179f8f52f7..2c3ff0b031c 100644
--- a/go/vt/proto/automationservice/automationservice.pb.go
+++ b/go/vt/proto/automationservice/automationservice.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: automationservice.proto
diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go
index 22d61cfdc5f..df2467d3e3c 100644
--- a/go/vt/proto/binlogdata/binlogdata.pb.go
+++ b/go/vt/proto/binlogdata/binlogdata.pb.go
@@ -19,7 +19,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: binlogdata.proto
@@ -234,6 +234,10 @@ const (
VEventType_VERSION VEventType = 17
VEventType_LASTPK VEventType = 18
VEventType_SAVEPOINT VEventType = 19
+ // COPY_COMPLETED is sent when VTGate's VStream copy operation is done.
+ // If a client experiences some disruptions before receiving the event,
+ // the client should restart the copy operation.
+ VEventType_COPY_COMPLETED VEventType = 20
)
// Enum value maps for VEventType.
@@ -259,28 +263,30 @@ var (
17: "VERSION",
18: "LASTPK",
19: "SAVEPOINT",
+ 20: "COPY_COMPLETED",
}
VEventType_value = map[string]int32{
- "UNKNOWN": 0,
- "GTID": 1,
- "BEGIN": 2,
- "COMMIT": 3,
- "ROLLBACK": 4,
- "DDL": 5,
- "INSERT": 6,
- "REPLACE": 7,
- "UPDATE": 8,
- "DELETE": 9,
- "SET": 10,
- "OTHER": 11,
- "ROW": 12,
- "FIELD": 13,
- "HEARTBEAT": 14,
- "VGTID": 15,
- "JOURNAL": 16,
- "VERSION": 17,
- "LASTPK": 18,
- "SAVEPOINT": 19,
+ "UNKNOWN": 0,
+ "GTID": 1,
+ "BEGIN": 2,
+ "COMMIT": 3,
+ "ROLLBACK": 4,
+ "DDL": 5,
+ "INSERT": 6,
+ "REPLACE": 7,
+ "UPDATE": 8,
+ "DELETE": 9,
+ "SET": 10,
+ "OTHER": 11,
+ "ROW": 12,
+ "FIELD": 13,
+ "HEARTBEAT": 14,
+ "VGTID": 15,
+ "JOURNAL": 16,
+ "VERSION": 17,
+ "LASTPK": 18,
+ "SAVEPOINT": 19,
+ "COPY_COMPLETED": 20,
}
)
@@ -932,6 +938,10 @@ type Rule struct {
// SourceUniqueKeyTargetColumns represents the names of columns in target table, mapped from the chosen unique
// key on source tables (some columns may be renamed from source to target)
SourceUniqueKeyTargetColumns string `protobuf:"bytes,7,opt,name=source_unique_key_target_columns,json=sourceUniqueKeyTargetColumns,proto3" json:"source_unique_key_target_columns,omitempty"`
+ // ConvertIntToEnum lists any columns that are converted from an integral value into an enum.
+ // such columns need to have special transofrmation of the data, from an integral format into a
+ // string format. e.g. the value 0 needs to be converted to '0'.
+ ConvertIntToEnum map[string]bool `protobuf:"bytes,8,rep,name=convert_int_to_enum,json=convertIntToEnum,proto3" json:"convert_int_to_enum,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
}
func (x *Rule) Reset() {
@@ -1015,6 +1025,13 @@ func (x *Rule) GetSourceUniqueKeyTargetColumns() string {
return ""
}
+func (x *Rule) GetConvertIntToEnum() map[string]bool {
+ if x != nil {
+ return x.ConvertIntToEnum
+ }
+ return nil
+}
+
// Filter represents a list of ordered rules. The first
// match wins.
type Filter struct {
@@ -2739,7 +2756,7 @@ var file_binlogdata_proto_rawDesc = []byte{
0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x43, 0x68,
0x61, 0x72, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x5f, 0x63, 0x68, 0x61, 0x72,
0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x43, 0x68, 0x61,
- 0x72, 0x73, 0x65, 0x74, 0x22, 0xc3, 0x04, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x14, 0x0a,
+ 0x72, 0x73, 0x65, 0x74, 0x22, 0xdf, 0x05, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x14, 0x0a,
0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6d, 0x61,
0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x14, 0x63,
@@ -2765,238 +2782,203 @@ var file_binlogdata_proto_rawDesc = []byte{
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x07,
0x20, 0x01, 0x28, 0x09, 0x52, 0x1c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x71,
0x75, 0x65, 0x4b, 0x65, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x6c, 0x75, 0x6d,
- 0x6e, 0x73, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x45, 0x6e, 0x75,
- 0x6d, 0x54, 0x6f, 0x54, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x60, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x76,
- 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
- 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
- 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1d, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68,
- 0x61, 0x72, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x01, 0x0a, 0x06, 0x46,
- 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x4b, 0x0a,
- 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f,
- 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
- 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65,
- 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x5f, 0x4f, 0x4e,
- 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x42,
- 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x01, 0x22, 0xea, 0x03, 0x0a,
- 0x0c, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12,
- 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61,
- 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12,
- 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x06, 0x6f,
- 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69,
- 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x65,
- 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x79, 0x73,
- 0x71, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72,
- 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f,
- 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0a,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12,
- 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a,
- 0x6f, 0x6e, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0x51, 0x0a, 0x09, 0x52, 0x6f, 0x77,
- 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52,
- 0x6f, 0x77, 0x52, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x20, 0x0a, 0x05, 0x61, 0x66,
- 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x22, 0x93, 0x01, 0x0a,
- 0x08, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x5f,
- 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68,
- 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
- 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x6e,
+ 0x74, 0x5f, 0x74, 0x6f, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x26, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x54, 0x6f, 0x45, 0x6e,
+ 0x75, 0x6d, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74,
+ 0x49, 0x6e, 0x74, 0x54, 0x6f, 0x45, 0x6e, 0x75, 0x6d, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e,
+ 0x76, 0x65, 0x72, 0x74, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x6f, 0x54, 0x65, 0x78, 0x74, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a,
+ 0x60, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x54,
+ 0x6f, 0x45, 0x6e, 0x75, 0x6d, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x12, 0x26, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x10, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75,
+ 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x10, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x77,
+ 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77,
+ 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65,
+ 0x22, 0x36, 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x6f,
+ 0x64, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53,
+ 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x45, 0x53, 0x54, 0x5f,
+ 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x01, 0x22, 0xea, 0x03, 0x0a, 0x0c, 0x42, 0x69, 0x6e,
+ 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69,
+ 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64,
+ 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x12, 0x26,
+ 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70,
+ 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74,
+ 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18,
+ 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x69, 0x6d,
+ 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0x51, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e,
+ 0x67, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06,
+ 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x20, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f,
+ 0x77, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x22, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e,
+ 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c,
+ 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65,
+ 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x83,
+ 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74,
+ 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62,
+ 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c,
+ 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x4b, 0x73, 0x22,
+ 0x3f, 0x0a, 0x05, 0x56, 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73,
+ 0x22, 0x41, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x22, 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12,
+ 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12,
+ 0x40, 0x0a, 0x0e, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0d, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18,
+ 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74,
+ 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
+ 0x77, 0x73, 0x22, 0x8b, 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69,
+ 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x6f, 0x77,
+ 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62,
+ 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0b,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2d,
+ 0x0a, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x75,
+ 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a,
+ 0x03, 0x64, 0x6d, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6d, 0x6c, 0x12,
+ 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e,
+ 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65,
- 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06,
- 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18,
+ 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64,
+ 0x22, 0x68, 0x0a, 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f,
+ 0x6b, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52,
+ 0x09, 0x70, 0x4b, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4d, 0x69,
+ 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, 0x06, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69,
+ 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xc7, 0x02,
+ 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
+ 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65,
+ 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12,
+ 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69,
+ 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x50, 0x4b, 0x73, 0x22, 0x3f, 0x0a, 0x05, 0x56, 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47,
- 0x74, 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72,
- 0x6e, 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, 0x0e, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69,
- 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18,
- 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a,
- 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74,
- 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c,
- 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64,
- 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c,
- 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70,
- 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18,
- 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72,
- 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x8b, 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a,
- 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67,
- 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12,
- 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a,
- 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x14, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f,
- 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x12, 0x37, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66,
- 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74,
- 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f,
- 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74,
- 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61,
- 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6d, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x64, 0x6d, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65,
- 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70,
- 0x5f, 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74,
- 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74,
- 0x6c, 0x65, 0x64, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74,
- 0x74, 0x6c, 0x65, 0x64, 0x22, 0x68, 0x0a, 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e,
- 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x41,
- 0x0a, 0x0d, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
- 0x30, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e,
- 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x73, 0x22, 0xc7, 0x02, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
- 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61,
- 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74,
- 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64,
- 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56,
- 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a,
- 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65,
- 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x56,
- 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
- 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
- 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f,
- 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
- 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74,
- 0x70, 0x6b, 0x22, 0xf9, 0x01, 0x0a, 0x13, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f,
- 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73,
- 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74,
- 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e,
- 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22,
- 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a,
- 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74,
- 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64,
- 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x22, 0x69,
- 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a,
- 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63,
- 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
- 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70,
- 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73,
- 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a,
+ 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e,
+ 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x56, 0x53, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a,
0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65,
0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72,
0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66,
@@ -3009,45 +2991,91 @@ var file_binlogdata_proto_rawDesc = []byte{
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61,
0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05,
0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73,
- 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06,
- 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77,
- 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10,
- 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45,
- 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47,
- 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a,
- 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f,
- 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69,
- 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44,
- 0x4c, 0x10, 0x05, 0x2a, 0x34, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x2a, 0xf9, 0x01, 0x0a, 0x0a, 0x56, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, 0x01, 0x12,
- 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f,
- 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41,
- 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a,
- 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50,
- 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45,
- 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x09, 0x12, 0x07,
- 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52,
- 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, 0x0a, 0x05, 0x46,
- 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42,
- 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, 0x44, 0x10, 0x0f,
- 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, 0x0b, 0x0a,
- 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41,
- 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, 0x45, 0x50, 0x4f,
- 0x49, 0x4e, 0x54, 0x10, 0x13, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69,
+ 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xf9,
+ 0x01, 0x0a, 0x13, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08,
+ 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f,
+ 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61,
+ 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c,
+ 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09,
+ 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x22, 0x69, 0x0a, 0x0b, 0x4c, 0x61,
+ 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c,
+ 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70,
+ 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61,
+ 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e,
+ 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72,
+ 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22,
+ 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
+ 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x72,
+ 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74,
+ 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f,
+ 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a,
+ 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45, 0x58, 0x45, 0x43, 0x10,
+ 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45,
+ 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f,
+ 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x10, 0x00, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x10, 0x01, 0x12,
+ 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x49,
+ 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74,
+ 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x10, 0x04,
+ 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x2a,
+ 0x34, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57,
+ 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08,
+ 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x61, 0x72, 0x74,
+ 0x69, 0x61, 0x6c, 0x10, 0x01, 0x2a, 0x8d, 0x02, 0x0a, 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42,
+ 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54,
+ 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04,
+ 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53,
+ 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45,
+ 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a,
+ 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
+ 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07,
+ 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44,
+ 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10,
+ 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07,
+ 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52,
+ 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b,
+ 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10,
+ 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45,
+ 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53,
0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29,
0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65,
@@ -3069,7 +3097,7 @@ func file_binlogdata_proto_rawDescGZIP() []byte {
}
var file_binlogdata_proto_enumTypes = make([]protoimpl.EnumInfo, 7)
-var file_binlogdata_proto_msgTypes = make([]protoimpl.MessageInfo, 31)
+var file_binlogdata_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
var file_binlogdata_proto_goTypes = []interface{}{
(OnDDLAction)(0), // 0: binlogdata.OnDDLAction
(VReplicationWorkflowType)(0), // 1: binlogdata.VReplicationWorkflowType
@@ -3109,78 +3137,80 @@ var file_binlogdata_proto_goTypes = []interface{}{
(*BinlogTransaction_Statement)(nil), // 35: binlogdata.BinlogTransaction.Statement
nil, // 36: binlogdata.Rule.ConvertEnumToTextEntry
nil, // 37: binlogdata.Rule.ConvertCharsetEntry
- (*query.EventToken)(nil), // 38: query.EventToken
- (*topodata.KeyRange)(nil), // 39: topodata.KeyRange
- (topodata.TabletType)(0), // 40: topodata.TabletType
- (*query.Row)(nil), // 41: query.Row
- (*query.Field)(nil), // 42: query.Field
- (*vtrpc.CallerID)(nil), // 43: vtrpc.CallerID
- (*query.VTGateCallerID)(nil), // 44: query.VTGateCallerID
- (*query.Target)(nil), // 45: query.Target
- (*query.QueryResult)(nil), // 46: query.QueryResult
+ nil, // 38: binlogdata.Rule.ConvertIntToEnumEntry
+ (*query.EventToken)(nil), // 39: query.EventToken
+ (*topodata.KeyRange)(nil), // 40: topodata.KeyRange
+ (topodata.TabletType)(0), // 41: topodata.TabletType
+ (*query.Row)(nil), // 42: query.Row
+ (*query.Field)(nil), // 43: query.Field
+ (*vtrpc.CallerID)(nil), // 44: vtrpc.CallerID
+ (*query.VTGateCallerID)(nil), // 45: query.VTGateCallerID
+ (*query.Target)(nil), // 46: query.Target
+ (*query.QueryResult)(nil), // 47: query.QueryResult
}
var file_binlogdata_proto_depIdxs = []int32{
35, // 0: binlogdata.BinlogTransaction.statements:type_name -> binlogdata.BinlogTransaction.Statement
- 38, // 1: binlogdata.BinlogTransaction.event_token:type_name -> query.EventToken
- 39, // 2: binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange
+ 39, // 1: binlogdata.BinlogTransaction.event_token:type_name -> query.EventToken
+ 40, // 2: binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange
7, // 3: binlogdata.StreamKeyRangeRequest.charset:type_name -> binlogdata.Charset
8, // 4: binlogdata.StreamKeyRangeResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction
7, // 5: binlogdata.StreamTablesRequest.charset:type_name -> binlogdata.Charset
8, // 6: binlogdata.StreamTablesResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction
36, // 7: binlogdata.Rule.convert_enum_to_text:type_name -> binlogdata.Rule.ConvertEnumToTextEntry
37, // 8: binlogdata.Rule.convert_charset:type_name -> binlogdata.Rule.ConvertCharsetEntry
- 14, // 9: binlogdata.Filter.rules:type_name -> binlogdata.Rule
- 6, // 10: binlogdata.Filter.field_event_mode:type_name -> binlogdata.Filter.FieldEventMode
- 40, // 11: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType
- 39, // 12: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange
- 15, // 13: binlogdata.BinlogSource.filter:type_name -> binlogdata.Filter
- 0, // 14: binlogdata.BinlogSource.on_ddl:type_name -> binlogdata.OnDDLAction
- 41, // 15: binlogdata.RowChange.before:type_name -> query.Row
- 41, // 16: binlogdata.RowChange.after:type_name -> query.Row
- 17, // 17: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange
- 42, // 18: binlogdata.FieldEvent.fields:type_name -> query.Field
- 32, // 19: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK
- 20, // 20: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid
- 4, // 21: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType
- 20, // 22: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid
- 22, // 23: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard
- 3, // 24: binlogdata.VEvent.type:type_name -> binlogdata.VEventType
- 18, // 25: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent
- 19, // 26: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent
- 21, // 27: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid
- 23, // 28: binlogdata.VEvent.journal:type_name -> binlogdata.Journal
- 31, // 29: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent
- 42, // 30: binlogdata.MinimalTable.fields:type_name -> query.Field
- 25, // 31: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable
- 43, // 32: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 44, // 33: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 45, // 34: binlogdata.VStreamRequest.target:type_name -> query.Target
- 15, // 35: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter
- 32, // 36: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK
- 24, // 37: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent
- 43, // 38: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 44, // 39: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 45, // 40: binlogdata.VStreamRowsRequest.target:type_name -> query.Target
- 46, // 41: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult
- 42, // 42: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field
- 42, // 43: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field
- 41, // 44: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row
- 41, // 45: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row
- 32, // 46: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK
- 46, // 47: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult
- 43, // 48: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 44, // 49: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 45, // 50: binlogdata.VStreamResultsRequest.target:type_name -> query.Target
- 42, // 51: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field
- 41, // 52: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row
- 5, // 53: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category
- 7, // 54: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset
- 13, // 55: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion
- 56, // [56:56] is the sub-list for method output_type
- 56, // [56:56] is the sub-list for method input_type
- 56, // [56:56] is the sub-list for extension type_name
- 56, // [56:56] is the sub-list for extension extendee
- 0, // [0:56] is the sub-list for field type_name
+ 38, // 9: binlogdata.Rule.convert_int_to_enum:type_name -> binlogdata.Rule.ConvertIntToEnumEntry
+ 14, // 10: binlogdata.Filter.rules:type_name -> binlogdata.Rule
+ 6, // 11: binlogdata.Filter.field_event_mode:type_name -> binlogdata.Filter.FieldEventMode
+ 41, // 12: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType
+ 40, // 13: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange
+ 15, // 14: binlogdata.BinlogSource.filter:type_name -> binlogdata.Filter
+ 0, // 15: binlogdata.BinlogSource.on_ddl:type_name -> binlogdata.OnDDLAction
+ 42, // 16: binlogdata.RowChange.before:type_name -> query.Row
+ 42, // 17: binlogdata.RowChange.after:type_name -> query.Row
+ 17, // 18: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange
+ 43, // 19: binlogdata.FieldEvent.fields:type_name -> query.Field
+ 32, // 20: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK
+ 20, // 21: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid
+ 4, // 22: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType
+ 20, // 23: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid
+ 22, // 24: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard
+ 3, // 25: binlogdata.VEvent.type:type_name -> binlogdata.VEventType
+ 18, // 26: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent
+ 19, // 27: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent
+ 21, // 28: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid
+ 23, // 29: binlogdata.VEvent.journal:type_name -> binlogdata.Journal
+ 31, // 30: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent
+ 43, // 31: binlogdata.MinimalTable.fields:type_name -> query.Field
+ 25, // 32: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable
+ 44, // 33: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 45, // 34: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 46, // 35: binlogdata.VStreamRequest.target:type_name -> query.Target
+ 15, // 36: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter
+ 32, // 37: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK
+ 24, // 38: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent
+ 44, // 39: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 45, // 40: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 46, // 41: binlogdata.VStreamRowsRequest.target:type_name -> query.Target
+ 47, // 42: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult
+ 43, // 43: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field
+ 43, // 44: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field
+ 42, // 45: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row
+ 42, // 46: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row
+ 32, // 47: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK
+ 47, // 48: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult
+ 44, // 49: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 45, // 50: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 46, // 51: binlogdata.VStreamResultsRequest.target:type_name -> query.Target
+ 43, // 52: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field
+ 42, // 53: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row
+ 5, // 54: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category
+ 7, // 55: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset
+ 13, // 56: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion
+ 57, // [57:57] is the sub-list for method output_type
+ 57, // [57:57] is the sub-list for method input_type
+ 57, // [57:57] is the sub-list for extension type_name
+ 57, // [57:57] is the sub-list for extension extendee
+ 0, // [0:57] is the sub-list for field type_name
}
func init() { file_binlogdata_proto_init() }
@@ -3544,7 +3574,7 @@ func file_binlogdata_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_binlogdata_proto_rawDesc,
NumEnums: 7,
- NumMessages: 31,
+ NumMessages: 32,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go
index cda4788cb6f..ab1c3bc2495 100644
--- a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go
+++ b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: binlogdata.proto
package binlogdata
@@ -462,6 +462,28 @@ func (m *Rule) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.ConvertIntToEnum) > 0 {
+ for k := range m.ConvertIntToEnum {
+ v := m.ConvertIntToEnum[k]
+ baseI := i
+ i--
+ if v {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarint(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarint(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
if len(m.SourceUniqueKeyTargetColumns) > 0 {
i -= len(m.SourceUniqueKeyTargetColumns)
copy(dAtA[i:], m.SourceUniqueKeyTargetColumns)
@@ -2042,9 +2064,7 @@ func (m *Charset) SizeVT() (n int) {
if m.Server != 0 {
n += 1 + sov(uint64(m.Server))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2065,9 +2085,7 @@ func (m *BinlogTransaction_Statement) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2087,9 +2105,7 @@ func (m *BinlogTransaction) SizeVT() (n int) {
l = m.EventToken.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2111,9 +2127,7 @@ func (m *StreamKeyRangeRequest) SizeVT() (n int) {
l = m.Charset.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2127,9 +2141,7 @@ func (m *StreamKeyRangeResponse) SizeVT() (n int) {
l = m.BinlogTransaction.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2153,9 +2165,7 @@ func (m *StreamTablesRequest) SizeVT() (n int) {
l = m.Charset.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2169,9 +2179,7 @@ func (m *StreamTablesResponse) SizeVT() (n int) {
l = m.BinlogTransaction.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2189,9 +2197,7 @@ func (m *CharsetConversion) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2242,9 +2248,15 @@ func (m *Rule) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ if len(m.ConvertIntToEnum) > 0 {
+ for k, v := range m.ConvertIntToEnum {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + 1
+ n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
+ }
}
+ n += len(m.unknownFields)
return n
}
@@ -2270,9 +2282,7 @@ func (m *Filter) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2329,9 +2339,7 @@ func (m *BinlogSource) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2349,9 +2357,7 @@ func (m *RowChange) SizeVT() (n int) {
l = m.After.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2379,9 +2385,7 @@ func (m *RowEvent) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2409,9 +2413,7 @@ func (m *FieldEvent) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2439,9 +2441,7 @@ func (m *ShardGtid) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2457,9 +2457,7 @@ func (m *VGtid) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2477,9 +2475,7 @@ func (m *KeyspaceShard) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2523,9 +2519,7 @@ func (m *Journal) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2587,9 +2581,7 @@ func (m *VEvent) SizeVT() (n int) {
if m.Throttled {
n += 3
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2616,9 +2608,7 @@ func (m *MinimalTable) SizeVT() (n int) {
}
n += 1 + sov(uint64(l)) + l
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2634,9 +2624,7 @@ func (m *MinimalSchema) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2672,9 +2660,7 @@ func (m *VStreamRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2690,9 +2676,7 @@ func (m *VStreamResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2722,9 +2706,7 @@ func (m *VStreamRowsRequest) SizeVT() (n int) {
l = m.Lastpk.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2766,9 +2748,7 @@ func (m *VStreamRowsResponse) SizeVT() (n int) {
if m.Heartbeat {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2785,9 +2765,7 @@ func (m *LastPKEvent) SizeVT() (n int) {
if m.Completed {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2805,9 +2783,7 @@ func (m *TableLastPK) SizeVT() (n int) {
l = m.Lastpk.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2833,9 +2809,7 @@ func (m *VStreamResultsRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2861,9 +2835,7 @@ func (m *VStreamResultsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4282,6 +4254,121 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error {
}
m.SourceUniqueKeyTargetColumns = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConvertIntToEnum", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConvertIntToEnum == nil {
+ m.ConvertIntToEnum = make(map[string]bool)
+ }
+ var mapkey string
+ var mapvalue bool
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLength
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLength
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapvaluetemp int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapvaluetemp |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ mapvalue = bool(mapvaluetemp != 0)
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.ConvertIntToEnum[mapkey] = mapvalue
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -8152,6 +8239,7 @@ func (m *VStreamResultsResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go b/go/vt/proto/binlogservice/binlogservice.pb.go
index 2c3b33de1cd..44b47ac33f7 100644
--- a/go/vt/proto/binlogservice/binlogservice.pb.go
+++ b/go/vt/proto/binlogservice/binlogservice.pb.go
@@ -19,7 +19,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: binlogservice.proto
diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go
index 72409893b4c..4e41806d4a8 100644
--- a/go/vt/proto/logutil/logutil.pb.go
+++ b/go/vt/proto/logutil/logutil.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: logutil.proto
diff --git a/go/vt/proto/logutil/logutil_vtproto.pb.go b/go/vt/proto/logutil/logutil_vtproto.pb.go
index 9cf8af3253c..234c26eea93 100644
--- a/go/vt/proto/logutil/logutil_vtproto.pb.go
+++ b/go/vt/proto/logutil/logutil_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: logutil.proto
package logutil
@@ -121,9 +121,7 @@ func (m *Event) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -322,6 +320,7 @@ func (m *Event) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go
index a4e67f96fe3..b6a407ea601 100644
--- a/go/vt/proto/mysqlctl/mysqlctl.pb.go
+++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: mysqlctl.proto
diff --git a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go
index 2300c293fe9..00e37e145a6 100644
--- a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go
+++ b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: mysqlctl.proto
package mysqlctl
@@ -485,9 +485,7 @@ func (m *StartRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -497,9 +495,7 @@ func (m *StartResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -512,9 +508,7 @@ func (m *ShutdownRequest) SizeVT() (n int) {
if m.WaitForMysqld {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -524,9 +518,7 @@ func (m *ShutdownResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -536,9 +528,7 @@ func (m *RunMysqlUpgradeRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -548,9 +538,7 @@ func (m *RunMysqlUpgradeResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -560,9 +548,7 @@ func (m *ReinitConfigRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -572,9 +558,7 @@ func (m *ReinitConfigResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -584,9 +568,7 @@ func (m *RefreshConfigRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -596,9 +578,7 @@ func (m *RefreshConfigResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -639,9 +619,7 @@ func (m *BackupInfo) SizeVT() (n int) {
if m.Status != 0 {
n += 1 + sov(uint64(m.Status))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1515,6 +1493,7 @@ func (m *BackupInfo) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go
index 67328389a72..29302b4e662 100644
--- a/go/vt/proto/query/query.pb.go
+++ b/go/vt/proto/query/query.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: query.proto
@@ -475,6 +475,56 @@ func (TransactionState) EnumDescriptor() ([]byte, []int) {
return file_query_proto_rawDescGZIP(), []int{3}
}
+// SchemaTableType represents the type of table requested.
+type SchemaTableType int32
+
+const (
+ SchemaTableType_VIEWS SchemaTableType = 0
+ SchemaTableType_TABLES SchemaTableType = 1
+ SchemaTableType_ALL SchemaTableType = 2
+)
+
+// Enum value maps for SchemaTableType.
+var (
+ SchemaTableType_name = map[int32]string{
+ 0: "VIEWS",
+ 1: "TABLES",
+ 2: "ALL",
+ }
+ SchemaTableType_value = map[string]int32{
+ "VIEWS": 0,
+ "TABLES": 1,
+ "ALL": 2,
+ }
+)
+
+func (x SchemaTableType) Enum() *SchemaTableType {
+ p := new(SchemaTableType)
+ *p = x
+ return p
+}
+
+func (x SchemaTableType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SchemaTableType) Descriptor() protoreflect.EnumDescriptor {
+ return file_query_proto_enumTypes[4].Descriptor()
+}
+
+func (SchemaTableType) Type() protoreflect.EnumType {
+ return &file_query_proto_enumTypes[4]
+}
+
+func (x SchemaTableType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SchemaTableType.Descriptor instead.
+func (SchemaTableType) EnumDescriptor() ([]byte, []int) {
+ return file_query_proto_rawDescGZIP(), []int{4}
+}
+
type ExecuteOptions_IncludedFields int32
const (
@@ -508,11 +558,11 @@ func (x ExecuteOptions_IncludedFields) String() string {
}
func (ExecuteOptions_IncludedFields) Descriptor() protoreflect.EnumDescriptor {
- return file_query_proto_enumTypes[4].Descriptor()
+ return file_query_proto_enumTypes[5].Descriptor()
}
func (ExecuteOptions_IncludedFields) Type() protoreflect.EnumType {
- return &file_query_proto_enumTypes[4]
+ return &file_query_proto_enumTypes[5]
}
func (x ExecuteOptions_IncludedFields) Number() protoreflect.EnumNumber {
@@ -560,11 +610,11 @@ func (x ExecuteOptions_Workload) String() string {
}
func (ExecuteOptions_Workload) Descriptor() protoreflect.EnumDescriptor {
- return file_query_proto_enumTypes[5].Descriptor()
+ return file_query_proto_enumTypes[6].Descriptor()
}
func (ExecuteOptions_Workload) Type() protoreflect.EnumType {
- return &file_query_proto_enumTypes[5]
+ return &file_query_proto_enumTypes[6]
}
func (x ExecuteOptions_Workload) Number() protoreflect.EnumNumber {
@@ -625,11 +675,11 @@ func (x ExecuteOptions_TransactionIsolation) String() string {
}
func (ExecuteOptions_TransactionIsolation) Descriptor() protoreflect.EnumDescriptor {
- return file_query_proto_enumTypes[6].Descriptor()
+ return file_query_proto_enumTypes[7].Descriptor()
}
func (ExecuteOptions_TransactionIsolation) Type() protoreflect.EnumType {
- return &file_query_proto_enumTypes[6]
+ return &file_query_proto_enumTypes[7]
}
func (x ExecuteOptions_TransactionIsolation) Number() protoreflect.EnumNumber {
@@ -686,11 +736,11 @@ func (x ExecuteOptions_PlannerVersion) String() string {
}
func (ExecuteOptions_PlannerVersion) Descriptor() protoreflect.EnumDescriptor {
- return file_query_proto_enumTypes[7].Descriptor()
+ return file_query_proto_enumTypes[8].Descriptor()
}
func (ExecuteOptions_PlannerVersion) Type() protoreflect.EnumType {
- return &file_query_proto_enumTypes[7]
+ return &file_query_proto_enumTypes[8]
}
func (x ExecuteOptions_PlannerVersion) Number() protoreflect.EnumNumber {
@@ -702,6 +752,107 @@ func (ExecuteOptions_PlannerVersion) EnumDescriptor() ([]byte, []int) {
return file_query_proto_rawDescGZIP(), []int{6, 3}
}
+type ExecuteOptions_Consolidator int32
+
+const (
+ ExecuteOptions_CONSOLIDATOR_UNSPECIFIED ExecuteOptions_Consolidator = 0
+ ExecuteOptions_CONSOLIDATOR_DISABLED ExecuteOptions_Consolidator = 1
+ ExecuteOptions_CONSOLIDATOR_ENABLED ExecuteOptions_Consolidator = 2
+ ExecuteOptions_CONSOLIDATOR_ENABLED_REPLICAS ExecuteOptions_Consolidator = 3
+)
+
+// Enum value maps for ExecuteOptions_Consolidator.
+var (
+ ExecuteOptions_Consolidator_name = map[int32]string{
+ 0: "CONSOLIDATOR_UNSPECIFIED",
+ 1: "CONSOLIDATOR_DISABLED",
+ 2: "CONSOLIDATOR_ENABLED",
+ 3: "CONSOLIDATOR_ENABLED_REPLICAS",
+ }
+ ExecuteOptions_Consolidator_value = map[string]int32{
+ "CONSOLIDATOR_UNSPECIFIED": 0,
+ "CONSOLIDATOR_DISABLED": 1,
+ "CONSOLIDATOR_ENABLED": 2,
+ "CONSOLIDATOR_ENABLED_REPLICAS": 3,
+ }
+)
+
+func (x ExecuteOptions_Consolidator) Enum() *ExecuteOptions_Consolidator {
+ p := new(ExecuteOptions_Consolidator)
+ *p = x
+ return p
+}
+
+func (x ExecuteOptions_Consolidator) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ExecuteOptions_Consolidator) Descriptor() protoreflect.EnumDescriptor {
+ return file_query_proto_enumTypes[9].Descriptor()
+}
+
+func (ExecuteOptions_Consolidator) Type() protoreflect.EnumType {
+ return &file_query_proto_enumTypes[9]
+}
+
+func (x ExecuteOptions_Consolidator) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ExecuteOptions_Consolidator.Descriptor instead.
+func (ExecuteOptions_Consolidator) EnumDescriptor() ([]byte, []int) {
+ return file_query_proto_rawDescGZIP(), []int{6, 4}
+}
+
+type ExecuteOptions_TransactionAccessMode int32
+
+const (
+ ExecuteOptions_CONSISTENT_SNAPSHOT ExecuteOptions_TransactionAccessMode = 0
+ ExecuteOptions_READ_WRITE ExecuteOptions_TransactionAccessMode = 1
+ ExecuteOptions_READ_ONLY ExecuteOptions_TransactionAccessMode = 2
+)
+
+// Enum value maps for ExecuteOptions_TransactionAccessMode.
+var (
+ ExecuteOptions_TransactionAccessMode_name = map[int32]string{
+ 0: "CONSISTENT_SNAPSHOT",
+ 1: "READ_WRITE",
+ 2: "READ_ONLY",
+ }
+ ExecuteOptions_TransactionAccessMode_value = map[string]int32{
+ "CONSISTENT_SNAPSHOT": 0,
+ "READ_WRITE": 1,
+ "READ_ONLY": 2,
+ }
+)
+
+func (x ExecuteOptions_TransactionAccessMode) Enum() *ExecuteOptions_TransactionAccessMode {
+ p := new(ExecuteOptions_TransactionAccessMode)
+ *p = x
+ return p
+}
+
+func (x ExecuteOptions_TransactionAccessMode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ExecuteOptions_TransactionAccessMode) Descriptor() protoreflect.EnumDescriptor {
+ return file_query_proto_enumTypes[10].Descriptor()
+}
+
+func (ExecuteOptions_TransactionAccessMode) Type() protoreflect.EnumType {
+ return &file_query_proto_enumTypes[10]
+}
+
+func (x ExecuteOptions_TransactionAccessMode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ExecuteOptions_TransactionAccessMode.Descriptor instead.
+func (ExecuteOptions_TransactionAccessMode) EnumDescriptor() ([]byte, []int) {
+ return file_query_proto_rawDescGZIP(), []int{6, 5}
+}
+
// The category of one statement.
type StreamEvent_Statement_Category int32
@@ -736,11 +887,11 @@ func (x StreamEvent_Statement_Category) String() string {
}
func (StreamEvent_Statement_Category) Descriptor() protoreflect.EnumDescriptor {
- return file_query_proto_enumTypes[8].Descriptor()
+ return file_query_proto_enumTypes[11].Descriptor()
}
func (StreamEvent_Statement_Category) Type() protoreflect.EnumType {
- return &file_query_proto_enumTypes[8]
+ return &file_query_proto_enumTypes[11]
}
func (x StreamEvent_Statement_Category) Number() protoreflect.EnumNumber {
@@ -1177,7 +1328,11 @@ type ExecuteOptions struct {
// has_created_temp_tables signals whether plans created in this session should be cached or not
// if the user has created temp tables, Vitess will not reuse plans created for this session in other sessions.
// The current session can still use other sessions cached plans.
- HasCreatedTempTables bool `protobuf:"varint,12,opt,name=has_created_temp_tables,json=hasCreatedTempTables,proto3" json:"has_created_temp_tables,omitempty"`
+ HasCreatedTempTables bool `protobuf:"varint,12,opt,name=has_created_temp_tables,json=hasCreatedTempTables,proto3" json:"has_created_temp_tables,omitempty"`
+ Consolidator ExecuteOptions_Consolidator `protobuf:"varint,13,opt,name=consolidator,proto3,enum=query.ExecuteOptions_Consolidator" json:"consolidator,omitempty"`
+ // TransactionAccessMode specifies the access modes to be used while starting the transaction i.e. READ WRITE/READ ONLY/WITH CONSISTENT SNAPSHOT
+ // If not specified, the transaction will be started with the default access mode on the connection.
+ TransactionAccessMode []ExecuteOptions_TransactionAccessMode `protobuf:"varint,14,rep,packed,name=transaction_access_mode,json=transactionAccessMode,proto3,enum=query.ExecuteOptions_TransactionAccessMode" json:"transaction_access_mode,omitempty"`
}
func (x *ExecuteOptions) Reset() {
@@ -1268,6 +1423,20 @@ func (x *ExecuteOptions) GetHasCreatedTempTables() bool {
return false
}
+func (x *ExecuteOptions) GetConsolidator() ExecuteOptions_Consolidator {
+ if x != nil {
+ return x.Consolidator
+ }
+ return ExecuteOptions_CONSOLIDATOR_UNSPECIFIED
+}
+
+func (x *ExecuteOptions) GetTransactionAccessMode() []ExecuteOptions_TransactionAccessMode {
+ if x != nil {
+ return x.TransactionAccessMode
+ }
+ return nil
+}
+
// Field describes a single column returned by a query
type Field struct {
state protoimpl.MessageState
@@ -4872,6 +5041,8 @@ type RealtimeStats struct {
Qps float64 `protobuf:"fixed64,6,opt,name=qps,proto3" json:"qps,omitempty"`
// table_schema_changed is to provide list of tables that have schema changes detected by the tablet.
TableSchemaChanged []string `protobuf:"bytes,7,rep,name=table_schema_changed,json=tableSchemaChanged,proto3" json:"table_schema_changed,omitempty"`
+ // view_schema_changed is to provide list of views that have schema changes detected by the tablet.
+ ViewSchemaChanged []string `protobuf:"bytes,8,rep,name=view_schema_changed,json=viewSchemaChanged,proto3" json:"view_schema_changed,omitempty"`
}
func (x *RealtimeStats) Reset() {
@@ -4955,6 +5126,13 @@ func (x *RealtimeStats) GetTableSchemaChanged() []string {
return nil
}
+func (x *RealtimeStats) GetViewSchemaChanged() []string {
+ if x != nil {
+ return x.ViewSchemaChanged
+ }
+ return nil
+}
+
// AggregateStats contains information about the health of a group of
// tablets for a Target. It is used to propagate stats from a vtgate
// to another, or from the Gateway layer of a vtgate to the routing
@@ -5069,16 +5247,22 @@ type StreamHealthResponse struct {
//
// In practice, this field is set to:
// a) the last time the RPC tabletmanager.TabletExternallyReparented was
- // called on this tablet (usually done by an external failover tool e.g.
- // Orchestrator). The failover tool can call this as long as we are the
- // primary i.e. even ages after the last reparent occurred.
+ //
+ // called on this tablet (usually done by an external failover tool e.g.
+ // Orchestrator). The failover tool can call this as long as we are the
+ // primary i.e. even ages after the last reparent occurred.
+ //
// OR
// b) the last time an active reparent was executed through a vtctl command
- // (InitShardPrimary, PlannedReparentShard, EmergencyReparentShard)
+ //
+ // (InitShardPrimary, PlannedReparentShard, EmergencyReparentShard)
+ //
// OR
// c) the last time vttablet was started and it initialized its tablet type
- // as PRIMARY because it was recorded as the shard's current primary in the
- // topology (see go/vt/vttablet/tabletmanager/init_tablet.go)
+ //
+ // as PRIMARY because it was recorded as the shard's current primary in the
+ // topology (see go/vt/vttablet/tabletmanager/init_tablet.go)
+ //
// OR
// d) 0 if the vttablet was never a PRIMARY.
TabletExternallyReparentedTimestamp int64 `protobuf:"varint,3,opt,name=tablet_externally_reparented_timestamp,json=tabletExternallyReparentedTimestamp,proto3" json:"tablet_externally_reparented_timestamp,omitempty"`
@@ -5231,6 +5415,119 @@ func (x *TransactionMetadata) GetParticipants() []*Target {
return nil
}
+// GetSchemaRequest is the payload to GetSchema
+type GetSchemaRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Target `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ TableType SchemaTableType `protobuf:"varint,2,opt,name=table_type,json=tableType,proto3,enum=query.SchemaTableType" json:"table_type,omitempty"`
+ TableNames []string `protobuf:"bytes,3,rep,name=table_names,json=tableNames,proto3" json:"table_names,omitempty"`
+}
+
+func (x *GetSchemaRequest) Reset() {
+ *x = GetSchemaRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_query_proto_msgTypes[62]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSchemaRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSchemaRequest) ProtoMessage() {}
+
+func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_query_proto_msgTypes[62]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead.
+func (*GetSchemaRequest) Descriptor() ([]byte, []int) {
+ return file_query_proto_rawDescGZIP(), []int{62}
+}
+
+func (x *GetSchemaRequest) GetTarget() *Target {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *GetSchemaRequest) GetTableType() SchemaTableType {
+ if x != nil {
+ return x.TableType
+ }
+ return SchemaTableType_VIEWS
+}
+
+func (x *GetSchemaRequest) GetTableNames() []string {
+ if x != nil {
+ return x.TableNames
+ }
+ return nil
+}
+
+// GetSchemaResponse is the returned value from GetSchema
+type GetSchemaResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // this is for the schema definition for the requested tables.
+ TableDefinition map[string]string `protobuf:"bytes,2,rep,name=table_definition,json=tableDefinition,proto3" json:"table_definition,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GetSchemaResponse) Reset() {
+ *x = GetSchemaResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_query_proto_msgTypes[63]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetSchemaResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSchemaResponse) ProtoMessage() {}
+
+func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_query_proto_msgTypes[63]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead.
+func (*GetSchemaResponse) Descriptor() ([]byte, []int) {
+ return file_query_proto_rawDescGZIP(), []int{63}
+}
+
+func (x *GetSchemaResponse) GetTableDefinition() map[string]string {
+ if x != nil {
+ return x.TableDefinition
+ }
+ return nil
+}
+
// One individual Statement in a transaction.
type StreamEvent_Statement struct {
state protoimpl.MessageState
@@ -5250,7 +5547,7 @@ type StreamEvent_Statement struct {
func (x *StreamEvent_Statement) Reset() {
*x = StreamEvent_Statement{}
if protoimpl.UnsafeEnabled {
- mi := &file_query_proto_msgTypes[63]
+ mi := &file_query_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5263,7 +5560,7 @@ func (x *StreamEvent_Statement) String() string {
func (*StreamEvent_Statement) ProtoMessage() {}
func (x *StreamEvent_Statement) ProtoReflect() protoreflect.Message {
- mi := &file_query_proto_msgTypes[63]
+ mi := &file_query_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5362,7 +5659,7 @@ var file_query_proto_rawDesc = []byte{
0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69,
0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0xc5, 0x07, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0xca, 0x0a, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x66,
0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x71, 0x75,
0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
@@ -5394,241 +5691,230 @@ var file_query_proto_rawDesc = []byte{
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x61, 0x73, 0x5f, 0x63, 0x72,
0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x61, 0x73, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3b, 0x0a,
- 0x0e, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12,
- 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45,
- 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
- 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x08, 0x57, 0x6f,
- 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
- 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x4c, 0x54, 0x50, 0x10,
- 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x4c, 0x41, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x44,
- 0x42, 0x41, 0x10, 0x03, 0x22, 0xa7, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a,
- 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45,
- 0x50, 0x45, 0x41, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x01, 0x12,
- 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x54, 0x45,
- 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x55, 0x4e, 0x43, 0x4f,
- 0x4d, 0x4d, 0x49, 0x54, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x45, 0x52,
- 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x43,
- 0x4f, 0x4e, 0x53, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48,
- 0x4f, 0x54, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x05, 0x12, 0x0e,
- 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x06, 0x22, 0x84,
- 0x01, 0x0a, 0x0e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x50, 0x4c, 0x41,
- 0x4e, 0x4e, 0x45, 0x52, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x01, 0x12, 0x08,
- 0x0a, 0x04, 0x47, 0x65, 0x6e, 0x34, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x34,
- 0x47, 0x72, 0x65, 0x65, 0x64, 0x79, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x47, 0x65, 0x6e, 0x34,
- 0x4c, 0x65, 0x66, 0x74, 0x32, 0x52, 0x69, 0x67, 0x68, 0x74, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10,
- 0x47, 0x65, 0x6e, 0x34, 0x57, 0x69, 0x74, 0x68, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b,
- 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72,
- 0x65, 0x56, 0x33, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10,
- 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xb8, 0x02, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65,
- 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09,
- 0x6f, 0x72, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6f, 0x72, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x67, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x67, 0x4e, 0x61, 0x6d, 0x65,
- 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74,
- 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c,
- 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x12,
- 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x0d, 0x52, 0x08, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66,
- 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67,
- 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79,
- 0x70, 0x65, 0x22, 0x37, 0x0a, 0x03, 0x52, 0x6f, 0x77, 0x12, 0x18, 0x0a, 0x07, 0x6c, 0x65, 0x6e,
- 0x67, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x12, 0x52, 0x07, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xe3, 0x01, 0x0a, 0x0b,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a, 0x06, 0x66,
- 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74,
- 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66,
- 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74,
- 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x65, 0x72,
- 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72,
- 0x6f, 0x77, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10,
- 0x06, 0x22, 0x3c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e,
- 0x67, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22,
- 0xa0, 0x03, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
- 0x3c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e,
- 0x74, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x32, 0x0a,
- 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x1a, 0x9e, 0x02, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12,
- 0x41, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
- 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f,
- 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d,
- 0x65, 0x12, 0x3a, 0x0a, 0x12, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x10, 0x70, 0x72, 0x69,
- 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x38, 0x0a,
- 0x12, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x10, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65,
- 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x27, 0x0a, 0x08, 0x43, 0x61, 0x74,
- 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x00,
- 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c,
- 0x10, 0x02, 0x22, 0xe1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
- 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65,
- 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69,
- 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61,
- 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65,
- 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a,
- 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e,
- 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x25, 0x0a,
- 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78,
- 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x3d, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73,
- 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x64, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x57,
- 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e,
- 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12,
- 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73,
- 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xe7, 0x02, 0x0a, 0x14,
- 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
- 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61,
- 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74,
- 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64,
- 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a,
- 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x43, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45,
+ 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x46, 0x0a,
+ 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x0d, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63,
+ 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x6f,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x63, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65,
+ 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45,
+ 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x72,
+ 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d,
+ 0x6f, 0x64, 0x65, 0x52, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x3b, 0x0a, 0x0e, 0x49, 0x6e,
+ 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x11, 0x0a, 0x0d,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x00, 0x12,
+ 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x07,
+ 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+ 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x4c, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08,
+ 0x0a, 0x04, 0x4f, 0x4c, 0x41, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x42, 0x41, 0x10,
+ 0x03, 0x22, 0xa7, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45,
+ 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x50, 0x45, 0x41,
+ 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e,
+ 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x54, 0x45, 0x44, 0x10, 0x02,
+ 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x55, 0x4e, 0x43, 0x4f, 0x4d, 0x4d, 0x49,
+ 0x54, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x45, 0x52, 0x49, 0x41, 0x4c,
+ 0x49, 0x5a, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4e, 0x53,
+ 0x49, 0x53, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x5f,
+ 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x41,
+ 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x06, 0x22, 0x84, 0x01, 0x0a, 0x0e,
+ 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x13,
+ 0x0a, 0x0f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45,
+ 0x52, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x47,
+ 0x65, 0x6e, 0x34, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x34, 0x47, 0x72, 0x65,
+ 0x65, 0x64, 0x79, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x47, 0x65, 0x6e, 0x34, 0x4c, 0x65, 0x66,
+ 0x74, 0x32, 0x52, 0x69, 0x67, 0x68, 0x74, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x65, 0x6e,
+ 0x34, 0x57, 0x69, 0x74, 0x68, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x10, 0x05, 0x12,
+ 0x11, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x56, 0x33,
+ 0x10, 0x06, 0x22, 0x84, 0x01, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41,
+ 0x54, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f,
+ 0x52, 0x5f, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14,
+ 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x41,
+ 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c,
+ 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x5f, 0x52,
+ 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x53, 0x10, 0x03, 0x22, 0x4f, 0x0a, 0x15, 0x54, 0x72, 0x61,
+ 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f,
+ 0x64, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x53, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x54,
+ 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52,
+ 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x52,
+ 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+ 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xb8, 0x02, 0x0a,
+ 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x72, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x72, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1a,
+ 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72,
+ 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72,
+ 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f,
+ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x63, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68,
+ 0x61, 0x72, 0x73, 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x61,
+ 0x72, 0x73, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x73,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x73,
+ 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c,
+ 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, 0x37, 0x0a, 0x03, 0x52, 0x6f, 0x77, 0x12, 0x18,
+ 0x0a, 0x07, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x12, 0x52,
+ 0x07, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73,
+ 0x22, 0xe3, 0x01, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61,
+ 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72,
+ 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x69,
+ 0x6e, 0x73, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08,
+ 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73,
+ 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52,
+ 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x15,
+ 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68,
+ 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x3c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57,
+ 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x9e, 0x02, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x08, 0x63,
+ 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x12, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x52, 0x10, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x73, 0x12, 0x38, 0x0a, 0x12, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x10, 0x70, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03,
+ 0x73, 0x71, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x27,
+ 0x0a, 0x08, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72,
+ 0x72, 0x6f, 0x72, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x07,
+ 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x02, 0x22, 0xe1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63,
+ 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66,
+ 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e,
+ 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69,
+ 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
+ 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
+ 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x3d, 0x0a, 0x0f, 0x45,
0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a,
0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xee, 0x01, 0x0a, 0x0c, 0x42,
- 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65,
- 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63,
- 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63,
- 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13,
- 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44,
- 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x0d,
- 0x42, 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a,
- 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32,
- 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f,
- 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67,
- 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
- 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61,
- 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74,
- 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64,
- 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x31, 0x0a, 0x0e, 0x43, 0x6f,
- 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
- 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0xe7, 0x01,
- 0x0a, 0x0f, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
- 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
- 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f,
- 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
- 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x33, 0x0a, 0x10, 0x52, 0x6f, 0x6c, 0x6c, 0x62,
- 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72,
- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0xfa, 0x01, 0x0a,
- 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65,
- 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25,
- 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x72, 0x65,
- 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xda, 0x01, 0x0a,
- 0x15, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52,
+ 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x64, 0x0a, 0x0f, 0x52, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x57, 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x25, 0x0a,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
+ 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x22, 0xe7, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75,
+ 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
+ 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75,
+ 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a,
+ 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x43, 0x0a, 0x15, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72,
+ 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22,
+ 0xee, 0x01, 0x0a, 0x0c, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
+ 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65,
+ 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12,
+ 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x22, 0xa4, 0x01, 0x0a, 0x0d, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
+ 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22,
+ 0x31, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x49, 0x64, 0x22, 0xe7, 0x01, 0x0a, 0x0f, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74,
0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
@@ -5640,65 +5926,13 @@ var file_query_proto_rawDesc = []byte{
0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25,
0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x18, 0x0a, 0x16, 0x43, 0x6f, 0x6d,
- 0x6d, 0x69, 0x74, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x83, 0x02, 0x0a, 0x17, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b,
- 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65,
- 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25,
- 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x6f, 0x6c,
- 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x02, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
- 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44,
- 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65,
- 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61,
- 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69,
- 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74,
- 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43,
- 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70,
- 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a,
- 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65,
- 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
- 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43,
- 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfe, 0x01,
- 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x33, 0x0a, 0x10,
+ 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49,
+ 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
@@ -5713,10 +5947,25 @@ var file_query_proto_rawDesc = []byte{
0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61,
0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74,
- 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x15,
- 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdf, 0x01, 0x0a, 0x1a, 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75,
- 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
+ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x11,
+ 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0xda, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x65, 0x70,
+ 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65,
+ 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63,
+ 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13,
+ 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72,
+ 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44,
+ 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74,
+ 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x18,
+ 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x02, 0x0a, 0x17, 0x52, 0x6f, 0x6c,
+ 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
@@ -5728,11 +5977,32 @@ var file_query_proto_rawDesc = []byte{
0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06,
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71,
0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x43, 0x6f, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74,
+ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1a,
+ 0x0a, 0x18, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x02, 0x0a, 0x18, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65,
+ 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54,
+ 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12,
+ 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06,
+ 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61,
+ 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
+ 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x1b, 0x0a,
+ 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
@@ -5744,53 +6014,13 @@ var file_query_proto_rawDesc = []byte{
0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72,
0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72,
0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x64, 0x74, 0x69, 0x64, 0x22, 0x51, 0x0a, 0x17, 0x52, 0x65, 0x61, 0x64, 0x54, 0x72, 0x61, 0x6e,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xe0, 0x02, 0x0a, 0x13, 0x42, 0x65, 0x67, 0x69,
- 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65,
- 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27,
- 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79,
- 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72,
- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65,
- 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xfe, 0x01, 0x0a, 0x14, 0x42,
- 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72,
- 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
- 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d,
- 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a,
- 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xe6, 0x02, 0x0a, 0x19,
- 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75,
- 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66,
+ 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61,
+ 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66,
0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
@@ -5801,34 +6031,48 @@ var file_query_proto_rawDesc = []byte{
0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75,
- 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65,
- 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72,
- 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
- 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x49, 0x64, 0x22, 0x84, 0x02, 0x0a, 0x1a, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74,
- 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72,
- 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
- 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d,
- 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a,
- 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xd9, 0x01, 0x0a, 0x14,
- 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64,
+ 0x74, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdf, 0x01, 0x0a, 0x1a, 0x43,
+ 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
+ 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1d, 0x0a, 0x1b,
+ 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x16,
+ 0x52, 0x65, 0x61, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64,
+ 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47,
+ 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d,
+ 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25,
+ 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x51, 0x0a, 0x17, 0x52, 0x65, 0x61,
+ 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54,
+ 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xe0, 0x02, 0x0a,
+ 0x13, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
@@ -5840,146 +6084,145 @@ var file_query_proto_rawDesc = []byte{
0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06,
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71,
0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x15, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22,
+ 0xfe, 0x01, 0x0a, 0x14, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e,
+ 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12,
+ 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69,
+ 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
+ 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15,
+ 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68,
+ 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x22, 0xe6, 0x02, 0x0a, 0x19, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f,
+ 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66,
+ 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12,
+ 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a,
+ 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52,
+ 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71,
+ 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72,
+ 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72,
+ 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x84, 0x02, 0x0a, 0x1a, 0x42, 0x65,
+ 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e,
+ 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12,
+ 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69,
+ 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
+ 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15,
+ 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68,
+ 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x22, 0xd9, 0x01, 0x0a, 0x14, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
+ 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x15,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75,
+ 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x22, 0xf6, 0x01, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x41, 0x63, 0x6b,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65,
+ 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54,
+ 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d,
+ 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12,
+ 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06,
+ 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x69, 0x64,
+ 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x40, 0x0a, 0x12, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xf6, 0x01, 0x0a,
- 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
- 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44,
- 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65,
- 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61,
- 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x40, 0x0a, 0x12, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52,
- 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
- 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
- 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f,
- 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
- 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65,
- 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64,
- 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73,
- 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69,
- 0x65, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x45, 0x78,
- 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a,
- 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65,
- 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49,
- 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0xee, 0x02, 0x0a, 0x1b,
- 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65,
- 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65,
- 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63,
- 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63,
- 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13,
- 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44,
- 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65,
- 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70,
- 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xcc, 0x01, 0x0a,
- 0x1c, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78,
- 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a,
- 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65,
- 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49,
- 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0xf4, 0x02, 0x0a, 0x1a,
- 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63,
- 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66,
- 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e,
- 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74,
- 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69,
- 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
- 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63,
- 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69,
- 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65,
- 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x74, 0x5f, 0x62, 0x65, 0x67,
- 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x10, 0x70, 0x6f, 0x73, 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x69,
- 0x65, 0x73, 0x22, 0xa6, 0x02, 0x0a, 0x1b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65,
- 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xe8, 0x02, 0x0a,
+ 0x15, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64,
+ 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47,
+ 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d,
+ 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25,
+ 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75,
+ 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x25,
+ 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45,
+ 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65,
+ 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72,
0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73,
0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72,
0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b,
- 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a,
- 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xfa, 0x02, 0x0a, 0x20,
- 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x22, 0xee, 0x02, 0x0a, 0x1b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61,
0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
@@ -5997,181 +6240,286 @@ var file_query_proto_rawDesc = []byte{
0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72,
0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65,
- 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f,
- 0x73, 0x74, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73,
- 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x74, 0x42, 0x65, 0x67, 0x69,
- 0x6e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xac, 0x02, 0x0a, 0x21, 0x52, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
- 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05,
- 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75,
- 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72,
- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65,
- 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0x87, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x65,
- 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66,
- 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e,
- 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74,
- 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69,
- 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
- 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28,
0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64,
- 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49,
- 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65,
- 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc6, 0x02, 0x0a, 0x0d,
- 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a,
- 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72,
- 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0d, 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61,
- 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x69, 0x6e, 0x6c,
- 0x6f, 0x67, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x50, 0x6c,
- 0x61, 0x79, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x20, 0x66, 0x69,
- 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f,
- 0x6e, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x63, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65,
- 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x71,
- 0x70, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, 0x61,
- 0x6e, 0x67, 0x65, 0x64, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
- 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x68, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x75, 0x6e, 0x68,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x75, 0x6e, 0x68, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
- 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c,
- 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x6d, 0x69, 0x6e, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d, 0x69, 0x6e, 0x12, 0x3d,
- 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61,
- 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d, 0x61, 0x78, 0x22, 0xa9, 0x02,
- 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a,
- 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x53, 0x0a, 0x26, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x5f, 0x72, 0x65, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x23, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45,
- 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x0e,
- 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x61,
- 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x6c,
- 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0xae, 0x01, 0x0a, 0x13, 0x54, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69,
- 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0c, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x2a, 0x92, 0x03, 0x0a, 0x09, 0x4d,
- 0x79, 0x53, 0x71, 0x6c, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x50, 0x54,
- 0x59, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x55, 0x4c, 0x4c, 0x5f,
- 0x46, 0x4c, 0x41, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x5f, 0x4b, 0x45,
- 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x49, 0x51,
- 0x55, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x04, 0x12, 0x15, 0x0a,
- 0x11, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x50, 0x4c, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c,
- 0x41, 0x47, 0x10, 0x08, 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x4c, 0x4f, 0x42, 0x5f, 0x46, 0x4c, 0x41,
- 0x47, 0x10, 0x10, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f,
- 0x46, 0x4c, 0x41, 0x47, 0x10, 0x20, 0x12, 0x11, 0x0a, 0x0d, 0x5a, 0x45, 0x52, 0x4f, 0x46, 0x49,
- 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x40, 0x12, 0x10, 0x0a, 0x0b, 0x42, 0x49, 0x4e,
- 0x41, 0x52, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x45,
- 0x4e, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x02, 0x12, 0x18, 0x0a, 0x13, 0x41,
- 0x55, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x43, 0x52, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x4c,
- 0x41, 0x47, 0x10, 0x80, 0x04, 0x12, 0x13, 0x0a, 0x0e, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41,
- 0x4d, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x45,
- 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x10, 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x4f, 0x5f,
- 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x46, 0x4c,
- 0x41, 0x47, 0x10, 0x80, 0x20, 0x12, 0x17, 0x0a, 0x12, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41,
- 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x57, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x40, 0x12, 0x0e,
- 0x0a, 0x08, 0x4e, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x13,
- 0x0a, 0x0d, 0x50, 0x41, 0x52, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10,
- 0x80, 0x80, 0x01, 0x12, 0x10, 0x0a, 0x0a, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x46, 0x4c, 0x41,
- 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x11, 0x0a, 0x0b, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f,
- 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x04, 0x12, 0x11, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x43,
- 0x4d, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x2a,
- 0x6b, 0x0a, 0x04, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10,
- 0x00, 0x12, 0x0f, 0x0a, 0x0a, 0x49, 0x53, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x41, 0x4c, 0x10,
- 0x80, 0x02, 0x12, 0x0f, 0x0a, 0x0a, 0x49, 0x53, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44,
- 0x10, 0x80, 0x04, 0x12, 0x0c, 0x0a, 0x07, 0x49, 0x53, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80,
- 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x51, 0x55, 0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10,
- 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x53, 0x54, 0x45, 0x58, 0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a,
- 0x08, 0x49, 0x53, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a,
- 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x04, 0x49, 0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12,
- 0x0a, 0x0a, 0x05, 0x55, 0x49, 0x4e, 0x54, 0x38, 0x10, 0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49,
- 0x4e, 0x54, 0x31, 0x36, 0x10, 0x83, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x31,
- 0x36, 0x10, 0x84, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x85, 0x02,
- 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x86, 0x06, 0x12, 0x0a, 0x0a,
- 0x05, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x87, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e,
- 0x54, 0x33, 0x32, 0x10, 0x88, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10,
- 0x89, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x8a, 0x06, 0x12,
- 0x0c, 0x0a, 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x33, 0x32, 0x10, 0x8b, 0x08, 0x12, 0x0c, 0x0a,
- 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x8c, 0x08, 0x12, 0x0e, 0x0a, 0x09, 0x54,
- 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x8d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x44,
- 0x41, 0x54, 0x45, 0x10, 0x8e, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x8f,
- 0x10, 0x12, 0x0d, 0x0a, 0x08, 0x44, 0x41, 0x54, 0x45, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x90, 0x10,
- 0x12, 0x09, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x91, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x44,
- 0x45, 0x43, 0x49, 0x4d, 0x41, 0x4c, 0x10, 0x12, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x45, 0x58, 0x54,
- 0x10, 0x93, 0x30, 0x12, 0x09, 0x0a, 0x04, 0x42, 0x4c, 0x4f, 0x42, 0x10, 0x94, 0x50, 0x12, 0x0c,
- 0x0a, 0x07, 0x56, 0x41, 0x52, 0x43, 0x48, 0x41, 0x52, 0x10, 0x95, 0x30, 0x12, 0x0e, 0x0a, 0x09,
- 0x56, 0x41, 0x52, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x96, 0x50, 0x12, 0x09, 0x0a, 0x04,
- 0x43, 0x48, 0x41, 0x52, 0x10, 0x97, 0x30, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x4e, 0x41, 0x52,
- 0x59, 0x10, 0x98, 0x50, 0x12, 0x08, 0x0a, 0x03, 0x42, 0x49, 0x54, 0x10, 0x99, 0x10, 0x12, 0x09,
- 0x0a, 0x04, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x9a, 0x10, 0x12, 0x08, 0x0a, 0x03, 0x53, 0x45, 0x54,
- 0x10, 0x9b, 0x10, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x55, 0x50, 0x4c, 0x45, 0x10, 0x1c, 0x12, 0x0d,
- 0x0a, 0x08, 0x47, 0x45, 0x4f, 0x4d, 0x45, 0x54, 0x52, 0x59, 0x10, 0x9d, 0x10, 0x12, 0x09, 0x0a,
- 0x04, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x9e, 0x10, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52,
- 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x1f, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e,
- 0x55, 0x4d, 0x10, 0xa0, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10,
- 0xa1, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x54, 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a,
- 0x46, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
- 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
- 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a,
- 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c,
- 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18,
+ 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72,
+ 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x22, 0xf4, 0x02, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69,
+ 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
+ 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65,
+ 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
+ 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27,
+ 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f,
+ 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70,
+ 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73,
+ 0x74, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18,
+ 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e,
+ 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xa6, 0x02, 0x0a, 0x1b, 0x52, 0x65, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52,
+ 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a,
+ 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72,
+ 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69,
+ 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
+ 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15,
+ 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68,
+ 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x22, 0xfa, 0x02, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69,
+ 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69,
+ 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61,
+ 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65,
+ 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a,
+ 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e,
+ 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12,
+ 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x74, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x71, 0x75,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6f, 0x73,
+ 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xac, 0x02,
+ 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72,
+ 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d,
+ 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38,
+ 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0x87, 0x02, 0x0a,
+ 0x0e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
+ 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65,
+ 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
+ 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25,
+ 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x22, 0xf6, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a,
+ 0x14, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x62, 0x69, 0x6e,
+ 0x6c, 0x6f, 0x67, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x47, 0x0a, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61,
+ 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x70, 0x75, 0x5f,
+ 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x63, 0x70, 0x75,
+ 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x03, 0x71, 0x70, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18,
+ 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x69, 0x65,
+ 0x77, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64,
+ 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x76, 0x69, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x41, 0x67,
+ 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14,
+ 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x68, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34,
+ 0x0a, 0x16, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14,
+ 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f,
+ 0x6d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x4d, 0x69, 0x6e, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x6d,
+ 0x61, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d,
+ 0x61, 0x78, 0x22, 0xa9, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75,
+ 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x53, 0x0a, 0x26,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c,
+ 0x79, 0x5f, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x23, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x2e, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52,
+ 0x0d, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x38,
+ 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0xae,
+ 0x01, 0x0a, 0x13, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x0c,
+ 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x22,
+ 0x91, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72,
+ 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x35, 0x0a, 0x0a, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x22, 0xb1, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69,
+ 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x92, 0x03, 0x0a, 0x09, 0x4d, 0x79, 0x53, 0x71,
+ 0x6c, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x00,
+ 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41,
+ 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46,
+ 0x4c, 0x41, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f,
+ 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x55,
+ 0x4c, 0x54, 0x49, 0x50, 0x4c, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10,
+ 0x08, 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x4c, 0x4f, 0x42, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x10,
+ 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x46, 0x4c, 0x41,
+ 0x47, 0x10, 0x20, 0x12, 0x11, 0x0a, 0x0d, 0x5a, 0x45, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x4c, 0x5f,
+ 0x46, 0x4c, 0x41, 0x47, 0x10, 0x40, 0x12, 0x10, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59,
+ 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x45, 0x4e, 0x55, 0x4d,
+ 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x02, 0x12, 0x18, 0x0a, 0x13, 0x41, 0x55, 0x54, 0x4f,
+ 0x5f, 0x49, 0x4e, 0x43, 0x52, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10,
+ 0x80, 0x04, 0x12, 0x13, 0x0a, 0x0e, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x5f,
+ 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x45, 0x54, 0x5f, 0x46,
+ 0x4c, 0x41, 0x47, 0x10, 0x80, 0x10, 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x4f, 0x5f, 0x44, 0x45, 0x46,
+ 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10,
+ 0x80, 0x20, 0x12, 0x17, 0x0a, 0x12, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f,
+ 0x4e, 0x4f, 0x57, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x40, 0x12, 0x0e, 0x0a, 0x08, 0x4e,
+ 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x13, 0x0a, 0x0d, 0x50,
+ 0x41, 0x52, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x01,
+ 0x12, 0x10, 0x0a, 0x0a, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80,
+ 0x80, 0x02, 0x12, 0x11, 0x0a, 0x0b, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41,
+ 0x47, 0x10, 0x80, 0x80, 0x04, 0x12, 0x11, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x43, 0x4d, 0x50, 0x5f,
+ 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x2a, 0x6b, 0x0a, 0x04,
+ 0x46, 0x6c, 0x61, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f,
+ 0x0a, 0x0a, 0x49, 0x53, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x41, 0x4c, 0x10, 0x80, 0x02, 0x12,
+ 0x0f, 0x0a, 0x0a, 0x49, 0x53, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x10, 0x80, 0x04,
+ 0x12, 0x0c, 0x0a, 0x07, 0x49, 0x53, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80, 0x08, 0x12, 0x0d,
+ 0x0a, 0x08, 0x49, 0x53, 0x51, 0x55, 0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10, 0x12, 0x0b, 0x0a,
+ 0x06, 0x49, 0x53, 0x54, 0x45, 0x58, 0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53,
+ 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a, 0x04, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10,
+ 0x00, 0x12, 0x09, 0x0a, 0x04, 0x49, 0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12, 0x0a, 0x0a, 0x05,
+ 0x55, 0x49, 0x4e, 0x54, 0x38, 0x10, 0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x31,
+ 0x36, 0x10, 0x83, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x84,
+ 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x85, 0x02, 0x12, 0x0b, 0x0a,
+ 0x06, 0x55, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x86, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e,
+ 0x54, 0x33, 0x32, 0x10, 0x87, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32,
+ 0x10, 0x88, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x89, 0x02, 0x12,
+ 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x8a, 0x06, 0x12, 0x0c, 0x0a, 0x07,
+ 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x33, 0x32, 0x10, 0x8b, 0x08, 0x12, 0x0c, 0x0a, 0x07, 0x46, 0x4c,
+ 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x8c, 0x08, 0x12, 0x0e, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45,
+ 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x8d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x44, 0x41, 0x54, 0x45,
+ 0x10, 0x8e, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x8f, 0x10, 0x12, 0x0d,
+ 0x0a, 0x08, 0x44, 0x41, 0x54, 0x45, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x90, 0x10, 0x12, 0x09, 0x0a,
+ 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x91, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x49,
+ 0x4d, 0x41, 0x4c, 0x10, 0x12, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x45, 0x58, 0x54, 0x10, 0x93, 0x30,
+ 0x12, 0x09, 0x0a, 0x04, 0x42, 0x4c, 0x4f, 0x42, 0x10, 0x94, 0x50, 0x12, 0x0c, 0x0a, 0x07, 0x56,
+ 0x41, 0x52, 0x43, 0x48, 0x41, 0x52, 0x10, 0x95, 0x30, 0x12, 0x0e, 0x0a, 0x09, 0x56, 0x41, 0x52,
+ 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x96, 0x50, 0x12, 0x09, 0x0a, 0x04, 0x43, 0x48, 0x41,
+ 0x52, 0x10, 0x97, 0x30, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x98,
+ 0x50, 0x12, 0x08, 0x0a, 0x03, 0x42, 0x49, 0x54, 0x10, 0x99, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x45,
+ 0x4e, 0x55, 0x4d, 0x10, 0x9a, 0x10, 0x12, 0x08, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x9b, 0x10,
+ 0x12, 0x09, 0x0a, 0x05, 0x54, 0x55, 0x50, 0x4c, 0x45, 0x10, 0x1c, 0x12, 0x0d, 0x0a, 0x08, 0x47,
+ 0x45, 0x4f, 0x4d, 0x45, 0x54, 0x52, 0x59, 0x10, 0x9d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x4a, 0x53,
+ 0x4f, 0x4e, 0x10, 0x9e, 0x10, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53,
+ 0x49, 0x4f, 0x4e, 0x10, 0x1f, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e, 0x55, 0x4d, 0x10,
+ 0xa0, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10, 0xa1, 0x20, 0x12,
+ 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x54, 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a, 0x46, 0x0a, 0x10,
+ 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a,
+ 0x07, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f,
+ 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41,
+ 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x31, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, 0x45, 0x57, 0x53,
+ 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x07,
+ 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69,
0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22, 0x76, 0x69, 0x74, 0x65,
0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06,
@@ -6190,227 +6538,238 @@ func file_query_proto_rawDescGZIP() []byte {
return file_query_proto_rawDescData
}
-var file_query_proto_enumTypes = make([]protoimpl.EnumInfo, 9)
-var file_query_proto_msgTypes = make([]protoimpl.MessageInfo, 64)
+var file_query_proto_enumTypes = make([]protoimpl.EnumInfo, 12)
+var file_query_proto_msgTypes = make([]protoimpl.MessageInfo, 67)
var file_query_proto_goTypes = []interface{}{
(MySqlFlag)(0), // 0: query.MySqlFlag
(Flag)(0), // 1: query.Flag
(Type)(0), // 2: query.Type
(TransactionState)(0), // 3: query.TransactionState
- (ExecuteOptions_IncludedFields)(0), // 4: query.ExecuteOptions.IncludedFields
- (ExecuteOptions_Workload)(0), // 5: query.ExecuteOptions.Workload
- (ExecuteOptions_TransactionIsolation)(0), // 6: query.ExecuteOptions.TransactionIsolation
- (ExecuteOptions_PlannerVersion)(0), // 7: query.ExecuteOptions.PlannerVersion
- (StreamEvent_Statement_Category)(0), // 8: query.StreamEvent.Statement.Category
- (*Target)(nil), // 9: query.Target
- (*VTGateCallerID)(nil), // 10: query.VTGateCallerID
- (*EventToken)(nil), // 11: query.EventToken
- (*Value)(nil), // 12: query.Value
- (*BindVariable)(nil), // 13: query.BindVariable
- (*BoundQuery)(nil), // 14: query.BoundQuery
- (*ExecuteOptions)(nil), // 15: query.ExecuteOptions
- (*Field)(nil), // 16: query.Field
- (*Row)(nil), // 17: query.Row
- (*QueryResult)(nil), // 18: query.QueryResult
- (*QueryWarning)(nil), // 19: query.QueryWarning
- (*StreamEvent)(nil), // 20: query.StreamEvent
- (*ExecuteRequest)(nil), // 21: query.ExecuteRequest
- (*ExecuteResponse)(nil), // 22: query.ExecuteResponse
- (*ResultWithError)(nil), // 23: query.ResultWithError
- (*StreamExecuteRequest)(nil), // 24: query.StreamExecuteRequest
- (*StreamExecuteResponse)(nil), // 25: query.StreamExecuteResponse
- (*BeginRequest)(nil), // 26: query.BeginRequest
- (*BeginResponse)(nil), // 27: query.BeginResponse
- (*CommitRequest)(nil), // 28: query.CommitRequest
- (*CommitResponse)(nil), // 29: query.CommitResponse
- (*RollbackRequest)(nil), // 30: query.RollbackRequest
- (*RollbackResponse)(nil), // 31: query.RollbackResponse
- (*PrepareRequest)(nil), // 32: query.PrepareRequest
- (*PrepareResponse)(nil), // 33: query.PrepareResponse
- (*CommitPreparedRequest)(nil), // 34: query.CommitPreparedRequest
- (*CommitPreparedResponse)(nil), // 35: query.CommitPreparedResponse
- (*RollbackPreparedRequest)(nil), // 36: query.RollbackPreparedRequest
- (*RollbackPreparedResponse)(nil), // 37: query.RollbackPreparedResponse
- (*CreateTransactionRequest)(nil), // 38: query.CreateTransactionRequest
- (*CreateTransactionResponse)(nil), // 39: query.CreateTransactionResponse
- (*StartCommitRequest)(nil), // 40: query.StartCommitRequest
- (*StartCommitResponse)(nil), // 41: query.StartCommitResponse
- (*SetRollbackRequest)(nil), // 42: query.SetRollbackRequest
- (*SetRollbackResponse)(nil), // 43: query.SetRollbackResponse
- (*ConcludeTransactionRequest)(nil), // 44: query.ConcludeTransactionRequest
- (*ConcludeTransactionResponse)(nil), // 45: query.ConcludeTransactionResponse
- (*ReadTransactionRequest)(nil), // 46: query.ReadTransactionRequest
- (*ReadTransactionResponse)(nil), // 47: query.ReadTransactionResponse
- (*BeginExecuteRequest)(nil), // 48: query.BeginExecuteRequest
- (*BeginExecuteResponse)(nil), // 49: query.BeginExecuteResponse
- (*BeginStreamExecuteRequest)(nil), // 50: query.BeginStreamExecuteRequest
- (*BeginStreamExecuteResponse)(nil), // 51: query.BeginStreamExecuteResponse
- (*MessageStreamRequest)(nil), // 52: query.MessageStreamRequest
- (*MessageStreamResponse)(nil), // 53: query.MessageStreamResponse
- (*MessageAckRequest)(nil), // 54: query.MessageAckRequest
- (*MessageAckResponse)(nil), // 55: query.MessageAckResponse
- (*ReserveExecuteRequest)(nil), // 56: query.ReserveExecuteRequest
- (*ReserveExecuteResponse)(nil), // 57: query.ReserveExecuteResponse
- (*ReserveStreamExecuteRequest)(nil), // 58: query.ReserveStreamExecuteRequest
- (*ReserveStreamExecuteResponse)(nil), // 59: query.ReserveStreamExecuteResponse
- (*ReserveBeginExecuteRequest)(nil), // 60: query.ReserveBeginExecuteRequest
- (*ReserveBeginExecuteResponse)(nil), // 61: query.ReserveBeginExecuteResponse
- (*ReserveBeginStreamExecuteRequest)(nil), // 62: query.ReserveBeginStreamExecuteRequest
- (*ReserveBeginStreamExecuteResponse)(nil), // 63: query.ReserveBeginStreamExecuteResponse
- (*ReleaseRequest)(nil), // 64: query.ReleaseRequest
- (*ReleaseResponse)(nil), // 65: query.ReleaseResponse
- (*StreamHealthRequest)(nil), // 66: query.StreamHealthRequest
- (*RealtimeStats)(nil), // 67: query.RealtimeStats
- (*AggregateStats)(nil), // 68: query.AggregateStats
- (*StreamHealthResponse)(nil), // 69: query.StreamHealthResponse
- (*TransactionMetadata)(nil), // 70: query.TransactionMetadata
- nil, // 71: query.BoundQuery.BindVariablesEntry
- (*StreamEvent_Statement)(nil), // 72: query.StreamEvent.Statement
- (topodata.TabletType)(0), // 73: topodata.TabletType
- (*vtrpc.CallerID)(nil), // 74: vtrpc.CallerID
- (*vtrpc.RPCError)(nil), // 75: vtrpc.RPCError
- (*topodata.TabletAlias)(nil), // 76: topodata.TabletAlias
+ (SchemaTableType)(0), // 4: query.SchemaTableType
+ (ExecuteOptions_IncludedFields)(0), // 5: query.ExecuteOptions.IncludedFields
+ (ExecuteOptions_Workload)(0), // 6: query.ExecuteOptions.Workload
+ (ExecuteOptions_TransactionIsolation)(0), // 7: query.ExecuteOptions.TransactionIsolation
+ (ExecuteOptions_PlannerVersion)(0), // 8: query.ExecuteOptions.PlannerVersion
+ (ExecuteOptions_Consolidator)(0), // 9: query.ExecuteOptions.Consolidator
+ (ExecuteOptions_TransactionAccessMode)(0), // 10: query.ExecuteOptions.TransactionAccessMode
+ (StreamEvent_Statement_Category)(0), // 11: query.StreamEvent.Statement.Category
+ (*Target)(nil), // 12: query.Target
+ (*VTGateCallerID)(nil), // 13: query.VTGateCallerID
+ (*EventToken)(nil), // 14: query.EventToken
+ (*Value)(nil), // 15: query.Value
+ (*BindVariable)(nil), // 16: query.BindVariable
+ (*BoundQuery)(nil), // 17: query.BoundQuery
+ (*ExecuteOptions)(nil), // 18: query.ExecuteOptions
+ (*Field)(nil), // 19: query.Field
+ (*Row)(nil), // 20: query.Row
+ (*QueryResult)(nil), // 21: query.QueryResult
+ (*QueryWarning)(nil), // 22: query.QueryWarning
+ (*StreamEvent)(nil), // 23: query.StreamEvent
+ (*ExecuteRequest)(nil), // 24: query.ExecuteRequest
+ (*ExecuteResponse)(nil), // 25: query.ExecuteResponse
+ (*ResultWithError)(nil), // 26: query.ResultWithError
+ (*StreamExecuteRequest)(nil), // 27: query.StreamExecuteRequest
+ (*StreamExecuteResponse)(nil), // 28: query.StreamExecuteResponse
+ (*BeginRequest)(nil), // 29: query.BeginRequest
+ (*BeginResponse)(nil), // 30: query.BeginResponse
+ (*CommitRequest)(nil), // 31: query.CommitRequest
+ (*CommitResponse)(nil), // 32: query.CommitResponse
+ (*RollbackRequest)(nil), // 33: query.RollbackRequest
+ (*RollbackResponse)(nil), // 34: query.RollbackResponse
+ (*PrepareRequest)(nil), // 35: query.PrepareRequest
+ (*PrepareResponse)(nil), // 36: query.PrepareResponse
+ (*CommitPreparedRequest)(nil), // 37: query.CommitPreparedRequest
+ (*CommitPreparedResponse)(nil), // 38: query.CommitPreparedResponse
+ (*RollbackPreparedRequest)(nil), // 39: query.RollbackPreparedRequest
+ (*RollbackPreparedResponse)(nil), // 40: query.RollbackPreparedResponse
+ (*CreateTransactionRequest)(nil), // 41: query.CreateTransactionRequest
+ (*CreateTransactionResponse)(nil), // 42: query.CreateTransactionResponse
+ (*StartCommitRequest)(nil), // 43: query.StartCommitRequest
+ (*StartCommitResponse)(nil), // 44: query.StartCommitResponse
+ (*SetRollbackRequest)(nil), // 45: query.SetRollbackRequest
+ (*SetRollbackResponse)(nil), // 46: query.SetRollbackResponse
+ (*ConcludeTransactionRequest)(nil), // 47: query.ConcludeTransactionRequest
+ (*ConcludeTransactionResponse)(nil), // 48: query.ConcludeTransactionResponse
+ (*ReadTransactionRequest)(nil), // 49: query.ReadTransactionRequest
+ (*ReadTransactionResponse)(nil), // 50: query.ReadTransactionResponse
+ (*BeginExecuteRequest)(nil), // 51: query.BeginExecuteRequest
+ (*BeginExecuteResponse)(nil), // 52: query.BeginExecuteResponse
+ (*BeginStreamExecuteRequest)(nil), // 53: query.BeginStreamExecuteRequest
+ (*BeginStreamExecuteResponse)(nil), // 54: query.BeginStreamExecuteResponse
+ (*MessageStreamRequest)(nil), // 55: query.MessageStreamRequest
+ (*MessageStreamResponse)(nil), // 56: query.MessageStreamResponse
+ (*MessageAckRequest)(nil), // 57: query.MessageAckRequest
+ (*MessageAckResponse)(nil), // 58: query.MessageAckResponse
+ (*ReserveExecuteRequest)(nil), // 59: query.ReserveExecuteRequest
+ (*ReserveExecuteResponse)(nil), // 60: query.ReserveExecuteResponse
+ (*ReserveStreamExecuteRequest)(nil), // 61: query.ReserveStreamExecuteRequest
+ (*ReserveStreamExecuteResponse)(nil), // 62: query.ReserveStreamExecuteResponse
+ (*ReserveBeginExecuteRequest)(nil), // 63: query.ReserveBeginExecuteRequest
+ (*ReserveBeginExecuteResponse)(nil), // 64: query.ReserveBeginExecuteResponse
+ (*ReserveBeginStreamExecuteRequest)(nil), // 65: query.ReserveBeginStreamExecuteRequest
+ (*ReserveBeginStreamExecuteResponse)(nil), // 66: query.ReserveBeginStreamExecuteResponse
+ (*ReleaseRequest)(nil), // 67: query.ReleaseRequest
+ (*ReleaseResponse)(nil), // 68: query.ReleaseResponse
+ (*StreamHealthRequest)(nil), // 69: query.StreamHealthRequest
+ (*RealtimeStats)(nil), // 70: query.RealtimeStats
+ (*AggregateStats)(nil), // 71: query.AggregateStats
+ (*StreamHealthResponse)(nil), // 72: query.StreamHealthResponse
+ (*TransactionMetadata)(nil), // 73: query.TransactionMetadata
+ (*GetSchemaRequest)(nil), // 74: query.GetSchemaRequest
+ (*GetSchemaResponse)(nil), // 75: query.GetSchemaResponse
+ nil, // 76: query.BoundQuery.BindVariablesEntry
+ (*StreamEvent_Statement)(nil), // 77: query.StreamEvent.Statement
+ nil, // 78: query.GetSchemaResponse.TableDefinitionEntry
+ (topodata.TabletType)(0), // 79: topodata.TabletType
+ (*vtrpc.CallerID)(nil), // 80: vtrpc.CallerID
+ (*vtrpc.RPCError)(nil), // 81: vtrpc.RPCError
+ (*topodata.TabletAlias)(nil), // 82: topodata.TabletAlias
}
var file_query_proto_depIdxs = []int32{
- 73, // 0: query.Target.tablet_type:type_name -> topodata.TabletType
+ 79, // 0: query.Target.tablet_type:type_name -> topodata.TabletType
2, // 1: query.Value.type:type_name -> query.Type
2, // 2: query.BindVariable.type:type_name -> query.Type
- 12, // 3: query.BindVariable.values:type_name -> query.Value
- 71, // 4: query.BoundQuery.bind_variables:type_name -> query.BoundQuery.BindVariablesEntry
- 4, // 5: query.ExecuteOptions.included_fields:type_name -> query.ExecuteOptions.IncludedFields
- 5, // 6: query.ExecuteOptions.workload:type_name -> query.ExecuteOptions.Workload
- 6, // 7: query.ExecuteOptions.transaction_isolation:type_name -> query.ExecuteOptions.TransactionIsolation
- 7, // 8: query.ExecuteOptions.planner_version:type_name -> query.ExecuteOptions.PlannerVersion
- 2, // 9: query.Field.type:type_name -> query.Type
- 16, // 10: query.QueryResult.fields:type_name -> query.Field
- 17, // 11: query.QueryResult.rows:type_name -> query.Row
- 72, // 12: query.StreamEvent.statements:type_name -> query.StreamEvent.Statement
- 11, // 13: query.StreamEvent.event_token:type_name -> query.EventToken
- 74, // 14: query.ExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 15: query.ExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 16: query.ExecuteRequest.target:type_name -> query.Target
- 14, // 17: query.ExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 18: query.ExecuteRequest.options:type_name -> query.ExecuteOptions
- 18, // 19: query.ExecuteResponse.result:type_name -> query.QueryResult
- 75, // 20: query.ResultWithError.error:type_name -> vtrpc.RPCError
- 18, // 21: query.ResultWithError.result:type_name -> query.QueryResult
- 74, // 22: query.StreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 23: query.StreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 24: query.StreamExecuteRequest.target:type_name -> query.Target
- 14, // 25: query.StreamExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 26: query.StreamExecuteRequest.options:type_name -> query.ExecuteOptions
- 18, // 27: query.StreamExecuteResponse.result:type_name -> query.QueryResult
- 74, // 28: query.BeginRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 29: query.BeginRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 30: query.BeginRequest.target:type_name -> query.Target
- 15, // 31: query.BeginRequest.options:type_name -> query.ExecuteOptions
- 76, // 32: query.BeginResponse.tablet_alias:type_name -> topodata.TabletAlias
- 74, // 33: query.CommitRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 34: query.CommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 35: query.CommitRequest.target:type_name -> query.Target
- 74, // 36: query.RollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 37: query.RollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 38: query.RollbackRequest.target:type_name -> query.Target
- 74, // 39: query.PrepareRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 40: query.PrepareRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 41: query.PrepareRequest.target:type_name -> query.Target
- 74, // 42: query.CommitPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 43: query.CommitPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 44: query.CommitPreparedRequest.target:type_name -> query.Target
- 74, // 45: query.RollbackPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 46: query.RollbackPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 47: query.RollbackPreparedRequest.target:type_name -> query.Target
- 74, // 48: query.CreateTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 49: query.CreateTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 50: query.CreateTransactionRequest.target:type_name -> query.Target
- 9, // 51: query.CreateTransactionRequest.participants:type_name -> query.Target
- 74, // 52: query.StartCommitRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 53: query.StartCommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 54: query.StartCommitRequest.target:type_name -> query.Target
- 74, // 55: query.SetRollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 56: query.SetRollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 57: query.SetRollbackRequest.target:type_name -> query.Target
- 74, // 58: query.ConcludeTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 59: query.ConcludeTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 60: query.ConcludeTransactionRequest.target:type_name -> query.Target
- 74, // 61: query.ReadTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 62: query.ReadTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 63: query.ReadTransactionRequest.target:type_name -> query.Target
- 70, // 64: query.ReadTransactionResponse.metadata:type_name -> query.TransactionMetadata
- 74, // 65: query.BeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 66: query.BeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 67: query.BeginExecuteRequest.target:type_name -> query.Target
- 14, // 68: query.BeginExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 69: query.BeginExecuteRequest.options:type_name -> query.ExecuteOptions
- 75, // 70: query.BeginExecuteResponse.error:type_name -> vtrpc.RPCError
- 18, // 71: query.BeginExecuteResponse.result:type_name -> query.QueryResult
- 76, // 72: query.BeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
- 74, // 73: query.BeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 74: query.BeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 75: query.BeginStreamExecuteRequest.target:type_name -> query.Target
- 14, // 76: query.BeginStreamExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 77: query.BeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions
- 75, // 78: query.BeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError
- 18, // 79: query.BeginStreamExecuteResponse.result:type_name -> query.QueryResult
- 76, // 80: query.BeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
- 74, // 81: query.MessageStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 82: query.MessageStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 83: query.MessageStreamRequest.target:type_name -> query.Target
- 18, // 84: query.MessageStreamResponse.result:type_name -> query.QueryResult
- 74, // 85: query.MessageAckRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 86: query.MessageAckRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 87: query.MessageAckRequest.target:type_name -> query.Target
- 12, // 88: query.MessageAckRequest.ids:type_name -> query.Value
- 18, // 89: query.MessageAckResponse.result:type_name -> query.QueryResult
- 74, // 90: query.ReserveExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 91: query.ReserveExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 92: query.ReserveExecuteRequest.target:type_name -> query.Target
- 14, // 93: query.ReserveExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 94: query.ReserveExecuteRequest.options:type_name -> query.ExecuteOptions
- 75, // 95: query.ReserveExecuteResponse.error:type_name -> vtrpc.RPCError
- 18, // 96: query.ReserveExecuteResponse.result:type_name -> query.QueryResult
- 76, // 97: query.ReserveExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
- 74, // 98: query.ReserveStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 99: query.ReserveStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 100: query.ReserveStreamExecuteRequest.target:type_name -> query.Target
- 14, // 101: query.ReserveStreamExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 102: query.ReserveStreamExecuteRequest.options:type_name -> query.ExecuteOptions
- 75, // 103: query.ReserveStreamExecuteResponse.error:type_name -> vtrpc.RPCError
- 18, // 104: query.ReserveStreamExecuteResponse.result:type_name -> query.QueryResult
- 76, // 105: query.ReserveStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
- 74, // 106: query.ReserveBeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 107: query.ReserveBeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 108: query.ReserveBeginExecuteRequest.target:type_name -> query.Target
- 14, // 109: query.ReserveBeginExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 110: query.ReserveBeginExecuteRequest.options:type_name -> query.ExecuteOptions
- 75, // 111: query.ReserveBeginExecuteResponse.error:type_name -> vtrpc.RPCError
- 18, // 112: query.ReserveBeginExecuteResponse.result:type_name -> query.QueryResult
- 76, // 113: query.ReserveBeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
- 74, // 114: query.ReserveBeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 115: query.ReserveBeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 116: query.ReserveBeginStreamExecuteRequest.target:type_name -> query.Target
- 14, // 117: query.ReserveBeginStreamExecuteRequest.query:type_name -> query.BoundQuery
- 15, // 118: query.ReserveBeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions
- 75, // 119: query.ReserveBeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError
- 18, // 120: query.ReserveBeginStreamExecuteResponse.result:type_name -> query.QueryResult
- 76, // 121: query.ReserveBeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
- 74, // 122: query.ReleaseRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 10, // 123: query.ReleaseRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 9, // 124: query.ReleaseRequest.target:type_name -> query.Target
- 9, // 125: query.StreamHealthResponse.target:type_name -> query.Target
- 67, // 126: query.StreamHealthResponse.realtime_stats:type_name -> query.RealtimeStats
- 76, // 127: query.StreamHealthResponse.tablet_alias:type_name -> topodata.TabletAlias
- 3, // 128: query.TransactionMetadata.state:type_name -> query.TransactionState
- 9, // 129: query.TransactionMetadata.participants:type_name -> query.Target
- 13, // 130: query.BoundQuery.BindVariablesEntry.value:type_name -> query.BindVariable
- 8, // 131: query.StreamEvent.Statement.category:type_name -> query.StreamEvent.Statement.Category
- 16, // 132: query.StreamEvent.Statement.primary_key_fields:type_name -> query.Field
- 17, // 133: query.StreamEvent.Statement.primary_key_values:type_name -> query.Row
- 134, // [134:134] is the sub-list for method output_type
- 134, // [134:134] is the sub-list for method input_type
- 134, // [134:134] is the sub-list for extension type_name
- 134, // [134:134] is the sub-list for extension extendee
- 0, // [0:134] is the sub-list for field type_name
+ 15, // 3: query.BindVariable.values:type_name -> query.Value
+ 76, // 4: query.BoundQuery.bind_variables:type_name -> query.BoundQuery.BindVariablesEntry
+ 5, // 5: query.ExecuteOptions.included_fields:type_name -> query.ExecuteOptions.IncludedFields
+ 6, // 6: query.ExecuteOptions.workload:type_name -> query.ExecuteOptions.Workload
+ 7, // 7: query.ExecuteOptions.transaction_isolation:type_name -> query.ExecuteOptions.TransactionIsolation
+ 8, // 8: query.ExecuteOptions.planner_version:type_name -> query.ExecuteOptions.PlannerVersion
+ 9, // 9: query.ExecuteOptions.consolidator:type_name -> query.ExecuteOptions.Consolidator
+ 10, // 10: query.ExecuteOptions.transaction_access_mode:type_name -> query.ExecuteOptions.TransactionAccessMode
+ 2, // 11: query.Field.type:type_name -> query.Type
+ 19, // 12: query.QueryResult.fields:type_name -> query.Field
+ 20, // 13: query.QueryResult.rows:type_name -> query.Row
+ 77, // 14: query.StreamEvent.statements:type_name -> query.StreamEvent.Statement
+ 14, // 15: query.StreamEvent.event_token:type_name -> query.EventToken
+ 80, // 16: query.ExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 17: query.ExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 18: query.ExecuteRequest.target:type_name -> query.Target
+ 17, // 19: query.ExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 20: query.ExecuteRequest.options:type_name -> query.ExecuteOptions
+ 21, // 21: query.ExecuteResponse.result:type_name -> query.QueryResult
+ 81, // 22: query.ResultWithError.error:type_name -> vtrpc.RPCError
+ 21, // 23: query.ResultWithError.result:type_name -> query.QueryResult
+ 80, // 24: query.StreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 25: query.StreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 26: query.StreamExecuteRequest.target:type_name -> query.Target
+ 17, // 27: query.StreamExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 28: query.StreamExecuteRequest.options:type_name -> query.ExecuteOptions
+ 21, // 29: query.StreamExecuteResponse.result:type_name -> query.QueryResult
+ 80, // 30: query.BeginRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 31: query.BeginRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 32: query.BeginRequest.target:type_name -> query.Target
+ 18, // 33: query.BeginRequest.options:type_name -> query.ExecuteOptions
+ 82, // 34: query.BeginResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 80, // 35: query.CommitRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 36: query.CommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 37: query.CommitRequest.target:type_name -> query.Target
+ 80, // 38: query.RollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 39: query.RollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 40: query.RollbackRequest.target:type_name -> query.Target
+ 80, // 41: query.PrepareRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 42: query.PrepareRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 43: query.PrepareRequest.target:type_name -> query.Target
+ 80, // 44: query.CommitPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 45: query.CommitPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 46: query.CommitPreparedRequest.target:type_name -> query.Target
+ 80, // 47: query.RollbackPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 48: query.RollbackPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 49: query.RollbackPreparedRequest.target:type_name -> query.Target
+ 80, // 50: query.CreateTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 51: query.CreateTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 52: query.CreateTransactionRequest.target:type_name -> query.Target
+ 12, // 53: query.CreateTransactionRequest.participants:type_name -> query.Target
+ 80, // 54: query.StartCommitRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 55: query.StartCommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 56: query.StartCommitRequest.target:type_name -> query.Target
+ 80, // 57: query.SetRollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 58: query.SetRollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 59: query.SetRollbackRequest.target:type_name -> query.Target
+ 80, // 60: query.ConcludeTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 61: query.ConcludeTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 62: query.ConcludeTransactionRequest.target:type_name -> query.Target
+ 80, // 63: query.ReadTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 64: query.ReadTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 65: query.ReadTransactionRequest.target:type_name -> query.Target
+ 73, // 66: query.ReadTransactionResponse.metadata:type_name -> query.TransactionMetadata
+ 80, // 67: query.BeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 68: query.BeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 69: query.BeginExecuteRequest.target:type_name -> query.Target
+ 17, // 70: query.BeginExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 71: query.BeginExecuteRequest.options:type_name -> query.ExecuteOptions
+ 81, // 72: query.BeginExecuteResponse.error:type_name -> vtrpc.RPCError
+ 21, // 73: query.BeginExecuteResponse.result:type_name -> query.QueryResult
+ 82, // 74: query.BeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 80, // 75: query.BeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 76: query.BeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 77: query.BeginStreamExecuteRequest.target:type_name -> query.Target
+ 17, // 78: query.BeginStreamExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 79: query.BeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions
+ 81, // 80: query.BeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError
+ 21, // 81: query.BeginStreamExecuteResponse.result:type_name -> query.QueryResult
+ 82, // 82: query.BeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 80, // 83: query.MessageStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 84: query.MessageStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 85: query.MessageStreamRequest.target:type_name -> query.Target
+ 21, // 86: query.MessageStreamResponse.result:type_name -> query.QueryResult
+ 80, // 87: query.MessageAckRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 88: query.MessageAckRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 89: query.MessageAckRequest.target:type_name -> query.Target
+ 15, // 90: query.MessageAckRequest.ids:type_name -> query.Value
+ 21, // 91: query.MessageAckResponse.result:type_name -> query.QueryResult
+ 80, // 92: query.ReserveExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 93: query.ReserveExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 94: query.ReserveExecuteRequest.target:type_name -> query.Target
+ 17, // 95: query.ReserveExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 96: query.ReserveExecuteRequest.options:type_name -> query.ExecuteOptions
+ 81, // 97: query.ReserveExecuteResponse.error:type_name -> vtrpc.RPCError
+ 21, // 98: query.ReserveExecuteResponse.result:type_name -> query.QueryResult
+ 82, // 99: query.ReserveExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 80, // 100: query.ReserveStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 101: query.ReserveStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 102: query.ReserveStreamExecuteRequest.target:type_name -> query.Target
+ 17, // 103: query.ReserveStreamExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 104: query.ReserveStreamExecuteRequest.options:type_name -> query.ExecuteOptions
+ 81, // 105: query.ReserveStreamExecuteResponse.error:type_name -> vtrpc.RPCError
+ 21, // 106: query.ReserveStreamExecuteResponse.result:type_name -> query.QueryResult
+ 82, // 107: query.ReserveStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 80, // 108: query.ReserveBeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 109: query.ReserveBeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 110: query.ReserveBeginExecuteRequest.target:type_name -> query.Target
+ 17, // 111: query.ReserveBeginExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 112: query.ReserveBeginExecuteRequest.options:type_name -> query.ExecuteOptions
+ 81, // 113: query.ReserveBeginExecuteResponse.error:type_name -> vtrpc.RPCError
+ 21, // 114: query.ReserveBeginExecuteResponse.result:type_name -> query.QueryResult
+ 82, // 115: query.ReserveBeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 80, // 116: query.ReserveBeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 117: query.ReserveBeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 118: query.ReserveBeginStreamExecuteRequest.target:type_name -> query.Target
+ 17, // 119: query.ReserveBeginStreamExecuteRequest.query:type_name -> query.BoundQuery
+ 18, // 120: query.ReserveBeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions
+ 81, // 121: query.ReserveBeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError
+ 21, // 122: query.ReserveBeginStreamExecuteResponse.result:type_name -> query.QueryResult
+ 82, // 123: query.ReserveBeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 80, // 124: query.ReleaseRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 13, // 125: query.ReleaseRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 12, // 126: query.ReleaseRequest.target:type_name -> query.Target
+ 12, // 127: query.StreamHealthResponse.target:type_name -> query.Target
+ 70, // 128: query.StreamHealthResponse.realtime_stats:type_name -> query.RealtimeStats
+ 82, // 129: query.StreamHealthResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 3, // 130: query.TransactionMetadata.state:type_name -> query.TransactionState
+ 12, // 131: query.TransactionMetadata.participants:type_name -> query.Target
+ 12, // 132: query.GetSchemaRequest.target:type_name -> query.Target
+ 4, // 133: query.GetSchemaRequest.table_type:type_name -> query.SchemaTableType
+ 78, // 134: query.GetSchemaResponse.table_definition:type_name -> query.GetSchemaResponse.TableDefinitionEntry
+ 16, // 135: query.BoundQuery.BindVariablesEntry.value:type_name -> query.BindVariable
+ 11, // 136: query.StreamEvent.Statement.category:type_name -> query.StreamEvent.Statement.Category
+ 19, // 137: query.StreamEvent.Statement.primary_key_fields:type_name -> query.Field
+ 20, // 138: query.StreamEvent.Statement.primary_key_values:type_name -> query.Row
+ 139, // [139:139] is the sub-list for method output_type
+ 139, // [139:139] is the sub-list for method input_type
+ 139, // [139:139] is the sub-list for extension type_name
+ 139, // [139:139] is the sub-list for extension extendee
+ 0, // [0:139] is the sub-list for field type_name
}
func init() { file_query_proto_init() }
@@ -7163,7 +7522,31 @@ func file_query_proto_init() {
return nil
}
}
+ file_query_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSchemaRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
file_query_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSchemaResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_query_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StreamEvent_Statement); i {
case 0:
return &v.state
@@ -7181,8 +7564,8 @@ func file_query_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_query_proto_rawDesc,
- NumEnums: 9,
- NumMessages: 64,
+ NumEnums: 12,
+ NumMessages: 67,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/go/vt/proto/query/query_vtproto.pb.go b/go/vt/proto/query/query_vtproto.pb.go
index 7ed228cb100..3abec4b7d1a 100644
--- a/go/vt/proto/query/query_vtproto.pb.go
+++ b/go/vt/proto/query/query_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: query.proto
package query
@@ -377,6 +377,32 @@ func (m *ExecuteOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.TransactionAccessMode) > 0 {
+ var pksize2 int
+ for _, num := range m.TransactionAccessMode {
+ pksize2 += sov(uint64(num))
+ }
+ i -= pksize2
+ j1 := i
+ for _, num1 := range m.TransactionAccessMode {
+ num := uint64(num1)
+ for num >= 1<<7 {
+ dAtA[j1] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j1++
+ }
+ dAtA[j1] = uint8(num)
+ j1++
+ }
+ i = encodeVarint(dAtA, i, uint64(pksize2))
+ i--
+ dAtA[i] = 0x72
+ }
+ if m.Consolidator != 0 {
+ i = encodeVarint(dAtA, i, uint64(m.Consolidator))
+ i--
+ dAtA[i] = 0x68
+ }
if m.HasCreatedTempTables {
i--
if m.HasCreatedTempTables {
@@ -3827,6 +3853,15 @@ func (m *RealtimeStats) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.ViewSchemaChanged) > 0 {
+ for iNdEx := len(m.ViewSchemaChanged) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ViewSchemaChanged[iNdEx])
+ copy(dAtA[i:], m.ViewSchemaChanged[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.ViewSchemaChanged[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
if len(m.TableSchemaChanged) > 0 {
for iNdEx := len(m.TableSchemaChanged) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.TableSchemaChanged[iNdEx])
@@ -4066,6 +4101,115 @@ func (m *TransactionMetadata) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.TableNames) > 0 {
+ for iNdEx := len(m.TableNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.TableNames[iNdEx])
+ copy(dAtA[i:], m.TableNames[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.TableNames[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.TableType != 0 {
+ i = encodeVarint(dAtA, i, uint64(m.TableType))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Target != nil {
+ size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.TableDefinition) > 0 {
+ for k := range m.TableDefinition {
+ v := m.TableDefinition[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarint(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarint(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarint(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarint(dAtA []byte, offset int, v uint64) int {
offset -= sov(v)
base := offset
@@ -4121,9 +4265,7 @@ func (m *Target) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4143,9 +4285,7 @@ func (m *VTGateCallerID) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4166,9 +4306,7 @@ func (m *EventToken) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4185,9 +4323,7 @@ func (m *Value) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4210,9 +4346,7 @@ func (m *BindVariable) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4239,9 +4373,7 @@ func (m *BoundQuery) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4275,9 +4407,17 @@ func (m *ExecuteOptions) SizeVT() (n int) {
if m.HasCreatedTempTables {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ if m.Consolidator != 0 {
+ n += 1 + sov(uint64(m.Consolidator))
+ }
+ if len(m.TransactionAccessMode) > 0 {
+ l = 0
+ for _, e := range m.TransactionAccessMode {
+ l += sov(uint64(e))
+ }
+ n += 1 + sov(uint64(l)) + l
}
+ n += len(m.unknownFields)
return n
}
@@ -4326,9 +4466,7 @@ func (m *Field) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4349,9 +4487,7 @@ func (m *Row) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4387,9 +4523,7 @@ func (m *QueryResult) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4406,9 +4540,7 @@ func (m *QueryWarning) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4441,9 +4573,7 @@ func (m *StreamEvent_Statement) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4463,9 +4593,7 @@ func (m *StreamEvent) SizeVT() (n int) {
l = m.EventToken.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4501,9 +4629,7 @@ func (m *ExecuteRequest) SizeVT() (n int) {
if m.ReservedId != 0 {
n += 1 + sov(uint64(m.ReservedId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4517,9 +4643,7 @@ func (m *ExecuteResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4537,9 +4661,7 @@ func (m *ResultWithError) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4575,9 +4697,7 @@ func (m *StreamExecuteRequest) SizeVT() (n int) {
if m.ReservedId != 0 {
n += 1 + sov(uint64(m.ReservedId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4591,9 +4711,7 @@ func (m *StreamExecuteResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4619,9 +4737,7 @@ func (m *BeginRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4642,9 +4758,7 @@ func (m *BeginResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4669,9 +4783,7 @@ func (m *CommitRequest) SizeVT() (n int) {
if m.TransactionId != 0 {
n += 1 + sov(uint64(m.TransactionId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4684,9 +4796,7 @@ func (m *CommitResponse) SizeVT() (n int) {
if m.ReservedId != 0 {
n += 1 + sov(uint64(m.ReservedId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4711,9 +4821,7 @@ func (m *RollbackRequest) SizeVT() (n int) {
if m.TransactionId != 0 {
n += 1 + sov(uint64(m.TransactionId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4726,9 +4834,7 @@ func (m *RollbackResponse) SizeVT() (n int) {
if m.ReservedId != 0 {
n += 1 + sov(uint64(m.ReservedId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4757,9 +4863,7 @@ func (m *PrepareRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4769,9 +4873,7 @@ func (m *PrepareResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4797,9 +4899,7 @@ func (m *CommitPreparedRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4809,9 +4909,7 @@ func (m *CommitPreparedResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4840,9 +4938,7 @@ func (m *RollbackPreparedRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4852,9 +4948,7 @@ func (m *RollbackPreparedResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4886,9 +4980,7 @@ func (m *CreateTransactionRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4898,9 +4990,7 @@ func (m *CreateTransactionResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4929,9 +5019,7 @@ func (m *StartCommitRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4941,9 +5029,7 @@ func (m *StartCommitResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4972,9 +5058,7 @@ func (m *SetRollbackRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4984,9 +5068,7 @@ func (m *SetRollbackResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5012,9 +5094,7 @@ func (m *ConcludeTransactionRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5024,9 +5104,7 @@ func (m *ConcludeTransactionResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5052,9 +5130,7 @@ func (m *ReadTransactionRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5068,9 +5144,7 @@ func (m *ReadTransactionResponse) SizeVT() (n int) {
l = m.Metadata.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5109,9 +5183,7 @@ func (m *BeginExecuteRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5140,9 +5212,7 @@ func (m *BeginExecuteResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5181,9 +5251,7 @@ func (m *BeginStreamExecuteRequest) SizeVT() (n int) {
if m.ReservedId != 0 {
n += 1 + sov(uint64(m.ReservedId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5212,9 +5280,7 @@ func (m *BeginStreamExecuteResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5240,9 +5306,7 @@ func (m *MessageStreamRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5256,9 +5320,7 @@ func (m *MessageStreamResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5290,9 +5352,7 @@ func (m *MessageAckRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5306,9 +5366,7 @@ func (m *MessageAckResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5347,9 +5405,7 @@ func (m *ReserveExecuteRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5374,9 +5430,7 @@ func (m *ReserveExecuteResponse) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5415,9 +5469,7 @@ func (m *ReserveStreamExecuteRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5442,9 +5494,7 @@ func (m *ReserveStreamExecuteResponse) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5486,9 +5536,7 @@ func (m *ReserveBeginExecuteRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5520,9 +5568,7 @@ func (m *ReserveBeginExecuteResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5564,9 +5610,7 @@ func (m *ReserveBeginStreamExecuteRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5598,9 +5642,7 @@ func (m *ReserveBeginStreamExecuteResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5628,9 +5670,7 @@ func (m *ReleaseRequest) SizeVT() (n int) {
if m.ReservedId != 0 {
n += 1 + sov(uint64(m.ReservedId))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5640,9 +5680,7 @@ func (m *ReleaseResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5652,9 +5690,7 @@ func (m *StreamHealthRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5689,9 +5725,13 @@ func (m *RealtimeStats) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ if len(m.ViewSchemaChanged) > 0 {
+ for _, s := range m.ViewSchemaChanged {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
}
+ n += len(m.unknownFields)
return n
}
@@ -5713,9 +5753,7 @@ func (m *AggregateStats) SizeVT() (n int) {
if m.ReplicationLagSecondsMax != 0 {
n += 1 + sov(uint64(m.ReplicationLagSecondsMax))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5743,9 +5781,7 @@ func (m *StreamHealthResponse) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5771,17 +5807,56 @@ func (m *TransactionMetadata) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
-func sov(x uint64) (n int) {
- return (bits.Len64(x|1) + 6) / 7
+func (m *GetSchemaRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Target != nil {
+ l = m.Target.SizeVT()
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.TableType != 0 {
+ n += 1 + sov(uint64(m.TableType))
+ }
+ if len(m.TableNames) > 0 {
+ for _, s := range m.TableNames {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
}
-func soz(x uint64) (n int) {
- return sov(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+
+func (m *GetSchemaResponse) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.TableDefinition) > 0 {
+ for k, v := range m.TableDefinition {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v)))
+ n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func sov(x uint64) (n int) {
+ return (bits.Len64(x|1) + 6) / 7
+}
+func soz(x uint64) (n int) {
+ return sov(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Target) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
@@ -6836,6 +6911,94 @@ func (m *ExecuteOptions) UnmarshalVT(dAtA []byte) error {
}
}
m.HasCreatedTempTables = bool(v != 0)
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Consolidator", wireType)
+ }
+ m.Consolidator = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Consolidator |= ExecuteOptions_Consolidator(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 14:
+ if wireType == 0 {
+ var v ExecuteOptions_TransactionAccessMode
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= ExecuteOptions_TransactionAccessMode(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TransactionAccessMode = append(m.TransactionAccessMode, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ if elementCount != 0 && len(m.TransactionAccessMode) == 0 {
+ m.TransactionAccessMode = make([]ExecuteOptions_TransactionAccessMode, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v ExecuteOptions_TransactionAccessMode
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= ExecuteOptions_TransactionAccessMode(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TransactionAccessMode = append(m.TransactionAccessMode, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field TransactionAccessMode", wireType)
+ }
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -15766,6 +15929,38 @@ func (m *RealtimeStats) UnmarshalVT(dAtA []byte) error {
}
m.TableSchemaChanged = append(m.TableSchemaChanged, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ViewSchemaChanged", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ViewSchemaChanged = append(m.ViewSchemaChanged, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -16268,6 +16463,323 @@ func (m *TransactionMetadata) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Target == nil {
+ m.Target = &Target{}
+ }
+ if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TableType", wireType)
+ }
+ m.TableType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TableType |= SchemaTableType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TableNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TableNames = append(m.TableNames, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TableDefinition", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TableDefinition == nil {
+ m.TableDefinition = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLength
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLength
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLength
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLength
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.TableDefinition[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go
index 195a49579af..3d72458cb27 100644
--- a/go/vt/proto/queryservice/queryservice.pb.go
+++ b/go/vt/proto/queryservice/queryservice.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: queryservice.proto
@@ -45,7 +45,7 @@ var file_queryservice_proto_rawDesc = []byte{
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x1a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x10, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x32, 0x8e, 0x10, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x07, 0x45,
+ 0x6f, 0x32, 0xd2, 0x10, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x07, 0x45,
0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45,
0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e,
0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
@@ -174,10 +174,14 @@ var file_queryservice_proto_rawDesc = []byte{
0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x62, 0x69, 0x6e,
0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x30, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f,
- 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x30, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+ 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
+ 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_queryservice_proto_goTypes = []interface{}{
@@ -207,32 +211,34 @@ var file_queryservice_proto_goTypes = []interface{}{
(*binlogdata.VStreamRequest)(nil), // 23: binlogdata.VStreamRequest
(*binlogdata.VStreamRowsRequest)(nil), // 24: binlogdata.VStreamRowsRequest
(*binlogdata.VStreamResultsRequest)(nil), // 25: binlogdata.VStreamResultsRequest
- (*query.ExecuteResponse)(nil), // 26: query.ExecuteResponse
- (*query.StreamExecuteResponse)(nil), // 27: query.StreamExecuteResponse
- (*query.BeginResponse)(nil), // 28: query.BeginResponse
- (*query.CommitResponse)(nil), // 29: query.CommitResponse
- (*query.RollbackResponse)(nil), // 30: query.RollbackResponse
- (*query.PrepareResponse)(nil), // 31: query.PrepareResponse
- (*query.CommitPreparedResponse)(nil), // 32: query.CommitPreparedResponse
- (*query.RollbackPreparedResponse)(nil), // 33: query.RollbackPreparedResponse
- (*query.CreateTransactionResponse)(nil), // 34: query.CreateTransactionResponse
- (*query.StartCommitResponse)(nil), // 35: query.StartCommitResponse
- (*query.SetRollbackResponse)(nil), // 36: query.SetRollbackResponse
- (*query.ConcludeTransactionResponse)(nil), // 37: query.ConcludeTransactionResponse
- (*query.ReadTransactionResponse)(nil), // 38: query.ReadTransactionResponse
- (*query.BeginExecuteResponse)(nil), // 39: query.BeginExecuteResponse
- (*query.BeginStreamExecuteResponse)(nil), // 40: query.BeginStreamExecuteResponse
- (*query.MessageStreamResponse)(nil), // 41: query.MessageStreamResponse
- (*query.MessageAckResponse)(nil), // 42: query.MessageAckResponse
- (*query.ReserveExecuteResponse)(nil), // 43: query.ReserveExecuteResponse
- (*query.ReserveBeginExecuteResponse)(nil), // 44: query.ReserveBeginExecuteResponse
- (*query.ReserveStreamExecuteResponse)(nil), // 45: query.ReserveStreamExecuteResponse
- (*query.ReserveBeginStreamExecuteResponse)(nil), // 46: query.ReserveBeginStreamExecuteResponse
- (*query.ReleaseResponse)(nil), // 47: query.ReleaseResponse
- (*query.StreamHealthResponse)(nil), // 48: query.StreamHealthResponse
- (*binlogdata.VStreamResponse)(nil), // 49: binlogdata.VStreamResponse
- (*binlogdata.VStreamRowsResponse)(nil), // 50: binlogdata.VStreamRowsResponse
- (*binlogdata.VStreamResultsResponse)(nil), // 51: binlogdata.VStreamResultsResponse
+ (*query.GetSchemaRequest)(nil), // 26: query.GetSchemaRequest
+ (*query.ExecuteResponse)(nil), // 27: query.ExecuteResponse
+ (*query.StreamExecuteResponse)(nil), // 28: query.StreamExecuteResponse
+ (*query.BeginResponse)(nil), // 29: query.BeginResponse
+ (*query.CommitResponse)(nil), // 30: query.CommitResponse
+ (*query.RollbackResponse)(nil), // 31: query.RollbackResponse
+ (*query.PrepareResponse)(nil), // 32: query.PrepareResponse
+ (*query.CommitPreparedResponse)(nil), // 33: query.CommitPreparedResponse
+ (*query.RollbackPreparedResponse)(nil), // 34: query.RollbackPreparedResponse
+ (*query.CreateTransactionResponse)(nil), // 35: query.CreateTransactionResponse
+ (*query.StartCommitResponse)(nil), // 36: query.StartCommitResponse
+ (*query.SetRollbackResponse)(nil), // 37: query.SetRollbackResponse
+ (*query.ConcludeTransactionResponse)(nil), // 38: query.ConcludeTransactionResponse
+ (*query.ReadTransactionResponse)(nil), // 39: query.ReadTransactionResponse
+ (*query.BeginExecuteResponse)(nil), // 40: query.BeginExecuteResponse
+ (*query.BeginStreamExecuteResponse)(nil), // 41: query.BeginStreamExecuteResponse
+ (*query.MessageStreamResponse)(nil), // 42: query.MessageStreamResponse
+ (*query.MessageAckResponse)(nil), // 43: query.MessageAckResponse
+ (*query.ReserveExecuteResponse)(nil), // 44: query.ReserveExecuteResponse
+ (*query.ReserveBeginExecuteResponse)(nil), // 45: query.ReserveBeginExecuteResponse
+ (*query.ReserveStreamExecuteResponse)(nil), // 46: query.ReserveStreamExecuteResponse
+ (*query.ReserveBeginStreamExecuteResponse)(nil), // 47: query.ReserveBeginStreamExecuteResponse
+ (*query.ReleaseResponse)(nil), // 48: query.ReleaseResponse
+ (*query.StreamHealthResponse)(nil), // 49: query.StreamHealthResponse
+ (*binlogdata.VStreamResponse)(nil), // 50: binlogdata.VStreamResponse
+ (*binlogdata.VStreamRowsResponse)(nil), // 51: binlogdata.VStreamRowsResponse
+ (*binlogdata.VStreamResultsResponse)(nil), // 52: binlogdata.VStreamResultsResponse
+ (*query.GetSchemaResponse)(nil), // 53: query.GetSchemaResponse
}
var file_queryservice_proto_depIdxs = []int32{
0, // 0: queryservice.Query.Execute:input_type -> query.ExecuteRequest
@@ -261,34 +267,36 @@ var file_queryservice_proto_depIdxs = []int32{
23, // 23: queryservice.Query.VStream:input_type -> binlogdata.VStreamRequest
24, // 24: queryservice.Query.VStreamRows:input_type -> binlogdata.VStreamRowsRequest
25, // 25: queryservice.Query.VStreamResults:input_type -> binlogdata.VStreamResultsRequest
- 26, // 26: queryservice.Query.Execute:output_type -> query.ExecuteResponse
- 27, // 27: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse
- 28, // 28: queryservice.Query.Begin:output_type -> query.BeginResponse
- 29, // 29: queryservice.Query.Commit:output_type -> query.CommitResponse
- 30, // 30: queryservice.Query.Rollback:output_type -> query.RollbackResponse
- 31, // 31: queryservice.Query.Prepare:output_type -> query.PrepareResponse
- 32, // 32: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse
- 33, // 33: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse
- 34, // 34: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse
- 35, // 35: queryservice.Query.StartCommit:output_type -> query.StartCommitResponse
- 36, // 36: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse
- 37, // 37: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse
- 38, // 38: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse
- 39, // 39: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse
- 40, // 40: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse
- 41, // 41: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse
- 42, // 42: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse
- 43, // 43: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse
- 44, // 44: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse
- 45, // 45: queryservice.Query.ReserveStreamExecute:output_type -> query.ReserveStreamExecuteResponse
- 46, // 46: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse
- 47, // 47: queryservice.Query.Release:output_type -> query.ReleaseResponse
- 48, // 48: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse
- 49, // 49: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse
- 50, // 50: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse
- 51, // 51: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse
- 26, // [26:52] is the sub-list for method output_type
- 0, // [0:26] is the sub-list for method input_type
+ 26, // 26: queryservice.Query.GetSchema:input_type -> query.GetSchemaRequest
+ 27, // 27: queryservice.Query.Execute:output_type -> query.ExecuteResponse
+ 28, // 28: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse
+ 29, // 29: queryservice.Query.Begin:output_type -> query.BeginResponse
+ 30, // 30: queryservice.Query.Commit:output_type -> query.CommitResponse
+ 31, // 31: queryservice.Query.Rollback:output_type -> query.RollbackResponse
+ 32, // 32: queryservice.Query.Prepare:output_type -> query.PrepareResponse
+ 33, // 33: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse
+ 34, // 34: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse
+ 35, // 35: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse
+ 36, // 36: queryservice.Query.StartCommit:output_type -> query.StartCommitResponse
+ 37, // 37: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse
+ 38, // 38: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse
+ 39, // 39: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse
+ 40, // 40: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse
+ 41, // 41: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse
+ 42, // 42: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse
+ 43, // 43: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse
+ 44, // 44: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse
+ 45, // 45: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse
+ 46, // 46: queryservice.Query.ReserveStreamExecute:output_type -> query.ReserveStreamExecuteResponse
+ 47, // 47: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse
+ 48, // 48: queryservice.Query.Release:output_type -> query.ReleaseResponse
+ 49, // 49: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse
+ 50, // 50: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse
+ 51, // 51: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse
+ 52, // 52: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse
+ 53, // 53: queryservice.Query.GetSchema:output_type -> query.GetSchemaResponse
+ 27, // [27:54] is the sub-list for method output_type
+ 0, // [0:27] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
diff --git a/go/vt/proto/queryservice/queryservice_grpc.pb.go b/go/vt/proto/queryservice/queryservice_grpc.pb.go
index 37fcc3ee240..f9d596351e2 100644
--- a/go/vt/proto/queryservice/queryservice_grpc.pb.go
+++ b/go/vt/proto/queryservice/queryservice_grpc.pb.go
@@ -81,6 +81,8 @@ type QueryClient interface {
VStreamRows(ctx context.Context, in *binlogdata.VStreamRowsRequest, opts ...grpc.CallOption) (Query_VStreamRowsClient, error)
// VStreamResults streams results along with the gtid of the snapshot.
VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error)
+ // GetSchema returns the schema information.
+ GetSchema(ctx context.Context, in *query.GetSchemaRequest, opts ...grpc.CallOption) (Query_GetSchemaClient, error)
}
type queryClient struct {
@@ -532,6 +534,38 @@ func (x *queryVStreamResultsClient) Recv() (*binlogdata.VStreamResultsResponse,
return m, nil
}
+func (c *queryClient) GetSchema(ctx context.Context, in *query.GetSchemaRequest, opts ...grpc.CallOption) (Query_GetSchemaClient, error) {
+ stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[9], "/queryservice.Query/GetSchema", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &queryGetSchemaClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Query_GetSchemaClient interface {
+ Recv() (*query.GetSchemaResponse, error)
+ grpc.ClientStream
+}
+
+type queryGetSchemaClient struct {
+ grpc.ClientStream
+}
+
+func (x *queryGetSchemaClient) Recv() (*query.GetSchemaResponse, error) {
+ m := new(query.GetSchemaResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
// QueryServer is the server API for Query service.
// All implementations must embed UnimplementedQueryServer
// for forward compatibility
@@ -593,6 +627,8 @@ type QueryServer interface {
VStreamRows(*binlogdata.VStreamRowsRequest, Query_VStreamRowsServer) error
// VStreamResults streams results along with the gtid of the snapshot.
VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error
+ // GetSchema returns the schema information.
+ GetSchema(*query.GetSchemaRequest, Query_GetSchemaServer) error
mustEmbedUnimplementedQueryServer()
}
@@ -678,6 +714,9 @@ func (UnimplementedQueryServer) VStreamRows(*binlogdata.VStreamRowsRequest, Quer
func (UnimplementedQueryServer) VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error {
return status.Errorf(codes.Unimplemented, "method VStreamResults not implemented")
}
+func (UnimplementedQueryServer) GetSchema(*query.GetSchemaRequest, Query_GetSchemaServer) error {
+ return status.Errorf(codes.Unimplemented, "method GetSchema not implemented")
+}
func (UnimplementedQueryServer) mustEmbedUnimplementedQueryServer() {}
// UnsafeQueryServer may be embedded to opt out of forward compatibility for this service.
@@ -1186,6 +1225,27 @@ func (x *queryVStreamResultsServer) Send(m *binlogdata.VStreamResultsResponse) e
return x.ServerStream.SendMsg(m)
}
+func _Query_GetSchema_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(query.GetSchemaRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(QueryServer).GetSchema(m, &queryGetSchemaServer{stream})
+}
+
+type Query_GetSchemaServer interface {
+ Send(*query.GetSchemaResponse) error
+ grpc.ServerStream
+}
+
+type queryGetSchemaServer struct {
+ grpc.ServerStream
+}
+
+func (x *queryGetSchemaServer) Send(m *query.GetSchemaResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
// Query_ServiceDesc is the grpc.ServiceDesc for Query service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -1308,6 +1368,11 @@ var Query_ServiceDesc = grpc.ServiceDesc{
Handler: _Query_VStreamResults_Handler,
ServerStreams: true,
},
+ {
+ StreamName: "GetSchema",
+ Handler: _Query_GetSchema_Handler,
+ ServerStreams: true,
+ },
},
Metadata: "queryservice.proto",
}
diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go
index 7fc537b9cca..55bcdf99b55 100644
--- a/go/vt/proto/replicationdata/replicationdata.pb.go
+++ b/go/vt/proto/replicationdata/replicationdata.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: replicationdata.proto
diff --git a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go
index 9a7b297a4fa..350a733e865 100644
--- a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go
+++ b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: replicationdata.proto
package replicationdata
@@ -619,9 +619,7 @@ func (m *Status) SizeVT() (n int) {
if m.ReplicationLagUnknown {
n += 3
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -639,9 +637,7 @@ func (m *StopReplicationStatus) SizeVT() (n int) {
l = m.After.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -659,9 +655,7 @@ func (m *PrimaryStatus) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -740,9 +734,7 @@ func (m *FullStatus) SizeVT() (n int) {
if m.SemiSyncWaitForReplicaCount != 0 {
n += 2 + sov(uint64(m.SemiSyncWaitForReplicaCount))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2157,6 +2149,7 @@ func (m *FullStatus) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go
index c8d91c6cfd8..bdf863601be 100644
--- a/go/vt/proto/tableacl/tableacl.pb.go
+++ b/go/vt/proto/tableacl/tableacl.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: tableacl.proto
diff --git a/go/vt/proto/tableacl/tableacl_vtproto.pb.go b/go/vt/proto/tableacl/tableacl_vtproto.pb.go
index 4f430ccc395..462bf151230 100644
--- a/go/vt/proto/tableacl/tableacl_vtproto.pb.go
+++ b/go/vt/proto/tableacl/tableacl_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: tableacl.proto
package tableacl
@@ -184,9 +184,7 @@ func (m *TableGroupSpec) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -202,9 +200,7 @@ func (m *Config) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -510,6 +506,7 @@ func (m *Config) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
index f32eeee32c7..8104614f35d 100644
--- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
+++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: tabletmanagerdata.proto
@@ -4495,6 +4495,9 @@ type BackupRequest struct {
Concurrency int64 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"`
+ // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty
+ // then the backup becomes incremental and applies as of given position.
+ IncrementalFromPos string `protobuf:"bytes,3,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"`
}
func (x *BackupRequest) Reset() {
@@ -4543,6 +4546,13 @@ func (x *BackupRequest) GetAllowPrimary() bool {
return false
}
+func (x *BackupRequest) GetIncrementalFromPos() string {
+ if x != nil {
+ return x.IncrementalFromPos
+ }
+ return ""
+}
+
type BackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -4596,6 +4606,12 @@ type RestoreFromBackupRequest struct {
unknownFields protoimpl.UnknownFields
BackupTime *vttime.Time `protobuf:"bytes,1,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"`
+ // RestoreToPos indicates a position for a point-in-time recovery. The recovery
+ // is expected to utilize one full backup, followed by zero or more incremental backups,
+ // that reach the precise desired position
+ RestoreToPos string `protobuf:"bytes,2,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"`
+ // Dry run does not actually performs the restore, but validates the steps and availability of backups
+ DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"`
}
func (x *RestoreFromBackupRequest) Reset() {
@@ -4637,6 +4653,20 @@ func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time {
return nil
}
+func (x *RestoreFromBackupRequest) GetRestoreToPos() string {
+ if x != nil {
+ return x.RestoreToPos
+ }
+ return ""
+}
+
+func (x *RestoreFromBackupRequest) GetDryRun() bool {
+ if x != nil {
+ return x.DryRun
+ }
+ return false
+}
+
type RestoreFromBackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5014,7 +5044,7 @@ type VDiffReportOptions struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- OnlyPKS bool `protobuf:"varint,1,opt,name=only_p_k_s,json=onlyPKS,proto3" json:"only_p_k_s,omitempty"`
+ OnlyPks bool `protobuf:"varint,1,opt,name=only_pks,json=onlyPks,proto3" json:"only_pks,omitempty"`
DebugQuery bool `protobuf:"varint,2,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"`
Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
}
@@ -5051,9 +5081,9 @@ func (*VDiffReportOptions) Descriptor() ([]byte, []int) {
return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101}
}
-func (x *VDiffReportOptions) GetOnlyPKS() bool {
+func (x *VDiffReportOptions) GetOnlyPks() bool {
if x != nil {
- return x.OnlyPKS
+ return x.OnlyPks
}
return false
}
@@ -5673,104 +5703,112 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{
0x22, 0x34, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f,
0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f,
- 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
- 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c,
- 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x36,
- 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52,
- 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x49, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
- 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d,
- 0x65, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d,
- 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24,
- 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
- 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x22, 0x5c, 0x0a, 0x0c, 0x56, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f,
- 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f,
- 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x22, 0x3b, 0x0a, 0x0d, 0x56, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72,
- 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22,
- 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
- 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12,
- 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69,
- 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75,
- 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
- 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f,
- 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66,
- 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69,
- 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f,
- 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12,
- 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c,
- 0x22, 0x6a, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x0a, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70,
- 0x5f, 0x6b, 0x5f, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79,
- 0x50, 0x4b, 0x53, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x82, 0x02, 0x0a,
- 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74,
- 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61,
- 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f,
- 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52,
- 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12,
- 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12, 0x27,
- 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64,
- 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65,
- 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d,
- 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45,
- 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72,
- 0x65, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
- 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66,
- 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x72,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6f,
- 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
- 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63,
+ 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12,
+ 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66,
+ 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69,
+ 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f,
+ 0x73, 0x22, 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x18, 0x52, 0x65,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74,
+ 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65,
+ 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72,
+ 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64,
+ 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72,
+ 0x79, 0x52, 0x75, 0x6e, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46,
+ 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x5c, 0x0a, 0x0c, 0x56, 0x45, 0x78, 0x65, 0x63,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a,
+ 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x3b, 0x0a, 0x0d, 0x56, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72,
+ 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41,
+ 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69,
+ 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d,
+ 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a,
+ 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69,
+ 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76,
+ 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66,
+ 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
+ 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65,
+ 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c,
+ 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43,
+ 0x65, 0x6c, 0x6c, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f,
+ 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6e, 0x6c,
+ 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c,
+ 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75,
+ 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x82, 0x02,
+ 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75,
+ 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
+ 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78,
+ 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78,
+ 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12,
+ 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e,
+ 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f,
+ 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78,
+ 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61,
+ 0x72, 0x65, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66,
+ 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x63, 0x6f,
+ 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x70,
+ 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72,
+ 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73,
+ 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76,
+ 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61,
+ 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
}
var (
diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go
index 839d519ebc9..4b3bbbbf13d 100644
--- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go
+++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: tabletmanagerdata.proto
package tabletmanagerdata
@@ -4001,6 +4001,13 @@ func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.IncrementalFromPos) > 0 {
+ i -= len(m.IncrementalFromPos)
+ copy(dAtA[i:], m.IncrementalFromPos)
+ i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos)))
+ i--
+ dAtA[i] = 0x1a
+ }
if m.AllowPrimary {
i--
if m.AllowPrimary {
@@ -4092,6 +4099,23 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.DryRun {
+ i--
+ if m.DryRun {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.RestoreToPos) > 0 {
+ i -= len(m.RestoreToPos)
+ copy(dAtA[i:], m.RestoreToPos)
+ i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos)))
+ i--
+ dAtA[i] = 0x12
+ }
if m.BackupTime != nil {
size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i])
if err != nil {
@@ -4479,9 +4503,9 @@ func (m *VDiffReportOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x10
}
- if m.OnlyPKS {
+ if m.OnlyPks {
i--
- if m.OnlyPKS {
+ if m.OnlyPks {
dAtA[i] = 1
} else {
dAtA[i] = 0
@@ -4688,9 +4712,7 @@ func (m *TableDefinition) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4714,9 +4736,7 @@ func (m *SchemaDefinition) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4734,9 +4754,7 @@ func (m *SchemaChangeResult) SizeVT() (n int) {
l = m.AfterSchema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4765,9 +4783,7 @@ func (m *UserPermission) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4797,9 +4813,7 @@ func (m *DbPermission) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4821,9 +4835,7 @@ func (m *Permissions) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4837,9 +4849,7 @@ func (m *PingRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4853,9 +4863,7 @@ func (m *PingResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4868,9 +4876,7 @@ func (m *SleepRequest) SizeVT() (n int) {
if m.Duration != 0 {
n += 1 + sov(uint64(m.Duration))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4880,9 +4886,7 @@ func (m *SleepResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4910,9 +4914,7 @@ func (m *ExecuteHookRequest) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4933,9 +4935,7 @@ func (m *ExecuteHookResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4963,9 +4963,7 @@ func (m *GetSchemaRequest) SizeVT() (n int) {
if m.TableSchemaOnly {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4979,9 +4977,7 @@ func (m *GetSchemaResponse) SizeVT() (n int) {
l = m.SchemaDefinition.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -4991,9 +4987,7 @@ func (m *GetPermissionsRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5007,9 +5001,7 @@ func (m *GetPermissionsResponse) SizeVT() (n int) {
l = m.Permissions.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5019,9 +5011,7 @@ func (m *SetReadOnlyRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5031,9 +5021,7 @@ func (m *SetReadOnlyResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5043,9 +5031,7 @@ func (m *SetReadWriteRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5055,9 +5041,7 @@ func (m *SetReadWriteResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5073,9 +5057,7 @@ func (m *ChangeTypeRequest) SizeVT() (n int) {
if m.SemiSync {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5085,9 +5067,7 @@ func (m *ChangeTypeResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5097,9 +5077,7 @@ func (m *RefreshStateRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5109,9 +5087,7 @@ func (m *RefreshStateResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5121,9 +5097,7 @@ func (m *RunHealthCheckRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5133,9 +5107,7 @@ func (m *RunHealthCheckResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5149,9 +5121,7 @@ func (m *ReloadSchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5161,9 +5131,7 @@ func (m *ReloadSchemaResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5179,9 +5147,7 @@ func (m *PreflightSchemaRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5197,9 +5163,7 @@ func (m *PreflightSchemaResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5231,9 +5195,7 @@ func (m *ApplySchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5251,9 +5213,7 @@ func (m *ApplySchemaResponse) SizeVT() (n int) {
l = m.AfterSchema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5263,9 +5223,7 @@ func (m *LockTablesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5275,9 +5233,7 @@ func (m *LockTablesResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5287,9 +5243,7 @@ func (m *UnlockTablesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5299,9 +5253,7 @@ func (m *UnlockTablesResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5326,9 +5278,7 @@ func (m *ExecuteQueryRequest) SizeVT() (n int) {
l = m.CallerId.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5342,9 +5292,7 @@ func (m *ExecuteQueryResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5371,9 +5319,7 @@ func (m *ExecuteFetchAsDbaRequest) SizeVT() (n int) {
if m.ReloadSchema {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5387,9 +5333,7 @@ func (m *ExecuteFetchAsDbaResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5413,9 +5357,7 @@ func (m *ExecuteFetchAsAllPrivsRequest) SizeVT() (n int) {
if m.ReloadSchema {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5429,9 +5371,7 @@ func (m *ExecuteFetchAsAllPrivsResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5448,9 +5388,7 @@ func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) {
if m.MaxRows != 0 {
n += 1 + sov(uint64(m.MaxRows))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5464,9 +5402,7 @@ func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5476,9 +5412,7 @@ func (m *ReplicationStatusRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5492,9 +5426,7 @@ func (m *ReplicationStatusResponse) SizeVT() (n int) {
l = m.Status.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5504,9 +5436,7 @@ func (m *PrimaryStatusRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5520,9 +5450,7 @@ func (m *PrimaryStatusResponse) SizeVT() (n int) {
l = m.Status.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5532,9 +5460,7 @@ func (m *PrimaryPositionRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5548,9 +5474,7 @@ func (m *PrimaryPositionResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5564,9 +5488,7 @@ func (m *WaitForPositionRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5576,9 +5498,7 @@ func (m *WaitForPositionResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5588,9 +5508,7 @@ func (m *StopReplicationRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5600,9 +5518,7 @@ func (m *StopReplicationResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5619,9 +5535,7 @@ func (m *StopReplicationMinimumRequest) SizeVT() (n int) {
if m.WaitTimeout != 0 {
n += 1 + sov(uint64(m.WaitTimeout))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5635,9 +5549,7 @@ func (m *StopReplicationMinimumResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5650,9 +5562,7 @@ func (m *StartReplicationRequest) SizeVT() (n int) {
if m.SemiSync {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5662,9 +5572,7 @@ func (m *StartReplicationResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5681,9 +5589,7 @@ func (m *StartReplicationUntilAfterRequest) SizeVT() (n int) {
if m.WaitTimeout != 0 {
n += 1 + sov(uint64(m.WaitTimeout))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5693,9 +5599,7 @@ func (m *StartReplicationUntilAfterResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5705,9 +5609,7 @@ func (m *GetReplicasRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5723,9 +5625,7 @@ func (m *GetReplicasResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5735,9 +5635,7 @@ func (m *ResetReplicationRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5747,9 +5645,7 @@ func (m *ResetReplicationResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5763,9 +5659,7 @@ func (m *VReplicationExecRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5779,9 +5673,7 @@ func (m *VReplicationExecResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5798,9 +5690,7 @@ func (m *VReplicationWaitForPosRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5810,9 +5700,7 @@ func (m *VReplicationWaitForPosResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5825,9 +5713,7 @@ func (m *InitPrimaryRequest) SizeVT() (n int) {
if m.SemiSync {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5841,9 +5727,7 @@ func (m *InitPrimaryResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5868,9 +5752,7 @@ func (m *PopulateReparentJournalRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5880,9 +5762,7 @@ func (m *PopulateReparentJournalResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5906,9 +5786,7 @@ func (m *InitReplicaRequest) SizeVT() (n int) {
if m.SemiSync {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5918,9 +5796,7 @@ func (m *InitReplicaResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5930,9 +5806,7 @@ func (m *DemotePrimaryRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5946,9 +5820,7 @@ func (m *DemotePrimaryResponse) SizeVT() (n int) {
l = m.PrimaryStatus.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5961,9 +5833,7 @@ func (m *UndoDemotePrimaryRequest) SizeVT() (n int) {
if m.SemiSync {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5973,9 +5843,7 @@ func (m *UndoDemotePrimaryResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5985,9 +5853,7 @@ func (m *ReplicaWasPromotedRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5997,9 +5863,7 @@ func (m *ReplicaWasPromotedResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6009,9 +5873,7 @@ func (m *ResetReplicationParametersRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6021,9 +5883,7 @@ func (m *ResetReplicationParametersResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6033,9 +5893,7 @@ func (m *FullStatusRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6049,9 +5907,7 @@ func (m *FullStatusResponse) SizeVT() (n int) {
l = m.Status.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6078,9 +5934,7 @@ func (m *SetReplicationSourceRequest) SizeVT() (n int) {
if m.SemiSync {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6090,9 +5944,7 @@ func (m *SetReplicationSourceResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6106,9 +5958,7 @@ func (m *ReplicaWasRestartedRequest) SizeVT() (n int) {
l = m.Parent.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6118,9 +5968,7 @@ func (m *ReplicaWasRestartedResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6133,9 +5981,7 @@ func (m *StopReplicationAndGetStatusRequest) SizeVT() (n int) {
if m.StopReplicationMode != 0 {
n += 1 + sov(uint64(m.StopReplicationMode))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6149,9 +5995,7 @@ func (m *StopReplicationAndGetStatusResponse) SizeVT() (n int) {
l = m.Status.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6164,9 +6008,7 @@ func (m *PromoteReplicaRequest) SizeVT() (n int) {
if m.SemiSync {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6180,9 +6022,7 @@ func (m *PromoteReplicaResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6198,9 +6038,11 @@ func (m *BackupRequest) SizeVT() (n int) {
if m.AllowPrimary {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.IncrementalFromPos)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
return n
}
@@ -6214,9 +6056,7 @@ func (m *BackupResponse) SizeVT() (n int) {
l = m.Event.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6230,9 +6070,14 @@ func (m *RestoreFromBackupRequest) SizeVT() (n int) {
l = m.BackupTime.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.RestoreToPos)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ if m.DryRun {
+ n += 2
+ }
+ n += len(m.unknownFields)
return n
}
@@ -6246,9 +6091,7 @@ func (m *RestoreFromBackupResponse) SizeVT() (n int) {
l = m.Event.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6270,9 +6113,7 @@ func (m *VExecRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6286,9 +6127,7 @@ func (m *VExecResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6322,9 +6161,7 @@ func (m *VDiffRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6345,9 +6182,7 @@ func (m *VDiffResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6369,9 +6204,7 @@ func (m *VDiffPickerOptions) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6381,7 +6214,7 @@ func (m *VDiffReportOptions) SizeVT() (n int) {
}
var l int
_ = l
- if m.OnlyPKS {
+ if m.OnlyPks {
n += 2
}
if m.DebugQuery {
@@ -6391,9 +6224,7 @@ func (m *VDiffReportOptions) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6425,9 +6256,7 @@ func (m *VDiffCoreOptions) SizeVT() (n int) {
if m.MaxExtraRowsToCompare != 0 {
n += 1 + sov(uint64(m.MaxExtraRowsToCompare))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6449,9 +6278,7 @@ func (m *VDiffOptions) SizeVT() (n int) {
l = m.ReportOptions.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -14671,6 +14498,38 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error {
}
}
m.AllowPrimary = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IncrementalFromPos = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -14845,6 +14704,58 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RestoreToPos = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.DryRun = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -15751,7 +15662,7 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OnlyPKS", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field OnlyPks", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
@@ -15768,7 +15679,7 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error {
break
}
}
- m.OnlyPKS = bool(v != 0)
+ m.OnlyPks = bool(v != 0)
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType)
@@ -16201,6 +16112,7 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go
index 70af0d0ca16..1d5c48420ac 100644
--- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go
+++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: tabletmanagerservice.proto
diff --git a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go
index 5aa03276cb0..8a52d2344f2 100644
--- a/go/vt/proto/throttlerdata/throttlerdata.pb.go
+++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: throttlerdata.proto
diff --git a/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go b/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go
index 5550ad14fb0..7a061d1fc38 100644
--- a/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go
+++ b/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: throttlerdata.proto
package throttlerdata
@@ -586,9 +586,7 @@ func (m *MaxRatesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -606,9 +604,7 @@ func (m *MaxRatesResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -621,9 +617,7 @@ func (m *SetMaxRateRequest) SizeVT() (n int) {
if m.Rate != 0 {
n += 1 + sov(uint64(m.Rate))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -639,9 +633,7 @@ func (m *SetMaxRateResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -693,9 +685,7 @@ func (m *Configuration) SizeVT() (n int) {
if m.MaxRateApproachThreshold != 0 {
n += 9
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -709,9 +699,7 @@ func (m *GetConfigurationRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -734,9 +722,7 @@ func (m *GetConfigurationResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -757,9 +743,7 @@ func (m *UpdateConfigurationRequest) SizeVT() (n int) {
if m.CopyZeroValues {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -775,9 +759,7 @@ func (m *UpdateConfigurationResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -791,9 +773,7 @@ func (m *ResetConfigurationRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -809,9 +789,7 @@ func (m *ResetConfigurationResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2125,6 +2103,7 @@ func (m *ResetConfigurationResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go
index b2f108003c6..83d6f506a79 100644
--- a/go/vt/proto/throttlerservice/throttlerservice.pb.go
+++ b/go/vt/proto/throttlerservice/throttlerservice.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: throttlerservice.proto
diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go
index a9797c58282..2d129763459 100644
--- a/go/vt/proto/topodata/topodata.pb.go
+++ b/go/vt/proto/topodata/topodata.pb.go
@@ -20,7 +20,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: topodata.proto
@@ -402,7 +402,6 @@ type Tablet struct {
// about which tablet should be the primary, such as via Vitess
// replication-management commands like PlannedReparentShard,
// EmergencyReparentShard, and TabletExternallyReparented.
- //
PrimaryTermStartTime *vttime.Time `protobuf:"bytes,14,opt,name=primary_term_start_time,json=primaryTermStartTime,proto3" json:"primary_term_start_time,omitempty"`
// db_server_version represents the database version used by the tablet.
DbServerVersion string `protobuf:"bytes,15,opt,name=db_server_version,json=dbServerVersion,proto3" json:"db_server_version,omitempty"`
@@ -684,6 +683,10 @@ type Keyspace struct {
// DurabilityPolicy is the durability policy to be
// used for the keyspace.
DurabilityPolicy string `protobuf:"bytes,8,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"`
+ // ThrottlerConfig has the configuration for the tablet
+ // server's lag throttler, and applies to the entire
+ // keyspace, across all shards and tablets.
+ ThrottlerConfig *ThrottlerConfig `protobuf:"bytes,9,opt,name=throttler_config,json=throttlerConfig,proto3" json:"throttler_config,omitempty"`
}
func (x *Keyspace) Reset() {
@@ -753,6 +756,13 @@ func (x *Keyspace) GetDurabilityPolicy() string {
return ""
}
+func (x *Keyspace) GetThrottlerConfig() *ThrottlerConfig {
+ if x != nil {
+ return x.ThrottlerConfig
+ }
+ return nil
+}
+
// ShardReplication describes the MySQL replication relationships
// whithin a cell.
type ShardReplication struct {
@@ -986,6 +996,86 @@ func (x *ShardTabletControl) GetQueryServiceDisabled() bool {
return false
}
+type ThrottlerConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enabled indicates that the throttler is actually checking state for
+ // requests. When disabled, it automatically returns 200 OK for all
+ // checks.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Threshold is the threshold for either the default check (heartbeat
+ // lag) or custom check.
+ Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"`
+ // CustomQuery is an optional query that overrides the default check
+ // query.
+ CustomQuery string `protobuf:"bytes,3,opt,name=custom_query,json=customQuery,proto3" json:"custom_query,omitempty"`
+ // CheckAsCheckSelf indicates whether a throttler /check request
+ // should behave like a /check-self.
+ CheckAsCheckSelf bool `protobuf:"varint,4,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"`
+}
+
+func (x *ThrottlerConfig) Reset() {
+ *x = ThrottlerConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_topodata_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ThrottlerConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ThrottlerConfig) ProtoMessage() {}
+
+func (x *ThrottlerConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_topodata_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ThrottlerConfig.ProtoReflect.Descriptor instead.
+func (*ThrottlerConfig) Descriptor() ([]byte, []int) {
+ return file_topodata_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ThrottlerConfig) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+func (x *ThrottlerConfig) GetThreshold() float64 {
+ if x != nil {
+ return x.Threshold
+ }
+ return 0
+}
+
+func (x *ThrottlerConfig) GetCustomQuery() string {
+ if x != nil {
+ return x.CustomQuery
+ }
+ return ""
+}
+
+func (x *ThrottlerConfig) GetCheckAsCheckSelf() bool {
+ if x != nil {
+ return x.CheckAsCheckSelf
+ }
+ return false
+}
+
// SrvKeyspace is a rollup node for the keyspace itself.
type SrvKeyspace struct {
state protoimpl.MessageState
@@ -995,12 +1085,17 @@ type SrvKeyspace struct {
// The partitions this keyspace is serving, per tablet type.
Partitions []*SrvKeyspace_KeyspacePartition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"`
ServedFrom []*SrvKeyspace_ServedFrom `protobuf:"bytes,4,rep,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"`
+ // ThrottlerConfig has the configuration for the tablet server's
+ // lag throttler, and applies to the entire keyspace, across all
+ // shards and tablets. This is copied from the global keyspace
+ // object.
+ ThrottlerConfig *ThrottlerConfig `protobuf:"bytes,6,opt,name=throttler_config,json=throttlerConfig,proto3" json:"throttler_config,omitempty"`
}
func (x *SrvKeyspace) Reset() {
*x = SrvKeyspace{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[9]
+ mi := &file_topodata_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1013,7 +1108,7 @@ func (x *SrvKeyspace) String() string {
func (*SrvKeyspace) ProtoMessage() {}
func (x *SrvKeyspace) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[9]
+ mi := &file_topodata_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1026,7 +1121,7 @@ func (x *SrvKeyspace) ProtoReflect() protoreflect.Message {
// Deprecated: Use SrvKeyspace.ProtoReflect.Descriptor instead.
func (*SrvKeyspace) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{9}
+ return file_topodata_proto_rawDescGZIP(), []int{10}
}
func (x *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition {
@@ -1043,6 +1138,13 @@ func (x *SrvKeyspace) GetServedFrom() []*SrvKeyspace_ServedFrom {
return nil
}
+func (x *SrvKeyspace) GetThrottlerConfig() *ThrottlerConfig {
+ if x != nil {
+ return x.ThrottlerConfig
+ }
+ return nil
+}
+
// CellInfo contains information about a cell. CellInfo objects are
// stored in the global topology server, and describe how to reach
// local topology servers.
@@ -1064,7 +1166,7 @@ type CellInfo struct {
func (x *CellInfo) Reset() {
*x = CellInfo{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[10]
+ mi := &file_topodata_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1077,7 +1179,7 @@ func (x *CellInfo) String() string {
func (*CellInfo) ProtoMessage() {}
func (x *CellInfo) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[10]
+ mi := &file_topodata_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1090,7 +1192,7 @@ func (x *CellInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use CellInfo.ProtoReflect.Descriptor instead.
func (*CellInfo) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{10}
+ return file_topodata_proto_rawDescGZIP(), []int{11}
}
func (x *CellInfo) GetServerAddress() string {
@@ -1120,7 +1222,7 @@ type CellsAlias struct {
func (x *CellsAlias) Reset() {
*x = CellsAlias{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[11]
+ mi := &file_topodata_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1133,7 +1235,7 @@ func (x *CellsAlias) String() string {
func (*CellsAlias) ProtoMessage() {}
func (x *CellsAlias) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[11]
+ mi := &file_topodata_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1146,7 +1248,7 @@ func (x *CellsAlias) ProtoReflect() protoreflect.Message {
// Deprecated: Use CellsAlias.ProtoReflect.Descriptor instead.
func (*CellsAlias) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{11}
+ return file_topodata_proto_rawDescGZIP(), []int{12}
}
func (x *CellsAlias) GetCells() []string {
@@ -1169,7 +1271,7 @@ type TopoConfig struct {
func (x *TopoConfig) Reset() {
*x = TopoConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[12]
+ mi := &file_topodata_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1182,7 +1284,7 @@ func (x *TopoConfig) String() string {
func (*TopoConfig) ProtoMessage() {}
func (x *TopoConfig) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[12]
+ mi := &file_topodata_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1195,7 +1297,7 @@ func (x *TopoConfig) ProtoReflect() protoreflect.Message {
// Deprecated: Use TopoConfig.ProtoReflect.Descriptor instead.
func (*TopoConfig) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{12}
+ return file_topodata_proto_rawDescGZIP(), []int{13}
}
func (x *TopoConfig) GetTopoType() string {
@@ -1230,7 +1332,7 @@ type ExternalVitessCluster struct {
func (x *ExternalVitessCluster) Reset() {
*x = ExternalVitessCluster{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[13]
+ mi := &file_topodata_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1243,7 +1345,7 @@ func (x *ExternalVitessCluster) String() string {
func (*ExternalVitessCluster) ProtoMessage() {}
func (x *ExternalVitessCluster) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[13]
+ mi := &file_topodata_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1256,7 +1358,7 @@ func (x *ExternalVitessCluster) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExternalVitessCluster.ProtoReflect.Descriptor instead.
func (*ExternalVitessCluster) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{13}
+ return file_topodata_proto_rawDescGZIP(), []int{14}
}
func (x *ExternalVitessCluster) GetTopoConfig() *TopoConfig {
@@ -1278,7 +1380,7 @@ type ExternalClusters struct {
func (x *ExternalClusters) Reset() {
*x = ExternalClusters{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[14]
+ mi := &file_topodata_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1291,7 +1393,7 @@ func (x *ExternalClusters) String() string {
func (*ExternalClusters) ProtoMessage() {}
func (x *ExternalClusters) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[14]
+ mi := &file_topodata_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1304,7 +1406,7 @@ func (x *ExternalClusters) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExternalClusters.ProtoReflect.Descriptor instead.
func (*ExternalClusters) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{14}
+ return file_topodata_proto_rawDescGZIP(), []int{15}
}
func (x *ExternalClusters) GetVitessCluster() []*ExternalVitessCluster {
@@ -1337,7 +1439,7 @@ type Shard_SourceShard struct {
func (x *Shard_SourceShard) Reset() {
*x = Shard_SourceShard{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[17]
+ mi := &file_topodata_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1350,7 +1452,7 @@ func (x *Shard_SourceShard) String() string {
func (*Shard_SourceShard) ProtoMessage() {}
func (x *Shard_SourceShard) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[17]
+ mi := &file_topodata_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1419,7 +1521,7 @@ type Shard_TabletControl struct {
func (x *Shard_TabletControl) Reset() {
*x = Shard_TabletControl{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[18]
+ mi := &file_topodata_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1432,7 +1534,7 @@ func (x *Shard_TabletControl) String() string {
func (*Shard_TabletControl) ProtoMessage() {}
func (x *Shard_TabletControl) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[18]
+ mi := &file_topodata_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1494,7 +1596,7 @@ type Keyspace_ServedFrom struct {
func (x *Keyspace_ServedFrom) Reset() {
*x = Keyspace_ServedFrom{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[19]
+ mi := &file_topodata_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1507,7 +1609,7 @@ func (x *Keyspace_ServedFrom) String() string {
func (*Keyspace_ServedFrom) ProtoMessage() {}
func (x *Keyspace_ServedFrom) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[19]
+ mi := &file_topodata_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1556,7 +1658,7 @@ type ShardReplication_Node struct {
func (x *ShardReplication_Node) Reset() {
*x = ShardReplication_Node{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[20]
+ mi := &file_topodata_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1569,7 +1671,7 @@ func (x *ShardReplication_Node) String() string {
func (*ShardReplication_Node) ProtoMessage() {}
func (x *ShardReplication_Node) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[20]
+ mi := &file_topodata_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1608,7 +1710,7 @@ type SrvKeyspace_KeyspacePartition struct {
func (x *SrvKeyspace_KeyspacePartition) Reset() {
*x = SrvKeyspace_KeyspacePartition{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[21]
+ mi := &file_topodata_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1621,7 +1723,7 @@ func (x *SrvKeyspace_KeyspacePartition) String() string {
func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {}
func (x *SrvKeyspace_KeyspacePartition) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[21]
+ mi := &file_topodata_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1634,7 +1736,7 @@ func (x *SrvKeyspace_KeyspacePartition) ProtoReflect() protoreflect.Message {
// Deprecated: Use SrvKeyspace_KeyspacePartition.ProtoReflect.Descriptor instead.
func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{9, 0}
+ return file_topodata_proto_rawDescGZIP(), []int{10, 0}
}
func (x *SrvKeyspace_KeyspacePartition) GetServedType() TabletType {
@@ -1674,7 +1776,7 @@ type SrvKeyspace_ServedFrom struct {
func (x *SrvKeyspace_ServedFrom) Reset() {
*x = SrvKeyspace_ServedFrom{}
if protoimpl.UnsafeEnabled {
- mi := &file_topodata_proto_msgTypes[22]
+ mi := &file_topodata_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1687,7 +1789,7 @@ func (x *SrvKeyspace_ServedFrom) String() string {
func (*SrvKeyspace_ServedFrom) ProtoMessage() {}
func (x *SrvKeyspace_ServedFrom) ProtoReflect() protoreflect.Message {
- mi := &file_topodata_proto_msgTypes[22]
+ mi := &file_topodata_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1700,7 +1802,7 @@ func (x *SrvKeyspace_ServedFrom) ProtoReflect() protoreflect.Message {
// Deprecated: Use SrvKeyspace_ServedFrom.ProtoReflect.Descriptor instead.
func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) {
- return file_topodata_proto_rawDescGZIP(), []int{9, 1}
+ return file_topodata_proto_rawDescGZIP(), []int{10, 1}
}
func (x *SrvKeyspace_ServedFrom) GetTabletType() TabletType {
@@ -1819,7 +1921,7 @@ var file_topodata_proto_rawDesc = []byte{
0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x72, 0x6f,
0x7a, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x7a, 0x65,
0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08,
- 0x05, 0x10, 0x06, 0x22, 0x97, 0x03, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x05, 0x10, 0x06, 0x22, 0xdd, 0x03, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73,
0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65,
@@ -1836,122 +1938,141 @@ var file_topodata_proto_rawDesc = []byte{
0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62,
0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01,
0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x75, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72,
- 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c,
- 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10,
- 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x8b, 0x01,
- 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x6f,
- 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x1a, 0x40, 0x0a, 0x04, 0x4e, 0x6f, 0x64,
- 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x15,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x38, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45,
- 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
- 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x39, 0x0a, 0x04, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d,
- 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a,
- 0x11, 0x54, 0x4f, 0x50, 0x4f, 0x4c, 0x4f, 0x47, 0x59, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54,
- 0x43, 0x48, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66,
- 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65,
- 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x12,
+ 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65,
+ 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74,
+ 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74,
+ 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x75, 0x0a, 0x0a, 0x53, 0x65,
+ 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
+ 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08,
+ 0x03, 0x10, 0x04, 0x22, 0x8b, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x1a,
+ 0x40, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x15, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x38, 0x0a, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f,
+ 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
+ 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22,
+ 0x39, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+ 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e,
+ 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x4f, 0x50, 0x4f, 0x4c, 0x4f, 0x47, 0x59, 0x5f,
+ 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0e, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b,
+ 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09,
+ 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x34, 0x0a,
+ 0x16, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64,
+ 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x0f, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65,
+ 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
+ 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12,
+ 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c,
+ 0x66, 0x22, 0xb6, 0x04, 0x0a, 0x0b, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a,
+ 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x73, 0x65,
+ 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f,
+ 0x6d, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x44, 0x0a,
+ 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x1a, 0xe1, 0x01, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
+ 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72,
+ 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72,
+ 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x15, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61,
- 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xf0, 0x03,
- 0x0a, 0x0b, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x47, 0x0a,
- 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0a, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x1a, 0xe1, 0x01, 0x0a, 0x11, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x35, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x18, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x15, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x1a, 0x5f, 0x0a,
- 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4a, 0x04,
- 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06,
- 0x22, 0x4b, 0x0a, 0x08, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72,
- 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x22, 0x0a,
- 0x0a, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63,
- 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c,
- 0x73, 0x22, 0x55, 0x0a, 0x0a, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x74, 0x6f,
- 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0e,
- 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00,
- 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x01, 0x2a, 0x9d,
- 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
- 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52,
- 0x49, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x53, 0x54, 0x45,
- 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x10, 0x02,
- 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05,
- 0x42, 0x41, 0x54, 0x43, 0x48, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x41, 0x52, 0x45,
- 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, 0x45, 0x4e, 0x54,
- 0x41, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x06,
- 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, 0x12, 0x0b, 0x0a,
- 0x07, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x42, 0x38,
- 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74,
- 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x1a, 0x5f, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04,
+ 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x4b, 0x0a, 0x08, 0x43, 0x65,
+ 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a,
+ 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f,
+ 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x22, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x6c, 0x73,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x55, 0x0a, 0x0a, 0x54,
+ 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70,
+ 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f,
+ 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x12,
+ 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f,
+ 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69,
+ 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x74,
+ 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
+ 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52,
+ 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2a, 0x28,
+ 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
+ 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e,
+ 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x01, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+ 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, 0x59, 0x10,
+ 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a,
+ 0x07, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x44,
+ 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x41, 0x54, 0x43, 0x48, 0x10,
+ 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x41, 0x52, 0x45, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c,
+ 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x4c, 0x10, 0x05, 0x12, 0x0a,
+ 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45,
+ 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x41, 0x49, 0x4e,
+ 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x42, 0x38, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76,
+ 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x25, 0x76, 0x69, 0x74,
+ 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1967,7 +2088,7 @@ func file_topodata_proto_rawDescGZIP() []byte {
}
var file_topodata_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 24)
var file_topodata_proto_goTypes = []interface{}{
(KeyspaceType)(0), // 0: topodata.KeyspaceType
(TabletType)(0), // 1: topodata.TabletType
@@ -1981,59 +2102,62 @@ var file_topodata_proto_goTypes = []interface{}{
(*ShardReplicationError)(nil), // 9: topodata.ShardReplicationError
(*ShardReference)(nil), // 10: topodata.ShardReference
(*ShardTabletControl)(nil), // 11: topodata.ShardTabletControl
- (*SrvKeyspace)(nil), // 12: topodata.SrvKeyspace
- (*CellInfo)(nil), // 13: topodata.CellInfo
- (*CellsAlias)(nil), // 14: topodata.CellsAlias
- (*TopoConfig)(nil), // 15: topodata.TopoConfig
- (*ExternalVitessCluster)(nil), // 16: topodata.ExternalVitessCluster
- (*ExternalClusters)(nil), // 17: topodata.ExternalClusters
- nil, // 18: topodata.Tablet.PortMapEntry
- nil, // 19: topodata.Tablet.TagsEntry
- (*Shard_SourceShard)(nil), // 20: topodata.Shard.SourceShard
- (*Shard_TabletControl)(nil), // 21: topodata.Shard.TabletControl
- (*Keyspace_ServedFrom)(nil), // 22: topodata.Keyspace.ServedFrom
- (*ShardReplication_Node)(nil), // 23: topodata.ShardReplication.Node
- (*SrvKeyspace_KeyspacePartition)(nil), // 24: topodata.SrvKeyspace.KeyspacePartition
- (*SrvKeyspace_ServedFrom)(nil), // 25: topodata.SrvKeyspace.ServedFrom
- (*vttime.Time)(nil), // 26: vttime.Time
+ (*ThrottlerConfig)(nil), // 12: topodata.ThrottlerConfig
+ (*SrvKeyspace)(nil), // 13: topodata.SrvKeyspace
+ (*CellInfo)(nil), // 14: topodata.CellInfo
+ (*CellsAlias)(nil), // 15: topodata.CellsAlias
+ (*TopoConfig)(nil), // 16: topodata.TopoConfig
+ (*ExternalVitessCluster)(nil), // 17: topodata.ExternalVitessCluster
+ (*ExternalClusters)(nil), // 18: topodata.ExternalClusters
+ nil, // 19: topodata.Tablet.PortMapEntry
+ nil, // 20: topodata.Tablet.TagsEntry
+ (*Shard_SourceShard)(nil), // 21: topodata.Shard.SourceShard
+ (*Shard_TabletControl)(nil), // 22: topodata.Shard.TabletControl
+ (*Keyspace_ServedFrom)(nil), // 23: topodata.Keyspace.ServedFrom
+ (*ShardReplication_Node)(nil), // 24: topodata.ShardReplication.Node
+ (*SrvKeyspace_KeyspacePartition)(nil), // 25: topodata.SrvKeyspace.KeyspacePartition
+ (*SrvKeyspace_ServedFrom)(nil), // 26: topodata.SrvKeyspace.ServedFrom
+ (*vttime.Time)(nil), // 27: vttime.Time
}
var file_topodata_proto_depIdxs = []int32{
4, // 0: topodata.Tablet.alias:type_name -> topodata.TabletAlias
- 18, // 1: topodata.Tablet.port_map:type_name -> topodata.Tablet.PortMapEntry
+ 19, // 1: topodata.Tablet.port_map:type_name -> topodata.Tablet.PortMapEntry
3, // 2: topodata.Tablet.key_range:type_name -> topodata.KeyRange
1, // 3: topodata.Tablet.type:type_name -> topodata.TabletType
- 19, // 4: topodata.Tablet.tags:type_name -> topodata.Tablet.TagsEntry
- 26, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time
+ 20, // 4: topodata.Tablet.tags:type_name -> topodata.Tablet.TagsEntry
+ 27, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time
4, // 6: topodata.Shard.primary_alias:type_name -> topodata.TabletAlias
- 26, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time
+ 27, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time
3, // 8: topodata.Shard.key_range:type_name -> topodata.KeyRange
- 20, // 9: topodata.Shard.source_shards:type_name -> topodata.Shard.SourceShard
- 21, // 10: topodata.Shard.tablet_controls:type_name -> topodata.Shard.TabletControl
- 22, // 11: topodata.Keyspace.served_froms:type_name -> topodata.Keyspace.ServedFrom
+ 21, // 9: topodata.Shard.source_shards:type_name -> topodata.Shard.SourceShard
+ 22, // 10: topodata.Shard.tablet_controls:type_name -> topodata.Shard.TabletControl
+ 23, // 11: topodata.Keyspace.served_froms:type_name -> topodata.Keyspace.ServedFrom
0, // 12: topodata.Keyspace.keyspace_type:type_name -> topodata.KeyspaceType
- 26, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time
- 23, // 14: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node
- 2, // 15: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type
- 4, // 16: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias
- 3, // 17: topodata.ShardReference.key_range:type_name -> topodata.KeyRange
- 3, // 18: topodata.ShardTabletControl.key_range:type_name -> topodata.KeyRange
- 24, // 19: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition
- 25, // 20: topodata.SrvKeyspace.served_from:type_name -> topodata.SrvKeyspace.ServedFrom
- 15, // 21: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig
- 16, // 22: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster
- 3, // 23: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange
- 1, // 24: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType
- 1, // 25: topodata.Keyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType
- 4, // 26: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias
- 1, // 27: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType
- 10, // 28: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference
- 11, // 29: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl
- 1, // 30: topodata.SrvKeyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType
- 31, // [31:31] is the sub-list for method output_type
- 31, // [31:31] is the sub-list for method input_type
- 31, // [31:31] is the sub-list for extension type_name
- 31, // [31:31] is the sub-list for extension extendee
- 0, // [0:31] is the sub-list for field type_name
+ 27, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time
+ 12, // 14: topodata.Keyspace.throttler_config:type_name -> topodata.ThrottlerConfig
+ 24, // 15: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node
+ 2, // 16: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type
+ 4, // 17: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias
+ 3, // 18: topodata.ShardReference.key_range:type_name -> topodata.KeyRange
+ 3, // 19: topodata.ShardTabletControl.key_range:type_name -> topodata.KeyRange
+ 25, // 20: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition
+ 26, // 21: topodata.SrvKeyspace.served_from:type_name -> topodata.SrvKeyspace.ServedFrom
+ 12, // 22: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig
+ 16, // 23: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig
+ 17, // 24: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster
+ 3, // 25: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange
+ 1, // 26: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType
+ 1, // 27: topodata.Keyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType
+ 4, // 28: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias
+ 1, // 29: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType
+ 10, // 30: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference
+ 11, // 31: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl
+ 1, // 32: topodata.SrvKeyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType
+ 33, // [33:33] is the sub-list for method output_type
+ 33, // [33:33] is the sub-list for method input_type
+ 33, // [33:33] is the sub-list for extension type_name
+ 33, // [33:33] is the sub-list for extension extendee
+ 0, // [0:33] is the sub-list for field type_name
}
func init() { file_topodata_proto_init() }
@@ -2151,7 +2275,7 @@ func file_topodata_proto_init() {
}
}
file_topodata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SrvKeyspace); i {
+ switch v := v.(*ThrottlerConfig); i {
case 0:
return &v.state
case 1:
@@ -2163,7 +2287,7 @@ func file_topodata_proto_init() {
}
}
file_topodata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CellInfo); i {
+ switch v := v.(*SrvKeyspace); i {
case 0:
return &v.state
case 1:
@@ -2175,7 +2299,7 @@ func file_topodata_proto_init() {
}
}
file_topodata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CellsAlias); i {
+ switch v := v.(*CellInfo); i {
case 0:
return &v.state
case 1:
@@ -2187,7 +2311,7 @@ func file_topodata_proto_init() {
}
}
file_topodata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TopoConfig); i {
+ switch v := v.(*CellsAlias); i {
case 0:
return &v.state
case 1:
@@ -2199,7 +2323,7 @@ func file_topodata_proto_init() {
}
}
file_topodata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExternalVitessCluster); i {
+ switch v := v.(*TopoConfig); i {
case 0:
return &v.state
case 1:
@@ -2211,6 +2335,18 @@ func file_topodata_proto_init() {
}
}
file_topodata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExternalVitessCluster); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_topodata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExternalClusters); i {
case 0:
return &v.state
@@ -2222,7 +2358,7 @@ func file_topodata_proto_init() {
return nil
}
}
- file_topodata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_topodata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Shard_SourceShard); i {
case 0:
return &v.state
@@ -2234,7 +2370,7 @@ func file_topodata_proto_init() {
return nil
}
}
- file_topodata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_topodata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Shard_TabletControl); i {
case 0:
return &v.state
@@ -2246,7 +2382,7 @@ func file_topodata_proto_init() {
return nil
}
}
- file_topodata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_topodata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Keyspace_ServedFrom); i {
case 0:
return &v.state
@@ -2258,7 +2394,7 @@ func file_topodata_proto_init() {
return nil
}
}
- file_topodata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_topodata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplication_Node); i {
case 0:
return &v.state
@@ -2270,7 +2406,7 @@ func file_topodata_proto_init() {
return nil
}
}
- file_topodata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ file_topodata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SrvKeyspace_KeyspacePartition); i {
case 0:
return &v.state
@@ -2282,7 +2418,7 @@ func file_topodata_proto_init() {
return nil
}
}
- file_topodata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_topodata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SrvKeyspace_ServedFrom); i {
case 0:
return &v.state
@@ -2301,7 +2437,7 @@ func file_topodata_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_topodata_proto_rawDesc,
NumEnums: 3,
- NumMessages: 23,
+ NumMessages: 24,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/go/vt/proto/topodata/topodata_vtproto.pb.go b/go/vt/proto/topodata/topodata_vtproto.pb.go
index 4b989d5021b..14b3d524afb 100644
--- a/go/vt/proto/topodata/topodata_vtproto.pb.go
+++ b/go/vt/proto/topodata/topodata_vtproto.pb.go
@@ -1,13 +1,15 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: topodata.proto
package topodata
import (
+ binary "encoding/binary"
fmt "fmt"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
+ math "math"
bits "math/bits"
vttime "vitess.io/vitess/go/vt/proto/vttime"
)
@@ -587,6 +589,16 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.ThrottlerConfig != nil {
+ size, err := m.ThrottlerConfig.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x4a
+ }
if len(m.DurabilityPolicy) > 0 {
i -= len(m.DurabilityPolicy)
copy(dAtA[i:], m.DurabilityPolicy)
@@ -877,6 +889,72 @@ func (m *ShardTabletControl) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *ThrottlerConfig) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ThrottlerConfig) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ThrottlerConfig) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.CheckAsCheckSelf {
+ i--
+ if m.CheckAsCheckSelf {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.CustomQuery) > 0 {
+ i -= len(m.CustomQuery)
+ copy(dAtA[i:], m.CustomQuery)
+ i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Threshold != 0 {
+ i -= 8
+ binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold))))
+ i--
+ dAtA[i] = 0x11
+ }
+ if m.Enabled {
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *SrvKeyspace_KeyspacePartition) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -1014,6 +1092,16 @@ func (m *SrvKeyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.ThrottlerConfig != nil {
+ size, err := m.ThrottlerConfig.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ }
if len(m.ServedFrom) > 0 {
for iNdEx := len(m.ServedFrom) - 1; iNdEx >= 0; iNdEx-- {
size, err := m.ServedFrom[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
@@ -1297,9 +1385,7 @@ func (m *KeyRange) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1316,9 +1402,7 @@ func (m *TabletAlias) SizeVT() (n int) {
if m.Uid != 0 {
n += 1 + sov(uint64(m.Uid))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1389,9 +1473,7 @@ func (m *Tablet) SizeVT() (n int) {
if m.DefaultConnCollation != 0 {
n += 2 + sov(uint64(m.DefaultConnCollation))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1422,9 +1504,7 @@ func (m *Shard_SourceShard) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1452,9 +1532,7 @@ func (m *Shard_TabletControl) SizeVT() (n int) {
if m.Frozen {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1491,9 +1569,7 @@ func (m *Shard) SizeVT() (n int) {
l = m.PrimaryTermStartTime.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1516,9 +1592,7 @@ func (m *Keyspace_ServedFrom) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1549,9 +1623,11 @@ func (m *Keyspace) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ if m.ThrottlerConfig != nil {
+ l = m.ThrottlerConfig.SizeVT()
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
return n
}
@@ -1565,9 +1641,7 @@ func (m *ShardReplication_Node) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1583,9 +1657,7 @@ func (m *ShardReplication) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1602,9 +1674,7 @@ func (m *ShardReplicationError) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1622,9 +1692,7 @@ func (m *ShardReference) SizeVT() (n int) {
l = m.KeyRange.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1645,9 +1713,30 @@ func (m *ShardTabletControl) SizeVT() (n int) {
if m.QueryServiceDisabled {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ThrottlerConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
}
+ var l int
+ _ = l
+ if m.Enabled {
+ n += 2
+ }
+ if m.Threshold != 0 {
+ n += 9
+ }
+ l = len(m.CustomQuery)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.CheckAsCheckSelf {
+ n += 2
+ }
+ n += len(m.unknownFields)
return n
}
@@ -1672,9 +1761,7 @@ func (m *SrvKeyspace_KeyspacePartition) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1691,9 +1778,7 @@ func (m *SrvKeyspace_ServedFrom) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1715,9 +1800,11 @@ func (m *SrvKeyspace) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ if m.ThrottlerConfig != nil {
+ l = m.ThrottlerConfig.SizeVT()
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
return n
}
@@ -1735,9 +1822,7 @@ func (m *CellInfo) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1753,9 +1838,7 @@ func (m *CellsAlias) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1777,9 +1860,7 @@ func (m *TopoConfig) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1793,9 +1874,7 @@ func (m *ExternalVitessCluster) SizeVT() (n int) {
l = m.TopoConfig.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1811,9 +1890,7 @@ func (m *ExternalClusters) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -3611,6 +3688,42 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error {
}
m.DurabilityPolicy = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ThrottlerConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ThrottlerConfig == nil {
+ m.ThrottlerConfig = &ThrottlerConfig{}
+ }
+ if err := m.ThrottlerConfig.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -4169,6 +4282,140 @@ func (m *ShardTabletControl) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *ThrottlerConfig) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ThrottlerConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ThrottlerConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 2:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Threshold = float64(math.Float64frombits(v))
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CustomQuery", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CustomQuery = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckSelf", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CheckAsCheckSelf = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *SrvKeyspace_KeyspacePartition) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -4506,6 +4753,42 @@ func (m *SrvKeyspace) UnmarshalVT(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ThrottlerConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ThrottlerConfig == nil {
+ m.ThrottlerConfig = &ThrottlerConfig{}
+ }
+ if err := m.ThrottlerConfig.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -5045,6 +5328,7 @@ func (m *ExternalClusters) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go
index a6f44d9c23e..e86c1613682 100644
--- a/go/vt/proto/vschema/vschema.pb.go
+++ b/go/vt/proto/vschema/vschema.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vschema.proto
@@ -324,6 +324,8 @@ type Table struct {
// an authoritative list for the table. This allows
// us to expand 'select *' expressions.
ColumnListAuthoritative bool `protobuf:"varint,6,opt,name=column_list_authoritative,json=columnListAuthoritative,proto3" json:"column_list_authoritative,omitempty"`
+ // reference tables may optionally indicate their source table.
+ Source string `protobuf:"bytes,7,opt,name=source,proto3" json:"source,omitempty"`
}
func (x *Table) Reset() {
@@ -400,6 +402,13 @@ func (x *Table) GetColumnListAuthoritative() bool {
return false
}
+func (x *Table) GetSource() string {
+ if x != nil {
+ return x.Source
+ }
+ return ""
+}
+
// ColumnVindex is used to associate a column to a vindex.
type ColumnVindex struct {
state protoimpl.MessageState
@@ -804,7 +813,7 @@ var file_vschema_proto_rawDesc = []byte{
0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0x99, 0x02, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
+ 0xb1, 0x02, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a,
0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73,
0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
@@ -821,54 +830,55 @@ var file_vschema_proto_rawDesc = []byte{
0x3a, 0x0a, 0x19, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x61,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x06, 0x20, 0x01,
0x28, 0x08, 0x52, 0x17, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75,
- 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x22, 0x54, 0x0a, 0x0c, 0x43,
- 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x63,
- 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c,
- 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d,
- 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
- 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65,
- 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65,
- 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x3d, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e,
- 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa7, 0x02, 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
- 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52,
- 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c,
- 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74,
- 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x4f,
- 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
- 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0x44, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52,
- 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05,
- 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f,
- 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x6f,
- 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f,
- 0x0a, 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e,
- 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x22, 0x54, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x41, 0x75, 0x74,
+ 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x3d,
+ 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa7, 0x02,
+ 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x09,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x22, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x3a,
+ 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e,
+ 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65,
+ 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e,
+ 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x4f, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x05,
+ 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6e, 0x0a,
+ 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c,
+ 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x42, 0x26, 0x5a,
+ 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73,
+ 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/go/vt/proto/vschema/vschema_vtproto.pb.go b/go/vt/proto/vschema/vschema_vtproto.pb.go
index 9aa144453ec..1b461eba1ff 100644
--- a/go/vt/proto/vschema/vschema_vtproto.pb.go
+++ b/go/vt/proto/vschema/vschema_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: vschema.proto
package vschema
@@ -306,6 +306,13 @@ func (m *Table) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.Source) > 0 {
+ i -= len(m.Source)
+ copy(dAtA[i:], m.Source)
+ i = encodeVarint(dAtA, i, uint64(len(m.Source)))
+ i--
+ dAtA[i] = 0x3a
+ }
if m.ColumnListAuthoritative {
i--
if m.ColumnListAuthoritative {
@@ -712,9 +719,7 @@ func (m *RoutingRules) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -734,9 +739,7 @@ func (m *RoutingRule) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -778,9 +781,7 @@ func (m *Keyspace) SizeVT() (n int) {
if m.RequireExplicitRouting {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -806,9 +807,7 @@ func (m *Vindex) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -845,9 +844,11 @@ func (m *Table) SizeVT() (n int) {
if m.ColumnListAuthoritative {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.Source)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
return n
}
@@ -871,9 +872,7 @@ func (m *ColumnVindex) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -891,9 +890,7 @@ func (m *AutoIncrement) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -910,9 +907,7 @@ func (m *Column) SizeVT() (n int) {
if m.Type != 0 {
n += 1 + sov(uint64(m.Type))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -943,9 +938,7 @@ func (m *SrvVSchema) SizeVT() (n int) {
l = m.ShardRoutingRules.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -961,9 +954,7 @@ func (m *ShardRoutingRules) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -985,9 +976,7 @@ func (m *ShardRoutingRule) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2005,6 +1994,38 @@ func (m *Table) UnmarshalVT(dAtA []byte) error {
}
}
m.ColumnListAuthoritative = bool(v != 0)
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Source = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -2875,6 +2896,7 @@ func (m *ShardRoutingRule) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vtadmin/vtadmin.pb.go b/go/vt/proto/vtadmin/vtadmin.pb.go
index c9f075b54c9..8b961cce4ac 100644
--- a/go/vt/proto/vtadmin/vtadmin.pb.go
+++ b/go/vt/proto/vtadmin/vtadmin.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vtadmin.proto
@@ -2081,6 +2081,61 @@ func (x *GetClustersResponse) GetClusters() []*Cluster {
return nil
}
+type GetFullStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Alias *topodata.TabletAlias `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"`
+}
+
+func (x *GetFullStatusRequest) Reset() {
+ *x = GetFullStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFullStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFullStatusRequest) ProtoMessage() {}
+
+func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead.
+func (*GetFullStatusRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *GetFullStatusRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *GetFullStatusRequest) GetAlias() *topodata.TabletAlias {
+ if x != nil {
+ return x.Alias
+ }
+ return nil
+}
+
type GetGatesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2092,7 +2147,7 @@ type GetGatesRequest struct {
func (x *GetGatesRequest) Reset() {
*x = GetGatesRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[33]
+ mi := &file_vtadmin_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2105,7 +2160,7 @@ func (x *GetGatesRequest) String() string {
func (*GetGatesRequest) ProtoMessage() {}
func (x *GetGatesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[33]
+ mi := &file_vtadmin_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2118,7 +2173,7 @@ func (x *GetGatesRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetGatesRequest.ProtoReflect.Descriptor instead.
func (*GetGatesRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{33}
+ return file_vtadmin_proto_rawDescGZIP(), []int{34}
}
func (x *GetGatesRequest) GetClusterIds() []string {
@@ -2139,7 +2194,7 @@ type GetGatesResponse struct {
func (x *GetGatesResponse) Reset() {
*x = GetGatesResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[34]
+ mi := &file_vtadmin_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2152,7 +2207,7 @@ func (x *GetGatesResponse) String() string {
func (*GetGatesResponse) ProtoMessage() {}
func (x *GetGatesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[34]
+ mi := &file_vtadmin_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2165,7 +2220,7 @@ func (x *GetGatesResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetGatesResponse.ProtoReflect.Descriptor instead.
func (*GetGatesResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{34}
+ return file_vtadmin_proto_rawDescGZIP(), []int{35}
}
func (x *GetGatesResponse) GetGates() []*VTGate {
@@ -2187,7 +2242,7 @@ type GetKeyspaceRequest struct {
func (x *GetKeyspaceRequest) Reset() {
*x = GetKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[35]
+ mi := &file_vtadmin_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2200,7 +2255,7 @@ func (x *GetKeyspaceRequest) String() string {
func (*GetKeyspaceRequest) ProtoMessage() {}
func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[35]
+ mi := &file_vtadmin_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2213,7 +2268,7 @@ func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{35}
+ return file_vtadmin_proto_rawDescGZIP(), []int{36}
}
func (x *GetKeyspaceRequest) GetClusterId() string {
@@ -2241,7 +2296,7 @@ type GetKeyspacesRequest struct {
func (x *GetKeyspacesRequest) Reset() {
*x = GetKeyspacesRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[36]
+ mi := &file_vtadmin_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2254,7 +2309,7 @@ func (x *GetKeyspacesRequest) String() string {
func (*GetKeyspacesRequest) ProtoMessage() {}
func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[36]
+ mi := &file_vtadmin_proto_msgTypes[37]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2267,7 +2322,7 @@ func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead.
func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{36}
+ return file_vtadmin_proto_rawDescGZIP(), []int{37}
}
func (x *GetKeyspacesRequest) GetClusterIds() []string {
@@ -2288,7 +2343,7 @@ type GetKeyspacesResponse struct {
func (x *GetKeyspacesResponse) Reset() {
*x = GetKeyspacesResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[37]
+ mi := &file_vtadmin_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2301,7 +2356,7 @@ func (x *GetKeyspacesResponse) String() string {
func (*GetKeyspacesResponse) ProtoMessage() {}
func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[37]
+ mi := &file_vtadmin_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2314,7 +2369,7 @@ func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead.
func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{37}
+ return file_vtadmin_proto_rawDescGZIP(), []int{38}
}
func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace {
@@ -2338,7 +2393,7 @@ type GetSchemaRequest struct {
func (x *GetSchemaRequest) Reset() {
*x = GetSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[38]
+ mi := &file_vtadmin_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2351,7 +2406,7 @@ func (x *GetSchemaRequest) String() string {
func (*GetSchemaRequest) ProtoMessage() {}
func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[38]
+ mi := &file_vtadmin_proto_msgTypes[39]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2364,7 +2419,7 @@ func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{38}
+ return file_vtadmin_proto_rawDescGZIP(), []int{39}
}
func (x *GetSchemaRequest) GetClusterId() string {
@@ -2407,7 +2462,7 @@ type GetSchemasRequest struct {
func (x *GetSchemasRequest) Reset() {
*x = GetSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[39]
+ mi := &file_vtadmin_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2420,7 +2475,7 @@ func (x *GetSchemasRequest) String() string {
func (*GetSchemasRequest) ProtoMessage() {}
func (x *GetSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[39]
+ mi := &file_vtadmin_proto_msgTypes[40]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2433,7 +2488,7 @@ func (x *GetSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemasRequest.ProtoReflect.Descriptor instead.
func (*GetSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{39}
+ return file_vtadmin_proto_rawDescGZIP(), []int{40}
}
func (x *GetSchemasRequest) GetClusterIds() []string {
@@ -2461,7 +2516,7 @@ type GetSchemasResponse struct {
func (x *GetSchemasResponse) Reset() {
*x = GetSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[40]
+ mi := &file_vtadmin_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2474,7 +2529,7 @@ func (x *GetSchemasResponse) String() string {
func (*GetSchemasResponse) ProtoMessage() {}
func (x *GetSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[40]
+ mi := &file_vtadmin_proto_msgTypes[41]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2487,7 +2542,7 @@ func (x *GetSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemasResponse.ProtoReflect.Descriptor instead.
func (*GetSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{40}
+ return file_vtadmin_proto_rawDescGZIP(), []int{41}
}
func (x *GetSchemasResponse) GetSchemas() []*Schema {
@@ -2517,7 +2572,7 @@ type GetShardReplicationPositionsRequest struct {
func (x *GetShardReplicationPositionsRequest) Reset() {
*x = GetShardReplicationPositionsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[41]
+ mi := &file_vtadmin_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2530,7 +2585,7 @@ func (x *GetShardReplicationPositionsRequest) String() string {
func (*GetShardReplicationPositionsRequest) ProtoMessage() {}
func (x *GetShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[41]
+ mi := &file_vtadmin_proto_msgTypes[42]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2543,7 +2598,7 @@ func (x *GetShardReplicationPositionsRequest) ProtoReflect() protoreflect.Messag
// Deprecated: Use GetShardReplicationPositionsRequest.ProtoReflect.Descriptor instead.
func (*GetShardReplicationPositionsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{41}
+ return file_vtadmin_proto_rawDescGZIP(), []int{42}
}
func (x *GetShardReplicationPositionsRequest) GetClusterIds() []string {
@@ -2578,7 +2633,7 @@ type GetShardReplicationPositionsResponse struct {
func (x *GetShardReplicationPositionsResponse) Reset() {
*x = GetShardReplicationPositionsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[42]
+ mi := &file_vtadmin_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2591,7 +2646,7 @@ func (x *GetShardReplicationPositionsResponse) String() string {
func (*GetShardReplicationPositionsResponse) ProtoMessage() {}
func (x *GetShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[42]
+ mi := &file_vtadmin_proto_msgTypes[43]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2604,7 +2659,7 @@ func (x *GetShardReplicationPositionsResponse) ProtoReflect() protoreflect.Messa
// Deprecated: Use GetShardReplicationPositionsResponse.ProtoReflect.Descriptor instead.
func (*GetShardReplicationPositionsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{42}
+ return file_vtadmin_proto_rawDescGZIP(), []int{43}
}
func (x *GetShardReplicationPositionsResponse) GetReplicationPositions() []*ClusterShardReplicationPosition {
@@ -2626,7 +2681,7 @@ type GetSrvVSchemaRequest struct {
func (x *GetSrvVSchemaRequest) Reset() {
*x = GetSrvVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[43]
+ mi := &file_vtadmin_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2639,7 +2694,7 @@ func (x *GetSrvVSchemaRequest) String() string {
func (*GetSrvVSchemaRequest) ProtoMessage() {}
func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[43]
+ mi := &file_vtadmin_proto_msgTypes[44]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2652,7 +2707,7 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{43}
+ return file_vtadmin_proto_rawDescGZIP(), []int{44}
}
func (x *GetSrvVSchemaRequest) GetClusterId() string {
@@ -2681,7 +2736,7 @@ type GetSrvVSchemasRequest struct {
func (x *GetSrvVSchemasRequest) Reset() {
*x = GetSrvVSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[44]
+ mi := &file_vtadmin_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2694,7 +2749,7 @@ func (x *GetSrvVSchemasRequest) String() string {
func (*GetSrvVSchemasRequest) ProtoMessage() {}
func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[44]
+ mi := &file_vtadmin_proto_msgTypes[45]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2707,7 +2762,7 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{44}
+ return file_vtadmin_proto_rawDescGZIP(), []int{45}
}
func (x *GetSrvVSchemasRequest) GetClusterIds() []string {
@@ -2735,7 +2790,7 @@ type GetSrvVSchemasResponse struct {
func (x *GetSrvVSchemasResponse) Reset() {
*x = GetSrvVSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[45]
+ mi := &file_vtadmin_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2748,7 +2803,7 @@ func (x *GetSrvVSchemasResponse) String() string {
func (*GetSrvVSchemasResponse) ProtoMessage() {}
func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[45]
+ mi := &file_vtadmin_proto_msgTypes[46]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2761,7 +2816,7 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{45}
+ return file_vtadmin_proto_rawDescGZIP(), []int{46}
}
func (x *GetSrvVSchemasResponse) GetSrvVSchemas() []*SrvVSchema {
@@ -2783,7 +2838,7 @@ type GetSchemaTableSizeOptions struct {
func (x *GetSchemaTableSizeOptions) Reset() {
*x = GetSchemaTableSizeOptions{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[46]
+ mi := &file_vtadmin_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2796,7 +2851,7 @@ func (x *GetSchemaTableSizeOptions) String() string {
func (*GetSchemaTableSizeOptions) ProtoMessage() {}
func (x *GetSchemaTableSizeOptions) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[46]
+ mi := &file_vtadmin_proto_msgTypes[47]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2809,7 +2864,7 @@ func (x *GetSchemaTableSizeOptions) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemaTableSizeOptions.ProtoReflect.Descriptor instead.
func (*GetSchemaTableSizeOptions) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{46}
+ return file_vtadmin_proto_rawDescGZIP(), []int{47}
}
func (x *GetSchemaTableSizeOptions) GetAggregateSizes() bool {
@@ -2842,7 +2897,7 @@ type GetTabletRequest struct {
func (x *GetTabletRequest) Reset() {
*x = GetTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[47]
+ mi := &file_vtadmin_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2855,7 +2910,7 @@ func (x *GetTabletRequest) String() string {
func (*GetTabletRequest) ProtoMessage() {}
func (x *GetTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[47]
+ mi := &file_vtadmin_proto_msgTypes[48]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2868,7 +2923,7 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead.
func (*GetTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{47}
+ return file_vtadmin_proto_rawDescGZIP(), []int{48}
}
func (x *GetTabletRequest) GetAlias() *topodata.TabletAlias {
@@ -2896,7 +2951,7 @@ type GetTabletsRequest struct {
func (x *GetTabletsRequest) Reset() {
*x = GetTabletsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[48]
+ mi := &file_vtadmin_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2909,7 +2964,7 @@ func (x *GetTabletsRequest) String() string {
func (*GetTabletsRequest) ProtoMessage() {}
func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[48]
+ mi := &file_vtadmin_proto_msgTypes[49]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2922,7 +2977,7 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead.
func (*GetTabletsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{48}
+ return file_vtadmin_proto_rawDescGZIP(), []int{49}
}
func (x *GetTabletsRequest) GetClusterIds() []string {
@@ -2943,7 +2998,7 @@ type GetTabletsResponse struct {
func (x *GetTabletsResponse) Reset() {
*x = GetTabletsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[49]
+ mi := &file_vtadmin_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2956,7 +3011,7 @@ func (x *GetTabletsResponse) String() string {
func (*GetTabletsResponse) ProtoMessage() {}
func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[49]
+ mi := &file_vtadmin_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2969,7 +3024,7 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead.
func (*GetTabletsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{49}
+ return file_vtadmin_proto_rawDescGZIP(), []int{50}
}
func (x *GetTabletsResponse) GetTablets() []*Tablet {
@@ -2979,6 +3034,61 @@ func (x *GetTabletsResponse) GetTablets() []*Tablet {
return nil
}
+type GetTopologyPathRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *GetTopologyPathRequest) Reset() {
+ *x = GetTopologyPathRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopologyPathRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopologyPathRequest) ProtoMessage() {}
+
+func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[51]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead.
+func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{51}
+}
+
+func (x *GetTopologyPathRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *GetTopologyPathRequest) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
type GetVSchemaRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2991,7 +3101,7 @@ type GetVSchemaRequest struct {
func (x *GetVSchemaRequest) Reset() {
*x = GetVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[50]
+ mi := &file_vtadmin_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3004,7 +3114,7 @@ func (x *GetVSchemaRequest) String() string {
func (*GetVSchemaRequest) ProtoMessage() {}
func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[50]
+ mi := &file_vtadmin_proto_msgTypes[52]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3017,7 +3127,7 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{50}
+ return file_vtadmin_proto_rawDescGZIP(), []int{52}
}
func (x *GetVSchemaRequest) GetClusterId() string {
@@ -3045,7 +3155,7 @@ type GetVSchemasRequest struct {
func (x *GetVSchemasRequest) Reset() {
*x = GetVSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[51]
+ mi := &file_vtadmin_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3058,7 +3168,7 @@ func (x *GetVSchemasRequest) String() string {
func (*GetVSchemasRequest) ProtoMessage() {}
func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[51]
+ mi := &file_vtadmin_proto_msgTypes[53]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3071,7 +3181,7 @@ func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemasRequest.ProtoReflect.Descriptor instead.
func (*GetVSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{51}
+ return file_vtadmin_proto_rawDescGZIP(), []int{53}
}
func (x *GetVSchemasRequest) GetClusterIds() []string {
@@ -3092,7 +3202,7 @@ type GetVSchemasResponse struct {
func (x *GetVSchemasResponse) Reset() {
*x = GetVSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[52]
+ mi := &file_vtadmin_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3105,7 +3215,7 @@ func (x *GetVSchemasResponse) String() string {
func (*GetVSchemasResponse) ProtoMessage() {}
func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[52]
+ mi := &file_vtadmin_proto_msgTypes[54]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3118,7 +3228,7 @@ func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemasResponse.ProtoReflect.Descriptor instead.
func (*GetVSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{52}
+ return file_vtadmin_proto_rawDescGZIP(), []int{54}
}
func (x *GetVSchemasResponse) GetVSchemas() []*VSchema {
@@ -3139,7 +3249,7 @@ type GetVtctldsRequest struct {
func (x *GetVtctldsRequest) Reset() {
*x = GetVtctldsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[53]
+ mi := &file_vtadmin_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3152,7 +3262,7 @@ func (x *GetVtctldsRequest) String() string {
func (*GetVtctldsRequest) ProtoMessage() {}
func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[53]
+ mi := &file_vtadmin_proto_msgTypes[55]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3165,7 +3275,7 @@ func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVtctldsRequest.ProtoReflect.Descriptor instead.
func (*GetVtctldsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{53}
+ return file_vtadmin_proto_rawDescGZIP(), []int{55}
}
func (x *GetVtctldsRequest) GetClusterIds() []string {
@@ -3186,7 +3296,7 @@ type GetVtctldsResponse struct {
func (x *GetVtctldsResponse) Reset() {
*x = GetVtctldsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[54]
+ mi := &file_vtadmin_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3199,7 +3309,7 @@ func (x *GetVtctldsResponse) String() string {
func (*GetVtctldsResponse) ProtoMessage() {}
func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[54]
+ mi := &file_vtadmin_proto_msgTypes[56]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3212,7 +3322,7 @@ func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVtctldsResponse.ProtoReflect.Descriptor instead.
func (*GetVtctldsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{54}
+ return file_vtadmin_proto_rawDescGZIP(), []int{56}
}
func (x *GetVtctldsResponse) GetVtctlds() []*Vtctld {
@@ -3236,7 +3346,7 @@ type GetWorkflowRequest struct {
func (x *GetWorkflowRequest) Reset() {
*x = GetWorkflowRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[55]
+ mi := &file_vtadmin_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3249,7 +3359,7 @@ func (x *GetWorkflowRequest) String() string {
func (*GetWorkflowRequest) ProtoMessage() {}
func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[55]
+ mi := &file_vtadmin_proto_msgTypes[57]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3262,7 +3372,7 @@ func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowRequest.ProtoReflect.Descriptor instead.
func (*GetWorkflowRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{55}
+ return file_vtadmin_proto_rawDescGZIP(), []int{57}
}
func (x *GetWorkflowRequest) GetClusterId() string {
@@ -3322,7 +3432,7 @@ type GetWorkflowsRequest struct {
func (x *GetWorkflowsRequest) Reset() {
*x = GetWorkflowsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[56]
+ mi := &file_vtadmin_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3335,7 +3445,7 @@ func (x *GetWorkflowsRequest) String() string {
func (*GetWorkflowsRequest) ProtoMessage() {}
func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[56]
+ mi := &file_vtadmin_proto_msgTypes[58]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3348,7 +3458,7 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead.
func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{56}
+ return file_vtadmin_proto_rawDescGZIP(), []int{58}
}
func (x *GetWorkflowsRequest) GetClusterIds() []string {
@@ -3390,7 +3500,7 @@ type GetWorkflowsResponse struct {
func (x *GetWorkflowsResponse) Reset() {
*x = GetWorkflowsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[57]
+ mi := &file_vtadmin_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3403,7 +3513,7 @@ func (x *GetWorkflowsResponse) String() string {
func (*GetWorkflowsResponse) ProtoMessage() {}
func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[57]
+ mi := &file_vtadmin_proto_msgTypes[59]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3416,7 +3526,7 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead.
func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{57}
+ return file_vtadmin_proto_rawDescGZIP(), []int{59}
}
func (x *GetWorkflowsResponse) GetWorkflowsByCluster() map[string]*ClusterWorkflows {
@@ -3442,7 +3552,7 @@ type PingTabletRequest struct {
func (x *PingTabletRequest) Reset() {
*x = PingTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[58]
+ mi := &file_vtadmin_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3455,7 +3565,7 @@ func (x *PingTabletRequest) String() string {
func (*PingTabletRequest) ProtoMessage() {}
func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[58]
+ mi := &file_vtadmin_proto_msgTypes[60]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3468,7 +3578,7 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead.
func (*PingTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{58}
+ return file_vtadmin_proto_rawDescGZIP(), []int{60}
}
func (x *PingTabletRequest) GetAlias() *topodata.TabletAlias {
@@ -3497,7 +3607,7 @@ type PingTabletResponse struct {
func (x *PingTabletResponse) Reset() {
*x = PingTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[59]
+ mi := &file_vtadmin_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3510,7 +3620,7 @@ func (x *PingTabletResponse) String() string {
func (*PingTabletResponse) ProtoMessage() {}
func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[59]
+ mi := &file_vtadmin_proto_msgTypes[61]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3523,7 +3633,7 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead.
func (*PingTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{59}
+ return file_vtadmin_proto_rawDescGZIP(), []int{61}
}
func (x *PingTabletResponse) GetStatus() string {
@@ -3552,7 +3662,7 @@ type PlannedFailoverShardRequest struct {
func (x *PlannedFailoverShardRequest) Reset() {
*x = PlannedFailoverShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[60]
+ mi := &file_vtadmin_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3565,7 +3675,7 @@ func (x *PlannedFailoverShardRequest) String() string {
func (*PlannedFailoverShardRequest) ProtoMessage() {}
func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[60]
+ mi := &file_vtadmin_proto_msgTypes[62]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3578,7 +3688,7 @@ func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedFailoverShardRequest.ProtoReflect.Descriptor instead.
func (*PlannedFailoverShardRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{60}
+ return file_vtadmin_proto_rawDescGZIP(), []int{62}
}
func (x *PlannedFailoverShardRequest) GetClusterId() string {
@@ -3614,7 +3724,7 @@ type PlannedFailoverShardResponse struct {
func (x *PlannedFailoverShardResponse) Reset() {
*x = PlannedFailoverShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[61]
+ mi := &file_vtadmin_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3627,7 +3737,7 @@ func (x *PlannedFailoverShardResponse) String() string {
func (*PlannedFailoverShardResponse) ProtoMessage() {}
func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[61]
+ mi := &file_vtadmin_proto_msgTypes[63]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3640,7 +3750,7 @@ func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedFailoverShardResponse.ProtoReflect.Descriptor instead.
func (*PlannedFailoverShardResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{61}
+ return file_vtadmin_proto_rawDescGZIP(), []int{63}
}
func (x *PlannedFailoverShardResponse) GetCluster() *Cluster {
@@ -3692,7 +3802,7 @@ type RebuildKeyspaceGraphRequest struct {
func (x *RebuildKeyspaceGraphRequest) Reset() {
*x = RebuildKeyspaceGraphRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[62]
+ mi := &file_vtadmin_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3705,7 +3815,7 @@ func (x *RebuildKeyspaceGraphRequest) String() string {
func (*RebuildKeyspaceGraphRequest) ProtoMessage() {}
func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[62]
+ mi := &file_vtadmin_proto_msgTypes[64]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3718,7 +3828,7 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{62}
+ return file_vtadmin_proto_rawDescGZIP(), []int{64}
}
func (x *RebuildKeyspaceGraphRequest) GetClusterId() string {
@@ -3760,7 +3870,7 @@ type RebuildKeyspaceGraphResponse struct {
func (x *RebuildKeyspaceGraphResponse) Reset() {
*x = RebuildKeyspaceGraphResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[63]
+ mi := &file_vtadmin_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3773,7 +3883,7 @@ func (x *RebuildKeyspaceGraphResponse) String() string {
func (*RebuildKeyspaceGraphResponse) ProtoMessage() {}
func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[63]
+ mi := &file_vtadmin_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3786,7 +3896,7 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{63}
+ return file_vtadmin_proto_rawDescGZIP(), []int{65}
}
func (x *RebuildKeyspaceGraphResponse) GetStatus() string {
@@ -3808,7 +3918,7 @@ type RefreshStateRequest struct {
func (x *RefreshStateRequest) Reset() {
*x = RefreshStateRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[64]
+ mi := &file_vtadmin_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3821,7 +3931,7 @@ func (x *RefreshStateRequest) String() string {
func (*RefreshStateRequest) ProtoMessage() {}
func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[64]
+ mi := &file_vtadmin_proto_msgTypes[66]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3834,7 +3944,7 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead.
func (*RefreshStateRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{64}
+ return file_vtadmin_proto_rawDescGZIP(), []int{66}
}
func (x *RefreshStateRequest) GetAlias() *topodata.TabletAlias {
@@ -3863,7 +3973,7 @@ type RefreshStateResponse struct {
func (x *RefreshStateResponse) Reset() {
*x = RefreshStateResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[65]
+ mi := &file_vtadmin_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3876,7 +3986,7 @@ func (x *RefreshStateResponse) String() string {
func (*RefreshStateResponse) ProtoMessage() {}
func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[65]
+ mi := &file_vtadmin_proto_msgTypes[67]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3889,7 +3999,7 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead.
func (*RefreshStateResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{65}
+ return file_vtadmin_proto_rawDescGZIP(), []int{67}
}
func (x *RefreshStateResponse) GetStatus() string {
@@ -3958,7 +4068,7 @@ type ReloadSchemasRequest struct {
func (x *ReloadSchemasRequest) Reset() {
*x = ReloadSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[66]
+ mi := &file_vtadmin_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3971,7 +4081,7 @@ func (x *ReloadSchemasRequest) String() string {
func (*ReloadSchemasRequest) ProtoMessage() {}
func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[66]
+ mi := &file_vtadmin_proto_msgTypes[68]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3984,7 +4094,7 @@ func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemasRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{66}
+ return file_vtadmin_proto_rawDescGZIP(), []int{68}
}
func (x *ReloadSchemasRequest) GetKeyspaces() []string {
@@ -4058,7 +4168,7 @@ type ReloadSchemasResponse struct {
func (x *ReloadSchemasResponse) Reset() {
*x = ReloadSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[67]
+ mi := &file_vtadmin_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4071,7 +4181,7 @@ func (x *ReloadSchemasResponse) String() string {
func (*ReloadSchemasResponse) ProtoMessage() {}
func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[67]
+ mi := &file_vtadmin_proto_msgTypes[69]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4084,7 +4194,7 @@ func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemasResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69}
}
func (x *ReloadSchemasResponse) GetKeyspaceResults() []*ReloadSchemasResponse_KeyspaceResult {
@@ -4124,7 +4234,7 @@ type ReloadSchemaShardRequest struct {
func (x *ReloadSchemaShardRequest) Reset() {
*x = ReloadSchemaShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[68]
+ mi := &file_vtadmin_proto_msgTypes[70]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4137,7 +4247,7 @@ func (x *ReloadSchemaShardRequest) String() string {
func (*ReloadSchemaShardRequest) ProtoMessage() {}
func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[68]
+ mi := &file_vtadmin_proto_msgTypes[70]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4150,7 +4260,7 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{68}
+ return file_vtadmin_proto_rawDescGZIP(), []int{70}
}
func (x *ReloadSchemaShardRequest) GetClusterId() string {
@@ -4206,7 +4316,7 @@ type ReloadSchemaShardResponse struct {
func (x *ReloadSchemaShardResponse) Reset() {
*x = ReloadSchemaShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[69]
+ mi := &file_vtadmin_proto_msgTypes[71]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4219,7 +4329,7 @@ func (x *ReloadSchemaShardResponse) String() string {
func (*ReloadSchemaShardResponse) ProtoMessage() {}
func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[69]
+ mi := &file_vtadmin_proto_msgTypes[71]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4232,7 +4342,7 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{69}
+ return file_vtadmin_proto_rawDescGZIP(), []int{71}
}
func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event {
@@ -4254,7 +4364,7 @@ type RefreshTabletReplicationSourceRequest struct {
func (x *RefreshTabletReplicationSourceRequest) Reset() {
*x = RefreshTabletReplicationSourceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[70]
+ mi := &file_vtadmin_proto_msgTypes[72]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4267,7 +4377,7 @@ func (x *RefreshTabletReplicationSourceRequest) String() string {
func (*RefreshTabletReplicationSourceRequest) ProtoMessage() {}
func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[70]
+ mi := &file_vtadmin_proto_msgTypes[72]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4280,7 +4390,7 @@ func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Mess
// Deprecated: Use RefreshTabletReplicationSourceRequest.ProtoReflect.Descriptor instead.
func (*RefreshTabletReplicationSourceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{70}
+ return file_vtadmin_proto_rawDescGZIP(), []int{72}
}
func (x *RefreshTabletReplicationSourceRequest) GetAlias() *topodata.TabletAlias {
@@ -4311,7 +4421,7 @@ type RefreshTabletReplicationSourceResponse struct {
func (x *RefreshTabletReplicationSourceResponse) Reset() {
*x = RefreshTabletReplicationSourceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[71]
+ mi := &file_vtadmin_proto_msgTypes[73]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4324,7 +4434,7 @@ func (x *RefreshTabletReplicationSourceResponse) String() string {
func (*RefreshTabletReplicationSourceResponse) ProtoMessage() {}
func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[71]
+ mi := &file_vtadmin_proto_msgTypes[73]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4337,7 +4447,7 @@ func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Mes
// Deprecated: Use RefreshTabletReplicationSourceResponse.ProtoReflect.Descriptor instead.
func (*RefreshTabletReplicationSourceResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{71}
+ return file_vtadmin_proto_rawDescGZIP(), []int{73}
}
func (x *RefreshTabletReplicationSourceResponse) GetKeyspace() string {
@@ -4383,7 +4493,7 @@ type RemoveKeyspaceCellRequest struct {
func (x *RemoveKeyspaceCellRequest) Reset() {
*x = RemoveKeyspaceCellRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[72]
+ mi := &file_vtadmin_proto_msgTypes[74]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4396,7 +4506,7 @@ func (x *RemoveKeyspaceCellRequest) String() string {
func (*RemoveKeyspaceCellRequest) ProtoMessage() {}
func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[72]
+ mi := &file_vtadmin_proto_msgTypes[74]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4409,7 +4519,7 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{72}
+ return file_vtadmin_proto_rawDescGZIP(), []int{74}
}
func (x *RemoveKeyspaceCellRequest) GetClusterId() string {
@@ -4458,7 +4568,7 @@ type RemoveKeyspaceCellResponse struct {
func (x *RemoveKeyspaceCellResponse) Reset() {
*x = RemoveKeyspaceCellResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[73]
+ mi := &file_vtadmin_proto_msgTypes[75]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4471,7 +4581,7 @@ func (x *RemoveKeyspaceCellResponse) String() string {
func (*RemoveKeyspaceCellResponse) ProtoMessage() {}
func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[73]
+ mi := &file_vtadmin_proto_msgTypes[75]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4484,7 +4594,7 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{73}
+ return file_vtadmin_proto_rawDescGZIP(), []int{75}
}
func (x *RemoveKeyspaceCellResponse) GetStatus() string {
@@ -4506,7 +4616,7 @@ type RunHealthCheckRequest struct {
func (x *RunHealthCheckRequest) Reset() {
*x = RunHealthCheckRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[74]
+ mi := &file_vtadmin_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4519,7 +4629,7 @@ func (x *RunHealthCheckRequest) String() string {
func (*RunHealthCheckRequest) ProtoMessage() {}
func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[74]
+ mi := &file_vtadmin_proto_msgTypes[76]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4532,7 +4642,7 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead.
func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{74}
+ return file_vtadmin_proto_rawDescGZIP(), []int{76}
}
func (x *RunHealthCheckRequest) GetAlias() *topodata.TabletAlias {
@@ -4561,7 +4671,7 @@ type RunHealthCheckResponse struct {
func (x *RunHealthCheckResponse) Reset() {
*x = RunHealthCheckResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[75]
+ mi := &file_vtadmin_proto_msgTypes[77]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4574,7 +4684,7 @@ func (x *RunHealthCheckResponse) String() string {
func (*RunHealthCheckResponse) ProtoMessage() {}
func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[75]
+ mi := &file_vtadmin_proto_msgTypes[77]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4587,7 +4697,7 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead.
func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{75}
+ return file_vtadmin_proto_rawDescGZIP(), []int{77}
}
func (x *RunHealthCheckResponse) GetStatus() string {
@@ -4616,7 +4726,7 @@ type SetReadOnlyRequest struct {
func (x *SetReadOnlyRequest) Reset() {
*x = SetReadOnlyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[76]
+ mi := &file_vtadmin_proto_msgTypes[78]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4629,7 +4739,7 @@ func (x *SetReadOnlyRequest) String() string {
func (*SetReadOnlyRequest) ProtoMessage() {}
func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[76]
+ mi := &file_vtadmin_proto_msgTypes[78]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4642,7 +4752,7 @@ func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadOnlyRequest.ProtoReflect.Descriptor instead.
func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{76}
+ return file_vtadmin_proto_rawDescGZIP(), []int{78}
}
func (x *SetReadOnlyRequest) GetAlias() *topodata.TabletAlias {
@@ -4668,7 +4778,7 @@ type SetReadOnlyResponse struct {
func (x *SetReadOnlyResponse) Reset() {
*x = SetReadOnlyResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[77]
+ mi := &file_vtadmin_proto_msgTypes[79]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4681,7 +4791,7 @@ func (x *SetReadOnlyResponse) String() string {
func (*SetReadOnlyResponse) ProtoMessage() {}
func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[77]
+ mi := &file_vtadmin_proto_msgTypes[79]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4694,7 +4804,7 @@ func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadOnlyResponse.ProtoReflect.Descriptor instead.
func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{77}
+ return file_vtadmin_proto_rawDescGZIP(), []int{79}
}
type SetReadWriteRequest struct {
@@ -4709,7 +4819,7 @@ type SetReadWriteRequest struct {
func (x *SetReadWriteRequest) Reset() {
*x = SetReadWriteRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[78]
+ mi := &file_vtadmin_proto_msgTypes[80]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4722,7 +4832,7 @@ func (x *SetReadWriteRequest) String() string {
func (*SetReadWriteRequest) ProtoMessage() {}
func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[78]
+ mi := &file_vtadmin_proto_msgTypes[80]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4735,7 +4845,7 @@ func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadWriteRequest.ProtoReflect.Descriptor instead.
func (*SetReadWriteRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{78}
+ return file_vtadmin_proto_rawDescGZIP(), []int{80}
}
func (x *SetReadWriteRequest) GetAlias() *topodata.TabletAlias {
@@ -4761,7 +4871,7 @@ type SetReadWriteResponse struct {
func (x *SetReadWriteResponse) Reset() {
*x = SetReadWriteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[79]
+ mi := &file_vtadmin_proto_msgTypes[81]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4774,7 +4884,7 @@ func (x *SetReadWriteResponse) String() string {
func (*SetReadWriteResponse) ProtoMessage() {}
func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[79]
+ mi := &file_vtadmin_proto_msgTypes[81]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4787,7 +4897,7 @@ func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadWriteResponse.ProtoReflect.Descriptor instead.
func (*SetReadWriteResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{79}
+ return file_vtadmin_proto_rawDescGZIP(), []int{81}
}
type StartReplicationRequest struct {
@@ -4802,7 +4912,7 @@ type StartReplicationRequest struct {
func (x *StartReplicationRequest) Reset() {
*x = StartReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[80]
+ mi := &file_vtadmin_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4815,7 +4925,7 @@ func (x *StartReplicationRequest) String() string {
func (*StartReplicationRequest) ProtoMessage() {}
func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[80]
+ mi := &file_vtadmin_proto_msgTypes[82]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4828,7 +4938,7 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead.
func (*StartReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{80}
+ return file_vtadmin_proto_rawDescGZIP(), []int{82}
}
func (x *StartReplicationRequest) GetAlias() *topodata.TabletAlias {
@@ -4857,7 +4967,7 @@ type StartReplicationResponse struct {
func (x *StartReplicationResponse) Reset() {
*x = StartReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[81]
+ mi := &file_vtadmin_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4870,7 +4980,7 @@ func (x *StartReplicationResponse) String() string {
func (*StartReplicationResponse) ProtoMessage() {}
func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[81]
+ mi := &file_vtadmin_proto_msgTypes[83]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4883,7 +4993,7 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead.
func (*StartReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{81}
+ return file_vtadmin_proto_rawDescGZIP(), []int{83}
}
func (x *StartReplicationResponse) GetStatus() string {
@@ -4912,7 +5022,7 @@ type StopReplicationRequest struct {
func (x *StopReplicationRequest) Reset() {
*x = StopReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[82]
+ mi := &file_vtadmin_proto_msgTypes[84]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4925,7 +5035,7 @@ func (x *StopReplicationRequest) String() string {
func (*StopReplicationRequest) ProtoMessage() {}
func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[82]
+ mi := &file_vtadmin_proto_msgTypes[84]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4938,7 +5048,7 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead.
func (*StopReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{82}
+ return file_vtadmin_proto_rawDescGZIP(), []int{84}
}
func (x *StopReplicationRequest) GetAlias() *topodata.TabletAlias {
@@ -4967,7 +5077,7 @@ type StopReplicationResponse struct {
func (x *StopReplicationResponse) Reset() {
*x = StopReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[83]
+ mi := &file_vtadmin_proto_msgTypes[85]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4980,7 +5090,7 @@ func (x *StopReplicationResponse) String() string {
func (*StopReplicationResponse) ProtoMessage() {}
func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[83]
+ mi := &file_vtadmin_proto_msgTypes[85]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4993,7 +5103,7 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead.
func (*StopReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{83}
+ return file_vtadmin_proto_rawDescGZIP(), []int{85}
}
func (x *StopReplicationResponse) GetStatus() string {
@@ -5024,7 +5134,7 @@ type TabletExternallyPromotedRequest struct {
func (x *TabletExternallyPromotedRequest) Reset() {
*x = TabletExternallyPromotedRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[84]
+ mi := &file_vtadmin_proto_msgTypes[86]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5037,7 +5147,7 @@ func (x *TabletExternallyPromotedRequest) String() string {
func (*TabletExternallyPromotedRequest) ProtoMessage() {}
func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[84]
+ mi := &file_vtadmin_proto_msgTypes[86]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5050,7 +5160,7 @@ func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use TabletExternallyPromotedRequest.ProtoReflect.Descriptor instead.
func (*TabletExternallyPromotedRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{84}
+ return file_vtadmin_proto_rawDescGZIP(), []int{86}
}
func (x *TabletExternallyPromotedRequest) GetAlias() *topodata.TabletAlias {
@@ -5082,7 +5192,7 @@ type TabletExternallyPromotedResponse struct {
func (x *TabletExternallyPromotedResponse) Reset() {
*x = TabletExternallyPromotedResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[85]
+ mi := &file_vtadmin_proto_msgTypes[87]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5095,7 +5205,7 @@ func (x *TabletExternallyPromotedResponse) String() string {
func (*TabletExternallyPromotedResponse) ProtoMessage() {}
func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[85]
+ mi := &file_vtadmin_proto_msgTypes[87]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5108,7 +5218,7 @@ func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use TabletExternallyPromotedResponse.ProtoReflect.Descriptor instead.
func (*TabletExternallyPromotedResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{85}
+ return file_vtadmin_proto_rawDescGZIP(), []int{87}
}
func (x *TabletExternallyPromotedResponse) GetCluster() *Cluster {
@@ -5158,7 +5268,7 @@ type TabletExternallyReparentedRequest struct {
func (x *TabletExternallyReparentedRequest) Reset() {
*x = TabletExternallyReparentedRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[86]
+ mi := &file_vtadmin_proto_msgTypes[88]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5171,7 +5281,7 @@ func (x *TabletExternallyReparentedRequest) String() string {
func (*TabletExternallyReparentedRequest) ProtoMessage() {}
func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[86]
+ mi := &file_vtadmin_proto_msgTypes[88]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5184,7 +5294,7 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message
// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead.
func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{86}
+ return file_vtadmin_proto_rawDescGZIP(), []int{88}
}
func (x *TabletExternallyReparentedRequest) GetAlias() *topodata.TabletAlias {
@@ -5201,6 +5311,61 @@ func (x *TabletExternallyReparentedRequest) GetClusterIds() []string {
return nil
}
+type ValidateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"`
+}
+
+func (x *ValidateRequest) Reset() {
+ *x = ValidateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[89]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateRequest) ProtoMessage() {}
+
+func (x *ValidateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[89]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead.
+func (*ValidateRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{89}
+}
+
+func (x *ValidateRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *ValidateRequest) GetPingTablets() bool {
+ if x != nil {
+ return x.PingTablets
+ }
+ return false
+}
+
type ValidateKeyspaceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5214,7 +5379,7 @@ type ValidateKeyspaceRequest struct {
func (x *ValidateKeyspaceRequest) Reset() {
*x = ValidateKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[87]
+ mi := &file_vtadmin_proto_msgTypes[90]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5227,7 +5392,7 @@ func (x *ValidateKeyspaceRequest) String() string {
func (*ValidateKeyspaceRequest) ProtoMessage() {}
func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[87]
+ mi := &file_vtadmin_proto_msgTypes[90]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5240,7 +5405,7 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{87}
+ return file_vtadmin_proto_rawDescGZIP(), []int{90}
}
func (x *ValidateKeyspaceRequest) GetClusterId() string {
@@ -5276,7 +5441,7 @@ type ValidateSchemaKeyspaceRequest struct {
func (x *ValidateSchemaKeyspaceRequest) Reset() {
*x = ValidateSchemaKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[88]
+ mi := &file_vtadmin_proto_msgTypes[91]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5289,7 +5454,7 @@ func (x *ValidateSchemaKeyspaceRequest) String() string {
func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {}
func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[88]
+ mi := &file_vtadmin_proto_msgTypes[91]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5302,7 +5467,7 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{88}
+ return file_vtadmin_proto_rawDescGZIP(), []int{91}
}
func (x *ValidateSchemaKeyspaceRequest) GetClusterId() string {
@@ -5319,32 +5484,34 @@ func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string {
return ""
}
-type ValidateVersionKeyspaceRequest struct {
+type ValidateShardRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
- Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"`
+ PingTablets bool `protobuf:"varint,4,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"`
}
-func (x *ValidateVersionKeyspaceRequest) Reset() {
- *x = ValidateVersionKeyspaceRequest{}
+func (x *ValidateShardRequest) Reset() {
+ *x = ValidateShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[89]
+ mi := &file_vtadmin_proto_msgTypes[92]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ValidateVersionKeyspaceRequest) String() string {
+func (x *ValidateShardRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ValidateVersionKeyspaceRequest) ProtoMessage() {}
+func (*ValidateShardRequest) ProtoMessage() {}
-func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[89]
+func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[92]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5355,52 +5522,184 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead.
-func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{89}
+// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead.
+func (*ValidateShardRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{92}
}
-func (x *ValidateVersionKeyspaceRequest) GetClusterId() string {
+func (x *ValidateShardRequest) GetClusterId() string {
if x != nil {
return x.ClusterId
}
return ""
}
-func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string {
+func (x *ValidateShardRequest) GetKeyspace() string {
if x != nil {
return x.Keyspace
}
return ""
}
-type VTExplainRequest struct {
+func (x *ValidateShardRequest) GetShard() string {
+ if x != nil {
+ return x.Shard
+ }
+ return ""
+}
+
+func (x *ValidateShardRequest) GetPingTablets() bool {
+ if x != nil {
+ return x.PingTablets
+ }
+ return false
+}
+
+type ValidateVersionKeyspaceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
- Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
- Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
}
-func (x *VTExplainRequest) Reset() {
- *x = VTExplainRequest{}
+func (x *ValidateVersionKeyspaceRequest) Reset() {
+ *x = ValidateVersionKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[90]
+ mi := &file_vtadmin_proto_msgTypes[93]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *VTExplainRequest) String() string {
+func (x *ValidateVersionKeyspaceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*VTExplainRequest) ProtoMessage() {}
+func (*ValidateVersionKeyspaceRequest) ProtoMessage() {}
-func (x *VTExplainRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[90]
+func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[93]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead.
+func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{93}
+}
+
+func (x *ValidateVersionKeyspaceRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+type ValidateVersionShardRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"`
+}
+
+func (x *ValidateVersionShardRequest) Reset() {
+ *x = ValidateVersionShardRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[94]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateVersionShardRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateVersionShardRequest) ProtoMessage() {}
+
+func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[94]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead.
+func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{94}
+}
+
+func (x *ValidateVersionShardRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *ValidateVersionShardRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+func (x *ValidateVersionShardRequest) GetShard() string {
+ if x != nil {
+ return x.Shard
+ }
+ return ""
+}
+
+type VTExplainRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
+}
+
+func (x *VTExplainRequest) Reset() {
+ *x = VTExplainRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[95]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VTExplainRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VTExplainRequest) ProtoMessage() {}
+
+func (x *VTExplainRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[95]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5413,7 +5712,7 @@ func (x *VTExplainRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VTExplainRequest.ProtoReflect.Descriptor instead.
func (*VTExplainRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{90}
+ return file_vtadmin_proto_rawDescGZIP(), []int{95}
}
func (x *VTExplainRequest) GetCluster() string {
@@ -5448,7 +5747,7 @@ type VTExplainResponse struct {
func (x *VTExplainResponse) Reset() {
*x = VTExplainResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[91]
+ mi := &file_vtadmin_proto_msgTypes[96]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5461,7 +5760,7 @@ func (x *VTExplainResponse) String() string {
func (*VTExplainResponse) ProtoMessage() {}
func (x *VTExplainResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[91]
+ mi := &file_vtadmin_proto_msgTypes[96]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5474,7 +5773,7 @@ func (x *VTExplainResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VTExplainResponse.ProtoReflect.Descriptor instead.
func (*VTExplainResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{91}
+ return file_vtadmin_proto_rawDescGZIP(), []int{96}
}
func (x *VTExplainResponse) GetResponse() string {
@@ -5496,7 +5795,7 @@ type Schema_ShardTableSize struct {
func (x *Schema_ShardTableSize) Reset() {
*x = Schema_ShardTableSize{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[95]
+ mi := &file_vtadmin_proto_msgTypes[100]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5509,7 +5808,7 @@ func (x *Schema_ShardTableSize) String() string {
func (*Schema_ShardTableSize) ProtoMessage() {}
func (x *Schema_ShardTableSize) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[95]
+ mi := &file_vtadmin_proto_msgTypes[100]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5554,7 +5853,7 @@ type Schema_TableSize struct {
func (x *Schema_TableSize) Reset() {
*x = Schema_TableSize{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[96]
+ mi := &file_vtadmin_proto_msgTypes[101]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5567,7 +5866,7 @@ func (x *Schema_TableSize) String() string {
func (*Schema_TableSize) ProtoMessage() {}
func (x *Schema_TableSize) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[96]
+ mi := &file_vtadmin_proto_msgTypes[101]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5622,7 +5921,7 @@ type ReloadSchemasResponse_KeyspaceResult struct {
func (x *ReloadSchemasResponse_KeyspaceResult) Reset() {
*x = ReloadSchemasResponse_KeyspaceResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[99]
+ mi := &file_vtadmin_proto_msgTypes[104]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5635,7 +5934,7 @@ func (x *ReloadSchemasResponse_KeyspaceResult) String() string {
func (*ReloadSchemasResponse_KeyspaceResult) ProtoMessage() {}
func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[99]
+ mi := &file_vtadmin_proto_msgTypes[104]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5648,7 +5947,7 @@ func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Messa
// Deprecated: Use ReloadSchemasResponse_KeyspaceResult.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse_KeyspaceResult) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67, 0}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69, 0}
}
func (x *ReloadSchemasResponse_KeyspaceResult) GetKeyspace() *Keyspace {
@@ -5683,7 +5982,7 @@ type ReloadSchemasResponse_ShardResult struct {
func (x *ReloadSchemasResponse_ShardResult) Reset() {
*x = ReloadSchemasResponse_ShardResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[100]
+ mi := &file_vtadmin_proto_msgTypes[105]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5696,7 +5995,7 @@ func (x *ReloadSchemasResponse_ShardResult) String() string {
func (*ReloadSchemasResponse_ShardResult) ProtoMessage() {}
func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[100]
+ mi := &file_vtadmin_proto_msgTypes[105]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5709,7 +6008,7 @@ func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message
// Deprecated: Use ReloadSchemasResponse_ShardResult.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse_ShardResult) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67, 1}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69, 1}
}
func (x *ReloadSchemasResponse_ShardResult) GetShard() *Shard {
@@ -5745,7 +6044,7 @@ type ReloadSchemasResponse_TabletResult struct {
func (x *ReloadSchemasResponse_TabletResult) Reset() {
*x = ReloadSchemasResponse_TabletResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[101]
+ mi := &file_vtadmin_proto_msgTypes[106]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5758,7 +6057,7 @@ func (x *ReloadSchemasResponse_TabletResult) String() string {
func (*ReloadSchemasResponse_TabletResult) ProtoMessage() {}
func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[101]
+ mi := &file_vtadmin_proto_msgTypes[106]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5771,7 +6070,7 @@ func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message
// Deprecated: Use ReloadSchemasResponse_TabletResult.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse_TabletResult) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67, 2}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69, 2}
}
func (x *ReloadSchemasResponse_TabletResult) GetTablet() *Tablet {
@@ -6089,667 +6388,726 @@ var file_vtadmin_proto_rawDesc = []byte{
0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a,
0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, 0x32, 0x0a, 0x0f, 0x47,
- 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
- 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22,
- 0x39, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x47,
- 0x61, 0x74, 0x65, 0x52, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x12, 0x47, 0x65,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x36, 0x0a, 0x13, 0x47,
- 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, 0x62, 0x0a, 0x14, 0x47,
+ 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x22,
+ 0x32, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0xb5, 0x01, 0x0a,
- 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
- 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a,
- 0x12, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x8d,
- 0x01, 0x0a, 0x23, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x49, 0x64, 0x73, 0x22, 0x39, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x52, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x22, 0x4f,
+ 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
+ 0x36, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x85,
- 0x01, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76,
- 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
- 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c,
- 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
+ 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x22, 0xb5, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12,
+ 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x73, 0x22, 0x8d, 0x01, 0x0a, 0x23, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63,
- 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c,
- 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0d, 0x73,
- 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76,
- 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
- 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x67, 0x67,
- 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x69,
- 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x60, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x47, 0x65,
- 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73,
- 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65,
+ 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12,
+ 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
+ 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56,
0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x34,
- 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x37, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x73, 0x72, 0x76,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67,
+ 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12,
+ 0x3b, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x6e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x60, 0x0a, 0x10,
+ 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x34,
+ 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x52, 0x07, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72,
- 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xa0, 0x01, 0x0a,
- 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71,
+ 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x4b, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f,
+ 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12,
+ 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61,
+ 0x74, 0x68, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x13, 0x47, 0x65, 0x74,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22,
+ 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
- 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69,
- 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f,
- 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22,
- 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b,
- 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42,
- 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x77,
- 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79,
- 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
- 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57,
- 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f,
- 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x40,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e,
- 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c,
- 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12,
- 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x1b, 0x52, 0x65,
- 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61,
- 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
- 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22,
- 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65,
- 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b,
+ 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x52, 0x07, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x57, 0x6f,
+ 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xa0, 0x01,
+ 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73,
+ 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12,
+ 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42,
+ 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69,
+ 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
+ 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c,
+ 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
+ 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61,
+ 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69,
+ 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a,
+ 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x1b, 0x52,
+ 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72,
+ 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c,
+ 0x22, 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72,
+ 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5a, 0x0a,
+ 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a,
+ 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x02, 0x0a, 0x14, 0x52, 0x65,
+ 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x63,
+ 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a,
+ 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72,
+ 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63,
+ 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xad, 0x04, 0x0a, 0x15,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0f,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12,
+ 0x4f, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x12, 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5b, 0x0a,
+ 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a, 0x05,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x4f, 0x0a, 0x0c, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x27, 0x0a, 0x06, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xdb, 0x01, 0x0a, 0x18,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69,
+ 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27,
+ 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65,
+ 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f,
+ 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c,
+ 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c,
+ 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x75,
+ 0x0a, 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61,
+ 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73,
+ 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22,
+ 0x9e, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05,
+ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72,
+ 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65,
+ 0x22, 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16,
+ 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x65, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5c, 0x0a,
+ 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x62, 0x0a, 0x12, 0x53,
+ 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22,
+ 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61,
+ 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a,
+ 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53,
+ 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b,
0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5a, 0x0a, 0x14,
- 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e,
- 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52,
- 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x02, 0x0a, 0x14, 0x52, 0x65, 0x6c,
- 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12,
- 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d,
- 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69,
- 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xad, 0x04, 0x0a, 0x15, 0x52,
- 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x4f,
- 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12,
- 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5b, 0x0a, 0x0b,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x74, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x4f, 0x0a, 0x0c, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x27, 0x0a, 0x06, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xdb, 0x01, 0x0a, 0x18, 0x52,
- 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74,
- 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a,
- 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72,
- 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
- 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f,
- 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x75, 0x0a,
- 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
+ 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5e, 0x0a, 0x18,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x66, 0x0a, 0x16,
+ 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c,
0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9e,
- 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66,
- 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63,
- 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22,
- 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x65, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b,
- 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5c, 0x0a, 0x16,
- 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a,
- 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x62, 0x0a, 0x12, 0x53, 0x65,
- 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a,
- 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x15,
- 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64,
- 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05,
+ 0x72, 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c,
+ 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12,
+ 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64,
+ 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05,
0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x65,
- 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a,
- 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
- 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5e, 0x0a, 0x18, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
- 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x66, 0x0a, 0x16, 0x53,
- 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16,
- 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36,
- 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x73, 0x22, 0x5a, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x53, 0x0a, 0x0f, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c,
+ 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22,
+ 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e,
+ 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5a, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21,
+ 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x73, 0x22, 0x5b, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5b,
- 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5a, 0x0a, 0x10, 0x56,
- 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x2f, 0x0a, 0x11, 0x56, 0x54, 0x45, 0x78, 0x70,
- 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08,
- 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf9, 0x1c, 0x0a, 0x07, 0x56, 0x54, 0x41,
- 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x53, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x6e,
+ 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5a,
+ 0x0a, 0x10, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x2f, 0x0a, 0x11, 0x56, 0x54,
+ 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa9, 0x20, 0x0a, 0x07,
+ 0x56, 0x54, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x53, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f,
- 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12,
- 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
- 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b,
- 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f,
- 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c,
- 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67,
- 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a, 0x46,
- 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x6b, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61,
+ 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46,
+ 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d,
+ 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b,
+ 0x0a, 0x0a, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47,
+ 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49,
+ 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
+ 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47,
+ 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75,
+ 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x47,
+ 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47,
+ 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f,
+ 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x00, 0x12,
+ 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12,
+ 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39,
+ 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
+ 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, 0x56,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53,
+ 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a,
+ 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
- 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
- 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43,
- 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c,
- 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73,
- 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61,
- 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
- 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22,
- 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22,
- 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12,
- 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0a, 0x47, 0x65,
- 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x56,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
- 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a,
- 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
- 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x00, 0x12, 0x4d,
- 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1c,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b,
- 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
- 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a,
- 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65,
- 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64,
- 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50,
- 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a,
- 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47,
- 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52,
- 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66,
- 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x0d, 0x52, 0x65, 0x6c,
- 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79,
+ 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
+ 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74,
+ 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0a, 0x47,
+ 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
+ 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f,
+ 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
+ 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x00, 0x12,
+ 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12,
+ 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
+ 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47,
+ 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e,
+ 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12,
+ 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65,
+ 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65,
+ 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65,
+ 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x0d, 0x52, 0x65,
+ 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x61,
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x11, 0x52,
+ 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x11,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f,
+ 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52,
0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61,
- 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65,
- 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x12, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12,
- 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x12, 0x52, 0x65,
0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x52, 0x75,
- 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1e, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64,
- 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c,
- 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x53,
- 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x10, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x12, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52,
+ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c,
+ 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x52,
+ 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1e, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12,
+ 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e,
+ 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c,
+ 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x10, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72,
+ 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64,
0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a,
- 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c,
- 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
- 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72,
- 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x5b, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a,
- 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x44,
- 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x19, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69,
- 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71,
+ 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50,
+ 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x43, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x44, 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x19,
+ 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73,
+ 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76,
+ 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -6765,7 +7123,7 @@ func file_vtadmin_proto_rawDescGZIP() []byte {
}
var file_vtadmin_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 102)
+var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 107)
var file_vtadmin_proto_goTypes = []interface{}{
(Tablet_ServingState)(0), // 0: vtadmin.Tablet.ServingState
(*Cluster)(nil), // 1: vtadmin.Cluster
@@ -6801,293 +7159,314 @@ var file_vtadmin_proto_goTypes = []interface{}{
(*GetCellsAliasesResponse)(nil), // 31: vtadmin.GetCellsAliasesResponse
(*GetClustersRequest)(nil), // 32: vtadmin.GetClustersRequest
(*GetClustersResponse)(nil), // 33: vtadmin.GetClustersResponse
- (*GetGatesRequest)(nil), // 34: vtadmin.GetGatesRequest
- (*GetGatesResponse)(nil), // 35: vtadmin.GetGatesResponse
- (*GetKeyspaceRequest)(nil), // 36: vtadmin.GetKeyspaceRequest
- (*GetKeyspacesRequest)(nil), // 37: vtadmin.GetKeyspacesRequest
- (*GetKeyspacesResponse)(nil), // 38: vtadmin.GetKeyspacesResponse
- (*GetSchemaRequest)(nil), // 39: vtadmin.GetSchemaRequest
- (*GetSchemasRequest)(nil), // 40: vtadmin.GetSchemasRequest
- (*GetSchemasResponse)(nil), // 41: vtadmin.GetSchemasResponse
- (*GetShardReplicationPositionsRequest)(nil), // 42: vtadmin.GetShardReplicationPositionsRequest
- (*GetShardReplicationPositionsResponse)(nil), // 43: vtadmin.GetShardReplicationPositionsResponse
- (*GetSrvVSchemaRequest)(nil), // 44: vtadmin.GetSrvVSchemaRequest
- (*GetSrvVSchemasRequest)(nil), // 45: vtadmin.GetSrvVSchemasRequest
- (*GetSrvVSchemasResponse)(nil), // 46: vtadmin.GetSrvVSchemasResponse
- (*GetSchemaTableSizeOptions)(nil), // 47: vtadmin.GetSchemaTableSizeOptions
- (*GetTabletRequest)(nil), // 48: vtadmin.GetTabletRequest
- (*GetTabletsRequest)(nil), // 49: vtadmin.GetTabletsRequest
- (*GetTabletsResponse)(nil), // 50: vtadmin.GetTabletsResponse
- (*GetVSchemaRequest)(nil), // 51: vtadmin.GetVSchemaRequest
- (*GetVSchemasRequest)(nil), // 52: vtadmin.GetVSchemasRequest
- (*GetVSchemasResponse)(nil), // 53: vtadmin.GetVSchemasResponse
- (*GetVtctldsRequest)(nil), // 54: vtadmin.GetVtctldsRequest
- (*GetVtctldsResponse)(nil), // 55: vtadmin.GetVtctldsResponse
- (*GetWorkflowRequest)(nil), // 56: vtadmin.GetWorkflowRequest
- (*GetWorkflowsRequest)(nil), // 57: vtadmin.GetWorkflowsRequest
- (*GetWorkflowsResponse)(nil), // 58: vtadmin.GetWorkflowsResponse
- (*PingTabletRequest)(nil), // 59: vtadmin.PingTabletRequest
- (*PingTabletResponse)(nil), // 60: vtadmin.PingTabletResponse
- (*PlannedFailoverShardRequest)(nil), // 61: vtadmin.PlannedFailoverShardRequest
- (*PlannedFailoverShardResponse)(nil), // 62: vtadmin.PlannedFailoverShardResponse
- (*RebuildKeyspaceGraphRequest)(nil), // 63: vtadmin.RebuildKeyspaceGraphRequest
- (*RebuildKeyspaceGraphResponse)(nil), // 64: vtadmin.RebuildKeyspaceGraphResponse
- (*RefreshStateRequest)(nil), // 65: vtadmin.RefreshStateRequest
- (*RefreshStateResponse)(nil), // 66: vtadmin.RefreshStateResponse
- (*ReloadSchemasRequest)(nil), // 67: vtadmin.ReloadSchemasRequest
- (*ReloadSchemasResponse)(nil), // 68: vtadmin.ReloadSchemasResponse
- (*ReloadSchemaShardRequest)(nil), // 69: vtadmin.ReloadSchemaShardRequest
- (*ReloadSchemaShardResponse)(nil), // 70: vtadmin.ReloadSchemaShardResponse
- (*RefreshTabletReplicationSourceRequest)(nil), // 71: vtadmin.RefreshTabletReplicationSourceRequest
- (*RefreshTabletReplicationSourceResponse)(nil), // 72: vtadmin.RefreshTabletReplicationSourceResponse
- (*RemoveKeyspaceCellRequest)(nil), // 73: vtadmin.RemoveKeyspaceCellRequest
- (*RemoveKeyspaceCellResponse)(nil), // 74: vtadmin.RemoveKeyspaceCellResponse
- (*RunHealthCheckRequest)(nil), // 75: vtadmin.RunHealthCheckRequest
- (*RunHealthCheckResponse)(nil), // 76: vtadmin.RunHealthCheckResponse
- (*SetReadOnlyRequest)(nil), // 77: vtadmin.SetReadOnlyRequest
- (*SetReadOnlyResponse)(nil), // 78: vtadmin.SetReadOnlyResponse
- (*SetReadWriteRequest)(nil), // 79: vtadmin.SetReadWriteRequest
- (*SetReadWriteResponse)(nil), // 80: vtadmin.SetReadWriteResponse
- (*StartReplicationRequest)(nil), // 81: vtadmin.StartReplicationRequest
- (*StartReplicationResponse)(nil), // 82: vtadmin.StartReplicationResponse
- (*StopReplicationRequest)(nil), // 83: vtadmin.StopReplicationRequest
- (*StopReplicationResponse)(nil), // 84: vtadmin.StopReplicationResponse
- (*TabletExternallyPromotedRequest)(nil), // 85: vtadmin.TabletExternallyPromotedRequest
- (*TabletExternallyPromotedResponse)(nil), // 86: vtadmin.TabletExternallyPromotedResponse
- (*TabletExternallyReparentedRequest)(nil), // 87: vtadmin.TabletExternallyReparentedRequest
- (*ValidateKeyspaceRequest)(nil), // 88: vtadmin.ValidateKeyspaceRequest
- (*ValidateSchemaKeyspaceRequest)(nil), // 89: vtadmin.ValidateSchemaKeyspaceRequest
- (*ValidateVersionKeyspaceRequest)(nil), // 90: vtadmin.ValidateVersionKeyspaceRequest
- (*VTExplainRequest)(nil), // 91: vtadmin.VTExplainRequest
- (*VTExplainResponse)(nil), // 92: vtadmin.VTExplainResponse
- nil, // 93: vtadmin.ClusterCellsAliases.AliasesEntry
- nil, // 94: vtadmin.Keyspace.ShardsEntry
- nil, // 95: vtadmin.Schema.TableSizesEntry
- (*Schema_ShardTableSize)(nil), // 96: vtadmin.Schema.ShardTableSize
- (*Schema_TableSize)(nil), // 97: vtadmin.Schema.TableSize
- nil, // 98: vtadmin.Schema.TableSize.ByShardEntry
- nil, // 99: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
- (*ReloadSchemasResponse_KeyspaceResult)(nil), // 100: vtadmin.ReloadSchemasResponse.KeyspaceResult
- (*ReloadSchemasResponse_ShardResult)(nil), // 101: vtadmin.ReloadSchemasResponse.ShardResult
- (*ReloadSchemasResponse_TabletResult)(nil), // 102: vtadmin.ReloadSchemasResponse.TabletResult
- (*mysqlctl.BackupInfo)(nil), // 103: mysqlctl.BackupInfo
- (*topodata.CellInfo)(nil), // 104: topodata.CellInfo
- (*vtctldata.ShardReplicationPositionsResponse)(nil), // 105: vtctldata.ShardReplicationPositionsResponse
- (*vtctldata.Keyspace)(nil), // 106: vtctldata.Keyspace
- (*tabletmanagerdata.TableDefinition)(nil), // 107: tabletmanagerdata.TableDefinition
- (*vtctldata.Shard)(nil), // 108: vtctldata.Shard
- (*vschema.SrvVSchema)(nil), // 109: vschema.SrvVSchema
- (*topodata.Tablet)(nil), // 110: topodata.Tablet
- (*vschema.Keyspace)(nil), // 111: vschema.Keyspace
- (*vtctldata.Workflow)(nil), // 112: vtctldata.Workflow
- (*vtctldata.CreateKeyspaceRequest)(nil), // 113: vtctldata.CreateKeyspaceRequest
- (*vtctldata.CreateShardRequest)(nil), // 114: vtctldata.CreateShardRequest
- (*vtctldata.DeleteKeyspaceRequest)(nil), // 115: vtctldata.DeleteKeyspaceRequest
- (*vtctldata.DeleteShardsRequest)(nil), // 116: vtctldata.DeleteShardsRequest
- (*topodata.TabletAlias)(nil), // 117: topodata.TabletAlias
- (*vtctldata.EmergencyReparentShardRequest)(nil), // 118: vtctldata.EmergencyReparentShardRequest
- (*logutil.Event)(nil), // 119: logutil.Event
- (*vtctldata.GetBackupsRequest)(nil), // 120: vtctldata.GetBackupsRequest
- (*vtctldata.PlannedReparentShardRequest)(nil), // 121: vtctldata.PlannedReparentShardRequest
- (*topodata.CellsAlias)(nil), // 122: topodata.CellsAlias
- (*vtctldata.CreateShardResponse)(nil), // 123: vtctldata.CreateShardResponse
- (*vtctldata.DeleteKeyspaceResponse)(nil), // 124: vtctldata.DeleteKeyspaceResponse
- (*vtctldata.DeleteShardsResponse)(nil), // 125: vtctldata.DeleteShardsResponse
- (*vtctldata.ValidateKeyspaceResponse)(nil), // 126: vtctldata.ValidateKeyspaceResponse
- (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 127: vtctldata.ValidateSchemaKeyspaceResponse
- (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 128: vtctldata.ValidateVersionKeyspaceResponse
+ (*GetFullStatusRequest)(nil), // 34: vtadmin.GetFullStatusRequest
+ (*GetGatesRequest)(nil), // 35: vtadmin.GetGatesRequest
+ (*GetGatesResponse)(nil), // 36: vtadmin.GetGatesResponse
+ (*GetKeyspaceRequest)(nil), // 37: vtadmin.GetKeyspaceRequest
+ (*GetKeyspacesRequest)(nil), // 38: vtadmin.GetKeyspacesRequest
+ (*GetKeyspacesResponse)(nil), // 39: vtadmin.GetKeyspacesResponse
+ (*GetSchemaRequest)(nil), // 40: vtadmin.GetSchemaRequest
+ (*GetSchemasRequest)(nil), // 41: vtadmin.GetSchemasRequest
+ (*GetSchemasResponse)(nil), // 42: vtadmin.GetSchemasResponse
+ (*GetShardReplicationPositionsRequest)(nil), // 43: vtadmin.GetShardReplicationPositionsRequest
+ (*GetShardReplicationPositionsResponse)(nil), // 44: vtadmin.GetShardReplicationPositionsResponse
+ (*GetSrvVSchemaRequest)(nil), // 45: vtadmin.GetSrvVSchemaRequest
+ (*GetSrvVSchemasRequest)(nil), // 46: vtadmin.GetSrvVSchemasRequest
+ (*GetSrvVSchemasResponse)(nil), // 47: vtadmin.GetSrvVSchemasResponse
+ (*GetSchemaTableSizeOptions)(nil), // 48: vtadmin.GetSchemaTableSizeOptions
+ (*GetTabletRequest)(nil), // 49: vtadmin.GetTabletRequest
+ (*GetTabletsRequest)(nil), // 50: vtadmin.GetTabletsRequest
+ (*GetTabletsResponse)(nil), // 51: vtadmin.GetTabletsResponse
+ (*GetTopologyPathRequest)(nil), // 52: vtadmin.GetTopologyPathRequest
+ (*GetVSchemaRequest)(nil), // 53: vtadmin.GetVSchemaRequest
+ (*GetVSchemasRequest)(nil), // 54: vtadmin.GetVSchemasRequest
+ (*GetVSchemasResponse)(nil), // 55: vtadmin.GetVSchemasResponse
+ (*GetVtctldsRequest)(nil), // 56: vtadmin.GetVtctldsRequest
+ (*GetVtctldsResponse)(nil), // 57: vtadmin.GetVtctldsResponse
+ (*GetWorkflowRequest)(nil), // 58: vtadmin.GetWorkflowRequest
+ (*GetWorkflowsRequest)(nil), // 59: vtadmin.GetWorkflowsRequest
+ (*GetWorkflowsResponse)(nil), // 60: vtadmin.GetWorkflowsResponse
+ (*PingTabletRequest)(nil), // 61: vtadmin.PingTabletRequest
+ (*PingTabletResponse)(nil), // 62: vtadmin.PingTabletResponse
+ (*PlannedFailoverShardRequest)(nil), // 63: vtadmin.PlannedFailoverShardRequest
+ (*PlannedFailoverShardResponse)(nil), // 64: vtadmin.PlannedFailoverShardResponse
+ (*RebuildKeyspaceGraphRequest)(nil), // 65: vtadmin.RebuildKeyspaceGraphRequest
+ (*RebuildKeyspaceGraphResponse)(nil), // 66: vtadmin.RebuildKeyspaceGraphResponse
+ (*RefreshStateRequest)(nil), // 67: vtadmin.RefreshStateRequest
+ (*RefreshStateResponse)(nil), // 68: vtadmin.RefreshStateResponse
+ (*ReloadSchemasRequest)(nil), // 69: vtadmin.ReloadSchemasRequest
+ (*ReloadSchemasResponse)(nil), // 70: vtadmin.ReloadSchemasResponse
+ (*ReloadSchemaShardRequest)(nil), // 71: vtadmin.ReloadSchemaShardRequest
+ (*ReloadSchemaShardResponse)(nil), // 72: vtadmin.ReloadSchemaShardResponse
+ (*RefreshTabletReplicationSourceRequest)(nil), // 73: vtadmin.RefreshTabletReplicationSourceRequest
+ (*RefreshTabletReplicationSourceResponse)(nil), // 74: vtadmin.RefreshTabletReplicationSourceResponse
+ (*RemoveKeyspaceCellRequest)(nil), // 75: vtadmin.RemoveKeyspaceCellRequest
+ (*RemoveKeyspaceCellResponse)(nil), // 76: vtadmin.RemoveKeyspaceCellResponse
+ (*RunHealthCheckRequest)(nil), // 77: vtadmin.RunHealthCheckRequest
+ (*RunHealthCheckResponse)(nil), // 78: vtadmin.RunHealthCheckResponse
+ (*SetReadOnlyRequest)(nil), // 79: vtadmin.SetReadOnlyRequest
+ (*SetReadOnlyResponse)(nil), // 80: vtadmin.SetReadOnlyResponse
+ (*SetReadWriteRequest)(nil), // 81: vtadmin.SetReadWriteRequest
+ (*SetReadWriteResponse)(nil), // 82: vtadmin.SetReadWriteResponse
+ (*StartReplicationRequest)(nil), // 83: vtadmin.StartReplicationRequest
+ (*StartReplicationResponse)(nil), // 84: vtadmin.StartReplicationResponse
+ (*StopReplicationRequest)(nil), // 85: vtadmin.StopReplicationRequest
+ (*StopReplicationResponse)(nil), // 86: vtadmin.StopReplicationResponse
+ (*TabletExternallyPromotedRequest)(nil), // 87: vtadmin.TabletExternallyPromotedRequest
+ (*TabletExternallyPromotedResponse)(nil), // 88: vtadmin.TabletExternallyPromotedResponse
+ (*TabletExternallyReparentedRequest)(nil), // 89: vtadmin.TabletExternallyReparentedRequest
+ (*ValidateRequest)(nil), // 90: vtadmin.ValidateRequest
+ (*ValidateKeyspaceRequest)(nil), // 91: vtadmin.ValidateKeyspaceRequest
+ (*ValidateSchemaKeyspaceRequest)(nil), // 92: vtadmin.ValidateSchemaKeyspaceRequest
+ (*ValidateShardRequest)(nil), // 93: vtadmin.ValidateShardRequest
+ (*ValidateVersionKeyspaceRequest)(nil), // 94: vtadmin.ValidateVersionKeyspaceRequest
+ (*ValidateVersionShardRequest)(nil), // 95: vtadmin.ValidateVersionShardRequest
+ (*VTExplainRequest)(nil), // 96: vtadmin.VTExplainRequest
+ (*VTExplainResponse)(nil), // 97: vtadmin.VTExplainResponse
+ nil, // 98: vtadmin.ClusterCellsAliases.AliasesEntry
+ nil, // 99: vtadmin.Keyspace.ShardsEntry
+ nil, // 100: vtadmin.Schema.TableSizesEntry
+ (*Schema_ShardTableSize)(nil), // 101: vtadmin.Schema.ShardTableSize
+ (*Schema_TableSize)(nil), // 102: vtadmin.Schema.TableSize
+ nil, // 103: vtadmin.Schema.TableSize.ByShardEntry
+ nil, // 104: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
+ (*ReloadSchemasResponse_KeyspaceResult)(nil), // 105: vtadmin.ReloadSchemasResponse.KeyspaceResult
+ (*ReloadSchemasResponse_ShardResult)(nil), // 106: vtadmin.ReloadSchemasResponse.ShardResult
+ (*ReloadSchemasResponse_TabletResult)(nil), // 107: vtadmin.ReloadSchemasResponse.TabletResult
+ (*mysqlctl.BackupInfo)(nil), // 108: mysqlctl.BackupInfo
+ (*topodata.CellInfo)(nil), // 109: topodata.CellInfo
+ (*vtctldata.ShardReplicationPositionsResponse)(nil), // 110: vtctldata.ShardReplicationPositionsResponse
+ (*vtctldata.Keyspace)(nil), // 111: vtctldata.Keyspace
+ (*tabletmanagerdata.TableDefinition)(nil), // 112: tabletmanagerdata.TableDefinition
+ (*vtctldata.Shard)(nil), // 113: vtctldata.Shard
+ (*vschema.SrvVSchema)(nil), // 114: vschema.SrvVSchema
+ (*topodata.Tablet)(nil), // 115: topodata.Tablet
+ (*vschema.Keyspace)(nil), // 116: vschema.Keyspace
+ (*vtctldata.Workflow)(nil), // 117: vtctldata.Workflow
+ (*vtctldata.CreateKeyspaceRequest)(nil), // 118: vtctldata.CreateKeyspaceRequest
+ (*vtctldata.CreateShardRequest)(nil), // 119: vtctldata.CreateShardRequest
+ (*vtctldata.DeleteKeyspaceRequest)(nil), // 120: vtctldata.DeleteKeyspaceRequest
+ (*vtctldata.DeleteShardsRequest)(nil), // 121: vtctldata.DeleteShardsRequest
+ (*topodata.TabletAlias)(nil), // 122: topodata.TabletAlias
+ (*vtctldata.EmergencyReparentShardRequest)(nil), // 123: vtctldata.EmergencyReparentShardRequest
+ (*logutil.Event)(nil), // 124: logutil.Event
+ (*vtctldata.GetBackupsRequest)(nil), // 125: vtctldata.GetBackupsRequest
+ (*vtctldata.PlannedReparentShardRequest)(nil), // 126: vtctldata.PlannedReparentShardRequest
+ (*topodata.CellsAlias)(nil), // 127: topodata.CellsAlias
+ (*vtctldata.CreateShardResponse)(nil), // 128: vtctldata.CreateShardResponse
+ (*vtctldata.DeleteKeyspaceResponse)(nil), // 129: vtctldata.DeleteKeyspaceResponse
+ (*vtctldata.DeleteShardsResponse)(nil), // 130: vtctldata.DeleteShardsResponse
+ (*vtctldata.GetFullStatusResponse)(nil), // 131: vtctldata.GetFullStatusResponse
+ (*vtctldata.GetTopologyPathResponse)(nil), // 132: vtctldata.GetTopologyPathResponse
+ (*vtctldata.ValidateResponse)(nil), // 133: vtctldata.ValidateResponse
+ (*vtctldata.ValidateKeyspaceResponse)(nil), // 134: vtctldata.ValidateKeyspaceResponse
+ (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 135: vtctldata.ValidateSchemaKeyspaceResponse
+ (*vtctldata.ValidateShardResponse)(nil), // 136: vtctldata.ValidateShardResponse
+ (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 137: vtctldata.ValidateVersionKeyspaceResponse
+ (*vtctldata.ValidateVersionShardResponse)(nil), // 138: vtctldata.ValidateVersionShardResponse
}
var file_vtadmin_proto_depIdxs = []int32{
1, // 0: vtadmin.ClusterBackup.cluster:type_name -> vtadmin.Cluster
- 103, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo
+ 108, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo
1, // 2: vtadmin.ClusterCellsAliases.cluster:type_name -> vtadmin.Cluster
- 93, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry
+ 98, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry
1, // 4: vtadmin.ClusterCellInfo.cluster:type_name -> vtadmin.Cluster
- 104, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo
+ 109, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo
1, // 6: vtadmin.ClusterShardReplicationPosition.cluster:type_name -> vtadmin.Cluster
- 105, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse
+ 110, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse
15, // 8: vtadmin.ClusterWorkflows.workflows:type_name -> vtadmin.Workflow
1, // 9: vtadmin.Keyspace.cluster:type_name -> vtadmin.Cluster
- 106, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace
- 94, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry
+ 111, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace
+ 99, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry
1, // 12: vtadmin.Schema.cluster:type_name -> vtadmin.Cluster
- 107, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition
- 95, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry
+ 112, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition
+ 100, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry
1, // 15: vtadmin.Shard.cluster:type_name -> vtadmin.Cluster
- 108, // 16: vtadmin.Shard.shard:type_name -> vtctldata.Shard
+ 113, // 16: vtadmin.Shard.shard:type_name -> vtctldata.Shard
1, // 17: vtadmin.SrvVSchema.cluster:type_name -> vtadmin.Cluster
- 109, // 18: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema
+ 114, // 18: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema
1, // 19: vtadmin.Tablet.cluster:type_name -> vtadmin.Cluster
- 110, // 20: vtadmin.Tablet.tablet:type_name -> topodata.Tablet
+ 115, // 20: vtadmin.Tablet.tablet:type_name -> topodata.Tablet
0, // 21: vtadmin.Tablet.state:type_name -> vtadmin.Tablet.ServingState
1, // 22: vtadmin.VSchema.cluster:type_name -> vtadmin.Cluster
- 111, // 23: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace
+ 116, // 23: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace
1, // 24: vtadmin.Vtctld.cluster:type_name -> vtadmin.Cluster
1, // 25: vtadmin.VTGate.cluster:type_name -> vtadmin.Cluster
1, // 26: vtadmin.Workflow.cluster:type_name -> vtadmin.Cluster
- 112, // 27: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow
- 113, // 28: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest
+ 117, // 27: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow
+ 118, // 28: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest
7, // 29: vtadmin.CreateKeyspaceResponse.keyspace:type_name -> vtadmin.Keyspace
- 114, // 30: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest
- 115, // 31: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest
- 116, // 32: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest
- 117, // 33: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias
+ 119, // 30: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest
+ 120, // 31: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest
+ 121, // 32: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest
+ 122, // 33: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias
1, // 34: vtadmin.DeleteTabletResponse.cluster:type_name -> vtadmin.Cluster
- 118, // 35: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest
+ 123, // 35: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest
1, // 36: vtadmin.EmergencyFailoverShardResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 37: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 119, // 38: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event
- 47, // 39: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
- 120, // 40: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest
+ 122, // 37: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 124, // 38: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event
+ 48, // 39: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
+ 125, // 40: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest
2, // 41: vtadmin.GetBackupsResponse.backups:type_name -> vtadmin.ClusterBackup
4, // 42: vtadmin.GetCellInfosResponse.cell_infos:type_name -> vtadmin.ClusterCellInfo
3, // 43: vtadmin.GetCellsAliasesResponse.aliases:type_name -> vtadmin.ClusterCellsAliases
1, // 44: vtadmin.GetClustersResponse.clusters:type_name -> vtadmin.Cluster
- 14, // 45: vtadmin.GetGatesResponse.gates:type_name -> vtadmin.VTGate
- 7, // 46: vtadmin.GetKeyspacesResponse.keyspaces:type_name -> vtadmin.Keyspace
- 47, // 47: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
- 47, // 48: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
- 8, // 49: vtadmin.GetSchemasResponse.schemas:type_name -> vtadmin.Schema
- 5, // 50: vtadmin.GetShardReplicationPositionsResponse.replication_positions:type_name -> vtadmin.ClusterShardReplicationPosition
- 10, // 51: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema
- 117, // 52: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias
- 11, // 53: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet
- 12, // 54: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema
- 13, // 55: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld
- 99, // 56: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
- 117, // 57: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias
- 1, // 58: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster
- 121, // 59: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest
- 1, // 60: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 61: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 119, // 62: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event
- 117, // 63: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias
- 1, // 64: vtadmin.RefreshStateResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 65: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias
- 100, // 66: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult
- 101, // 67: vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult
- 102, // 68: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult
- 119, // 69: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event
- 117, // 70: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias
- 117, // 71: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias
- 1, // 72: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 73: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias
- 1, // 74: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 75: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias
- 117, // 76: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias
- 117, // 77: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias
- 1, // 78: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 79: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias
- 1, // 80: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 81: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias
- 1, // 82: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 83: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias
- 117, // 84: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias
- 117, // 85: vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias
- 122, // 86: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias
- 108, // 87: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard
- 97, // 88: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize
- 98, // 89: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry
- 96, // 90: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize
- 6, // 91: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows
- 7, // 92: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace
- 119, // 93: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event
- 9, // 94: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard
- 119, // 95: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event
- 11, // 96: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet
- 16, // 97: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest
- 18, // 98: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest
- 19, // 99: vtadmin.VTAdmin.DeleteKeyspace:input_type -> vtadmin.DeleteKeyspaceRequest
- 20, // 100: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest
- 21, // 101: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest
- 23, // 102: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest
- 25, // 103: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest
- 26, // 104: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest
- 28, // 105: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest
- 30, // 106: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest
- 32, // 107: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest
- 34, // 108: vtadmin.VTAdmin.GetGates:input_type -> vtadmin.GetGatesRequest
- 36, // 109: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest
- 37, // 110: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest
- 39, // 111: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest
- 40, // 112: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest
- 42, // 113: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest
- 44, // 114: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest
- 45, // 115: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest
- 48, // 116: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest
- 49, // 117: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest
- 51, // 118: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest
- 52, // 119: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest
- 54, // 120: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest
- 56, // 121: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest
- 57, // 122: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest
- 59, // 123: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest
- 61, // 124: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest
- 63, // 125: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest
- 65, // 126: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest
- 71, // 127: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest
- 67, // 128: vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest
- 69, // 129: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest
- 73, // 130: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest
- 75, // 131: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest
- 77, // 132: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest
- 79, // 133: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest
- 81, // 134: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest
- 83, // 135: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest
- 85, // 136: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> vtadmin.TabletExternallyPromotedRequest
- 88, // 137: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest
- 89, // 138: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest
- 90, // 139: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest
- 91, // 140: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest
- 17, // 141: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse
- 123, // 142: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse
- 124, // 143: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
- 125, // 144: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
- 22, // 145: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse
- 24, // 146: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> vtadmin.EmergencyFailoverShardResponse
- 8, // 147: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema
- 27, // 148: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse
- 29, // 149: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse
- 31, // 150: vtadmin.VTAdmin.GetCellsAliases:output_type -> vtadmin.GetCellsAliasesResponse
- 33, // 151: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse
- 35, // 152: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse
- 7, // 153: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace
- 38, // 154: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse
- 8, // 155: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema
- 41, // 156: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse
- 43, // 157: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse
- 10, // 158: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema
- 46, // 159: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse
- 11, // 160: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet
- 50, // 161: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse
- 12, // 162: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema
- 53, // 163: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse
- 55, // 164: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse
- 15, // 165: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow
- 58, // 166: vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse
- 60, // 167: vtadmin.VTAdmin.PingTablet:output_type -> vtadmin.PingTabletResponse
- 62, // 168: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse
- 64, // 169: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse
- 66, // 170: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse
- 72, // 171: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse
- 68, // 172: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse
- 70, // 173: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse
- 74, // 174: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse
- 76, // 175: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse
- 78, // 176: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse
- 80, // 177: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse
- 82, // 178: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse
- 84, // 179: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse
- 86, // 180: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse
- 126, // 181: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
- 127, // 182: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
- 128, // 183: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
- 92, // 184: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse
- 141, // [141:185] is the sub-list for method output_type
- 97, // [97:141] is the sub-list for method input_type
- 97, // [97:97] is the sub-list for extension type_name
- 97, // [97:97] is the sub-list for extension extendee
- 0, // [0:97] is the sub-list for field type_name
+ 122, // 45: vtadmin.GetFullStatusRequest.alias:type_name -> topodata.TabletAlias
+ 14, // 46: vtadmin.GetGatesResponse.gates:type_name -> vtadmin.VTGate
+ 7, // 47: vtadmin.GetKeyspacesResponse.keyspaces:type_name -> vtadmin.Keyspace
+ 48, // 48: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
+ 48, // 49: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
+ 8, // 50: vtadmin.GetSchemasResponse.schemas:type_name -> vtadmin.Schema
+ 5, // 51: vtadmin.GetShardReplicationPositionsResponse.replication_positions:type_name -> vtadmin.ClusterShardReplicationPosition
+ 10, // 52: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema
+ 122, // 53: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias
+ 11, // 54: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet
+ 12, // 55: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema
+ 13, // 56: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld
+ 104, // 57: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
+ 122, // 58: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 59: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster
+ 126, // 60: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest
+ 1, // 61: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 62: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 124, // 63: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event
+ 122, // 64: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 65: vtadmin.RefreshStateResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 66: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias
+ 105, // 67: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult
+ 106, // 68: vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult
+ 107, // 69: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult
+ 124, // 70: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event
+ 122, // 71: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias
+ 122, // 72: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias
+ 1, // 73: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 74: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 75: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 76: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias
+ 122, // 77: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias
+ 122, // 78: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 79: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 80: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 81: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 82: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 83: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 84: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias
+ 122, // 85: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias
+ 122, // 86: vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias
+ 127, // 87: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias
+ 113, // 88: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard
+ 102, // 89: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize
+ 103, // 90: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry
+ 101, // 91: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize
+ 6, // 92: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows
+ 7, // 93: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace
+ 124, // 94: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event
+ 9, // 95: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard
+ 124, // 96: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event
+ 11, // 97: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet
+ 16, // 98: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest
+ 18, // 99: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest
+ 19, // 100: vtadmin.VTAdmin.DeleteKeyspace:input_type -> vtadmin.DeleteKeyspaceRequest
+ 20, // 101: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest
+ 21, // 102: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest
+ 23, // 103: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest
+ 25, // 104: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest
+ 26, // 105: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest
+ 28, // 106: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest
+ 30, // 107: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest
+ 32, // 108: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest
+ 34, // 109: vtadmin.VTAdmin.GetFullStatus:input_type -> vtadmin.GetFullStatusRequest
+ 35, // 110: vtadmin.VTAdmin.GetGates:input_type -> vtadmin.GetGatesRequest
+ 37, // 111: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest
+ 38, // 112: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest
+ 40, // 113: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest
+ 41, // 114: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest
+ 43, // 115: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest
+ 45, // 116: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest
+ 46, // 117: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest
+ 49, // 118: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest
+ 50, // 119: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest
+ 52, // 120: vtadmin.VTAdmin.GetTopologyPath:input_type -> vtadmin.GetTopologyPathRequest
+ 53, // 121: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest
+ 54, // 122: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest
+ 56, // 123: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest
+ 58, // 124: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest
+ 59, // 125: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest
+ 61, // 126: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest
+ 63, // 127: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest
+ 65, // 128: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest
+ 67, // 129: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest
+ 73, // 130: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest
+ 69, // 131: vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest
+ 71, // 132: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest
+ 75, // 133: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest
+ 77, // 134: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest
+ 79, // 135: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest
+ 81, // 136: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest
+ 83, // 137: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest
+ 85, // 138: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest
+ 87, // 139: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> vtadmin.TabletExternallyPromotedRequest
+ 90, // 140: vtadmin.VTAdmin.Validate:input_type -> vtadmin.ValidateRequest
+ 91, // 141: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest
+ 92, // 142: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest
+ 93, // 143: vtadmin.VTAdmin.ValidateShard:input_type -> vtadmin.ValidateShardRequest
+ 94, // 144: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest
+ 95, // 145: vtadmin.VTAdmin.ValidateVersionShard:input_type -> vtadmin.ValidateVersionShardRequest
+ 96, // 146: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest
+ 17, // 147: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse
+ 128, // 148: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse
+ 129, // 149: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
+ 130, // 150: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
+ 22, // 151: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse
+ 24, // 152: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> vtadmin.EmergencyFailoverShardResponse
+ 8, // 153: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema
+ 27, // 154: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse
+ 29, // 155: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse
+ 31, // 156: vtadmin.VTAdmin.GetCellsAliases:output_type -> vtadmin.GetCellsAliasesResponse
+ 33, // 157: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse
+ 131, // 158: vtadmin.VTAdmin.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse
+ 36, // 159: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse
+ 7, // 160: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace
+ 39, // 161: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse
+ 8, // 162: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema
+ 42, // 163: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse
+ 44, // 164: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse
+ 10, // 165: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema
+ 47, // 166: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse
+ 11, // 167: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet
+ 51, // 168: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse
+ 132, // 169: vtadmin.VTAdmin.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse
+ 12, // 170: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema
+ 55, // 171: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse
+ 57, // 172: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse
+ 15, // 173: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow
+ 60, // 174: vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse
+ 62, // 175: vtadmin.VTAdmin.PingTablet:output_type -> vtadmin.PingTabletResponse
+ 64, // 176: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse
+ 66, // 177: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse
+ 68, // 178: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse
+ 74, // 179: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse
+ 70, // 180: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse
+ 72, // 181: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse
+ 76, // 182: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse
+ 78, // 183: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse
+ 80, // 184: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse
+ 82, // 185: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse
+ 84, // 186: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse
+ 86, // 187: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse
+ 88, // 188: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse
+ 133, // 189: vtadmin.VTAdmin.Validate:output_type -> vtctldata.ValidateResponse
+ 134, // 190: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
+ 135, // 191: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
+ 136, // 192: vtadmin.VTAdmin.ValidateShard:output_type -> vtctldata.ValidateShardResponse
+ 137, // 193: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
+ 138, // 194: vtadmin.VTAdmin.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse
+ 97, // 195: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse
+ 147, // [147:196] is the sub-list for method output_type
+ 98, // [98:147] is the sub-list for method input_type
+ 98, // [98:98] is the sub-list for extension type_name
+ 98, // [98:98] is the sub-list for extension extendee
+ 0, // [0:98] is the sub-list for field type_name
}
func init() { file_vtadmin_proto_init() }
@@ -7493,7 +7872,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetGatesRequest); i {
+ switch v := v.(*GetFullStatusRequest); i {
case 0:
return &v.state
case 1:
@@ -7505,7 +7884,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetGatesResponse); i {
+ switch v := v.(*GetGatesRequest); i {
case 0:
return &v.state
case 1:
@@ -7517,7 +7896,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyspaceRequest); i {
+ switch v := v.(*GetGatesResponse); i {
case 0:
return &v.state
case 1:
@@ -7529,7 +7908,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyspacesRequest); i {
+ switch v := v.(*GetKeyspaceRequest); i {
case 0:
return &v.state
case 1:
@@ -7541,7 +7920,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyspacesResponse); i {
+ switch v := v.(*GetKeyspacesRequest); i {
case 0:
return &v.state
case 1:
@@ -7553,7 +7932,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemaRequest); i {
+ switch v := v.(*GetKeyspacesResponse); i {
case 0:
return &v.state
case 1:
@@ -7565,7 +7944,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemasRequest); i {
+ switch v := v.(*GetSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -7577,7 +7956,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemasResponse); i {
+ switch v := v.(*GetSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7589,7 +7968,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardReplicationPositionsRequest); i {
+ switch v := v.(*GetSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7601,7 +7980,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardReplicationPositionsResponse); i {
+ switch v := v.(*GetShardReplicationPositionsRequest); i {
case 0:
return &v.state
case 1:
@@ -7613,7 +7992,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemaRequest); i {
+ switch v := v.(*GetShardReplicationPositionsResponse); i {
case 0:
return &v.state
case 1:
@@ -7625,7 +8004,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemasRequest); i {
+ switch v := v.(*GetSrvVSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -7637,7 +8016,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemasResponse); i {
+ switch v := v.(*GetSrvVSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7649,7 +8028,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemaTableSizeOptions); i {
+ switch v := v.(*GetSrvVSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7661,7 +8040,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletRequest); i {
+ switch v := v.(*GetSchemaTableSizeOptions); i {
case 0:
return &v.state
case 1:
@@ -7673,7 +8052,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletsRequest); i {
+ switch v := v.(*GetTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -7685,7 +8064,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletsResponse); i {
+ switch v := v.(*GetTabletsRequest); i {
case 0:
return &v.state
case 1:
@@ -7697,7 +8076,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemaRequest); i {
+ switch v := v.(*GetTabletsResponse); i {
case 0:
return &v.state
case 1:
@@ -7709,7 +8088,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemasRequest); i {
+ switch v := v.(*GetTopologyPathRequest); i {
case 0:
return &v.state
case 1:
@@ -7721,7 +8100,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemasResponse); i {
+ switch v := v.(*GetVSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -7733,7 +8112,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVtctldsRequest); i {
+ switch v := v.(*GetVSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7745,7 +8124,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVtctldsResponse); i {
+ switch v := v.(*GetVSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7757,7 +8136,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowRequest); i {
+ switch v := v.(*GetVtctldsRequest); i {
case 0:
return &v.state
case 1:
@@ -7769,7 +8148,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowsRequest); i {
+ switch v := v.(*GetVtctldsResponse); i {
case 0:
return &v.state
case 1:
@@ -7781,7 +8160,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowsResponse); i {
+ switch v := v.(*GetWorkflowRequest); i {
case 0:
return &v.state
case 1:
@@ -7793,7 +8172,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PingTabletRequest); i {
+ switch v := v.(*GetWorkflowsRequest); i {
case 0:
return &v.state
case 1:
@@ -7805,7 +8184,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PingTabletResponse); i {
+ switch v := v.(*GetWorkflowsResponse); i {
case 0:
return &v.state
case 1:
@@ -7817,7 +8196,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlannedFailoverShardRequest); i {
+ switch v := v.(*PingTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -7829,7 +8208,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlannedFailoverShardResponse); i {
+ switch v := v.(*PingTabletResponse); i {
case 0:
return &v.state
case 1:
@@ -7841,7 +8220,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildKeyspaceGraphRequest); i {
+ switch v := v.(*PlannedFailoverShardRequest); i {
case 0:
return &v.state
case 1:
@@ -7853,7 +8232,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildKeyspaceGraphResponse); i {
+ switch v := v.(*PlannedFailoverShardResponse); i {
case 0:
return &v.state
case 1:
@@ -7865,7 +8244,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateRequest); i {
+ switch v := v.(*RebuildKeyspaceGraphRequest); i {
case 0:
return &v.state
case 1:
@@ -7877,7 +8256,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateResponse); i {
+ switch v := v.(*RebuildKeyspaceGraphResponse); i {
case 0:
return &v.state
case 1:
@@ -7889,7 +8268,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemasRequest); i {
+ switch v := v.(*RefreshStateRequest); i {
case 0:
return &v.state
case 1:
@@ -7901,7 +8280,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemasResponse); i {
+ switch v := v.(*RefreshStateResponse); i {
case 0:
return &v.state
case 1:
@@ -7913,7 +8292,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaShardRequest); i {
+ switch v := v.(*ReloadSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7925,7 +8304,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaShardResponse); i {
+ switch v := v.(*ReloadSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7937,7 +8316,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshTabletReplicationSourceRequest); i {
+ switch v := v.(*ReloadSchemaShardRequest); i {
case 0:
return &v.state
case 1:
@@ -7949,7 +8328,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshTabletReplicationSourceResponse); i {
+ switch v := v.(*ReloadSchemaShardResponse); i {
case 0:
return &v.state
case 1:
@@ -7961,7 +8340,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveKeyspaceCellRequest); i {
+ switch v := v.(*RefreshTabletReplicationSourceRequest); i {
case 0:
return &v.state
case 1:
@@ -7973,7 +8352,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveKeyspaceCellResponse); i {
+ switch v := v.(*RefreshTabletReplicationSourceResponse); i {
case 0:
return &v.state
case 1:
@@ -7985,7 +8364,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RunHealthCheckRequest); i {
+ switch v := v.(*RemoveKeyspaceCellRequest); i {
case 0:
return &v.state
case 1:
@@ -7997,7 +8376,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RunHealthCheckResponse); i {
+ switch v := v.(*RemoveKeyspaceCellResponse); i {
case 0:
return &v.state
case 1:
@@ -8009,7 +8388,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadOnlyRequest); i {
+ switch v := v.(*RunHealthCheckRequest); i {
case 0:
return &v.state
case 1:
@@ -8021,7 +8400,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadOnlyResponse); i {
+ switch v := v.(*RunHealthCheckResponse); i {
case 0:
return &v.state
case 1:
@@ -8033,7 +8412,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadWriteRequest); i {
+ switch v := v.(*SetReadOnlyRequest); i {
case 0:
return &v.state
case 1:
@@ -8045,7 +8424,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadWriteResponse); i {
+ switch v := v.(*SetReadOnlyResponse); i {
case 0:
return &v.state
case 1:
@@ -8057,7 +8436,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartReplicationRequest); i {
+ switch v := v.(*SetReadWriteRequest); i {
case 0:
return &v.state
case 1:
@@ -8069,7 +8448,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartReplicationResponse); i {
+ switch v := v.(*SetReadWriteResponse); i {
case 0:
return &v.state
case 1:
@@ -8081,7 +8460,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopReplicationRequest); i {
+ switch v := v.(*StartReplicationRequest); i {
case 0:
return &v.state
case 1:
@@ -8093,7 +8472,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopReplicationResponse); i {
+ switch v := v.(*StartReplicationResponse); i {
case 0:
return &v.state
case 1:
@@ -8105,7 +8484,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyPromotedRequest); i {
+ switch v := v.(*StopReplicationRequest); i {
case 0:
return &v.state
case 1:
@@ -8117,7 +8496,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyPromotedResponse); i {
+ switch v := v.(*StopReplicationResponse); i {
case 0:
return &v.state
case 1:
@@ -8129,7 +8508,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyReparentedRequest); i {
+ switch v := v.(*TabletExternallyPromotedRequest); i {
case 0:
return &v.state
case 1:
@@ -8141,7 +8520,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateKeyspaceRequest); i {
+ switch v := v.(*TabletExternallyPromotedResponse); i {
case 0:
return &v.state
case 1:
@@ -8153,7 +8532,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateSchemaKeyspaceRequest); i {
+ switch v := v.(*TabletExternallyReparentedRequest); i {
case 0:
return &v.state
case 1:
@@ -8165,7 +8544,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateVersionKeyspaceRequest); i {
+ switch v := v.(*ValidateRequest); i {
case 0:
return &v.state
case 1:
@@ -8177,7 +8556,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*VTExplainRequest); i {
+ switch v := v.(*ValidateKeyspaceRequest); i {
case 0:
return &v.state
case 1:
@@ -8189,7 +8568,43 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*VTExplainResponse); i {
+ switch v := v.(*ValidateSchemaKeyspaceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateShardRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVersionKeyspaceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVersionShardRequest); i {
case 0:
return &v.state
case 1:
@@ -8201,7 +8616,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema_ShardTableSize); i {
+ switch v := v.(*VTExplainRequest); i {
case 0:
return &v.state
case 1:
@@ -8213,6 +8628,30 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VTExplainResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Schema_ShardTableSize); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Schema_TableSize); i {
case 0:
return &v.state
@@ -8224,7 +8663,7 @@ func file_vtadmin_proto_init() {
return nil
}
}
- file_vtadmin_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} {
+ file_vtadmin_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemasResponse_KeyspaceResult); i {
case 0:
return &v.state
@@ -8236,7 +8675,7 @@ func file_vtadmin_proto_init() {
return nil
}
}
- file_vtadmin_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
+ file_vtadmin_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemasResponse_ShardResult); i {
case 0:
return &v.state
@@ -8248,7 +8687,7 @@ func file_vtadmin_proto_init() {
return nil
}
}
- file_vtadmin_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
+ file_vtadmin_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemasResponse_TabletResult); i {
case 0:
return &v.state
@@ -8267,7 +8706,7 @@ func file_vtadmin_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_vtadmin_proto_rawDesc,
NumEnums: 1,
- NumMessages: 102,
+ NumMessages: 107,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go
index 3dcdc93216b..fd6cda64704 100644
--- a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go
+++ b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go
@@ -54,6 +54,8 @@ type VTAdminClient interface {
GetCellsAliases(ctx context.Context, in *GetCellsAliasesRequest, opts ...grpc.CallOption) (*GetCellsAliasesResponse, error)
// GetClusters returns all configured clusters.
GetClusters(ctx context.Context, in *GetClustersRequest, opts ...grpc.CallOption) (*GetClustersResponse, error)
+ // GetFullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others
+ GetFullStatus(ctx context.Context, in *GetFullStatusRequest, opts ...grpc.CallOption) (*vtctldata.GetFullStatusResponse, error)
// GetGates returns all gates across all the specified clusters.
GetGates(ctx context.Context, in *GetGatesRequest, opts ...grpc.CallOption) (*GetGatesResponse, error)
// GetKeyspace returns a keyspace by name in the specified cluster.
@@ -78,6 +80,8 @@ type VTAdminClient interface {
GetTablet(ctx context.Context, in *GetTabletRequest, opts ...grpc.CallOption) (*Tablet, error)
// GetTablets returns all tablets across all the specified clusters.
GetTablets(ctx context.Context, in *GetTabletsRequest, opts ...grpc.CallOption) (*GetTabletsResponse, error)
+ // GetTopologyPath returns the cell located at the specified path in the topology server.
+ GetTopologyPath(ctx context.Context, in *GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error)
// GetVSchema returns a VSchema for the specified keyspace in the specified
// cluster.
GetVSchema(ctx context.Context, in *GetVSchemaRequest, opts ...grpc.CallOption) (*VSchema, error)
@@ -138,6 +142,9 @@ type VTAdminClient interface {
// * "orchestrator" here refers to external orchestrator, not the newer,
// Vitess-aware orchestrator, VTOrc.
TabletExternallyPromoted(ctx context.Context, in *TabletExternallyPromotedRequest, opts ...grpc.CallOption) (*TabletExternallyPromotedResponse, error)
+ // Validate validates all nodes in a cluster that are reachable from the global replication graph,
+ // as well as all tablets in discoverable cells, are consistent
+ Validate(ctx context.Context, in *ValidateRequest, opts ...grpc.CallOption) (*vtctldata.ValidateResponse, error)
// ValidateKeyspace validates that all nodes reachable from the specified
// keyspace are consistent.
ValidateKeyspace(ctx context.Context, in *ValidateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateKeyspaceResponse, error)
@@ -145,9 +152,13 @@ type VTAdminClient interface {
// for shard 0 matches the schema on all of the other tablets in the
// keyspace.
ValidateSchemaKeyspace(ctx context.Context, in *ValidateSchemaKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateSchemaKeyspaceResponse, error)
+ // ValidateShard validates that that all nodes reachable from the specified shard are consistent.
+ ValidateShard(ctx context.Context, in *ValidateShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of
// shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(ctx context.Context, in *ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(ctx context.Context, in *ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error)
// VTExplain provides information on how Vitess plans to execute a
// particular query.
VTExplain(ctx context.Context, in *VTExplainRequest, opts ...grpc.CallOption) (*VTExplainResponse, error)
@@ -260,6 +271,15 @@ func (c *vTAdminClient) GetClusters(ctx context.Context, in *GetClustersRequest,
return out, nil
}
+func (c *vTAdminClient) GetFullStatus(ctx context.Context, in *GetFullStatusRequest, opts ...grpc.CallOption) (*vtctldata.GetFullStatusResponse, error) {
+ out := new(vtctldata.GetFullStatusResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetFullStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) GetGates(ctx context.Context, in *GetGatesRequest, opts ...grpc.CallOption) (*GetGatesResponse, error) {
out := new(GetGatesResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetGates", in, out, opts...)
@@ -350,6 +370,15 @@ func (c *vTAdminClient) GetTablets(ctx context.Context, in *GetTabletsRequest, o
return out, nil
}
+func (c *vTAdminClient) GetTopologyPath(ctx context.Context, in *GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error) {
+ out := new(vtctldata.GetTopologyPathResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetTopologyPath", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) GetVSchema(ctx context.Context, in *GetVSchemaRequest, opts ...grpc.CallOption) (*VSchema, error) {
out := new(VSchema)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetVSchema", in, out, opts...)
@@ -521,6 +550,15 @@ func (c *vTAdminClient) TabletExternallyPromoted(ctx context.Context, in *Tablet
return out, nil
}
+func (c *vTAdminClient) Validate(ctx context.Context, in *ValidateRequest, opts ...grpc.CallOption) (*vtctldata.ValidateResponse, error) {
+ out := new(vtctldata.ValidateResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/Validate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) ValidateKeyspace(ctx context.Context, in *ValidateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateKeyspaceResponse, error) {
out := new(vtctldata.ValidateKeyspaceResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateKeyspace", in, out, opts...)
@@ -539,6 +577,15 @@ func (c *vTAdminClient) ValidateSchemaKeyspace(ctx context.Context, in *Validate
return out, nil
}
+func (c *vTAdminClient) ValidateShard(ctx context.Context, in *ValidateShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateShardResponse, error) {
+ out := new(vtctldata.ValidateShardResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateShard", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) ValidateVersionKeyspace(ctx context.Context, in *ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionKeyspaceResponse, error) {
out := new(vtctldata.ValidateVersionKeyspaceResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateVersionKeyspace", in, out, opts...)
@@ -548,6 +595,15 @@ func (c *vTAdminClient) ValidateVersionKeyspace(ctx context.Context, in *Validat
return out, nil
}
+func (c *vTAdminClient) ValidateVersionShard(ctx context.Context, in *ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error) {
+ out := new(vtctldata.ValidateVersionShardResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateVersionShard", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) VTExplain(ctx context.Context, in *VTExplainRequest, opts ...grpc.CallOption) (*VTExplainResponse, error) {
out := new(VTExplainResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/VTExplain", in, out, opts...)
@@ -592,6 +648,8 @@ type VTAdminServer interface {
GetCellsAliases(context.Context, *GetCellsAliasesRequest) (*GetCellsAliasesResponse, error)
// GetClusters returns all configured clusters.
GetClusters(context.Context, *GetClustersRequest) (*GetClustersResponse, error)
+ // GetFullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others
+ GetFullStatus(context.Context, *GetFullStatusRequest) (*vtctldata.GetFullStatusResponse, error)
// GetGates returns all gates across all the specified clusters.
GetGates(context.Context, *GetGatesRequest) (*GetGatesResponse, error)
// GetKeyspace returns a keyspace by name in the specified cluster.
@@ -616,6 +674,8 @@ type VTAdminServer interface {
GetTablet(context.Context, *GetTabletRequest) (*Tablet, error)
// GetTablets returns all tablets across all the specified clusters.
GetTablets(context.Context, *GetTabletsRequest) (*GetTabletsResponse, error)
+ // GetTopologyPath returns the cell located at the specified path in the topology server.
+ GetTopologyPath(context.Context, *GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error)
// GetVSchema returns a VSchema for the specified keyspace in the specified
// cluster.
GetVSchema(context.Context, *GetVSchemaRequest) (*VSchema, error)
@@ -676,6 +736,9 @@ type VTAdminServer interface {
// * "orchestrator" here refers to external orchestrator, not the newer,
// Vitess-aware orchestrator, VTOrc.
TabletExternallyPromoted(context.Context, *TabletExternallyPromotedRequest) (*TabletExternallyPromotedResponse, error)
+ // Validate validates all nodes in a cluster that are reachable from the global replication graph,
+ // as well as all tablets in discoverable cells, are consistent
+ Validate(context.Context, *ValidateRequest) (*vtctldata.ValidateResponse, error)
// ValidateKeyspace validates that all nodes reachable from the specified
// keyspace are consistent.
ValidateKeyspace(context.Context, *ValidateKeyspaceRequest) (*vtctldata.ValidateKeyspaceResponse, error)
@@ -683,9 +746,13 @@ type VTAdminServer interface {
// for shard 0 matches the schema on all of the other tablets in the
// keyspace.
ValidateSchemaKeyspace(context.Context, *ValidateSchemaKeyspaceRequest) (*vtctldata.ValidateSchemaKeyspaceResponse, error)
+ // ValidateShard validates that that all nodes reachable from the specified shard are consistent.
+ ValidateShard(context.Context, *ValidateShardRequest) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of
// shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(context.Context, *ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(context.Context, *ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error)
// VTExplain provides information on how Vitess plans to execute a
// particular query.
VTExplain(context.Context, *VTExplainRequest) (*VTExplainResponse, error)
@@ -729,6 +796,9 @@ func (UnimplementedVTAdminServer) GetCellsAliases(context.Context, *GetCellsAlia
func (UnimplementedVTAdminServer) GetClusters(context.Context, *GetClustersRequest) (*GetClustersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetClusters not implemented")
}
+func (UnimplementedVTAdminServer) GetFullStatus(context.Context, *GetFullStatusRequest) (*vtctldata.GetFullStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetFullStatus not implemented")
+}
func (UnimplementedVTAdminServer) GetGates(context.Context, *GetGatesRequest) (*GetGatesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetGates not implemented")
}
@@ -759,6 +829,9 @@ func (UnimplementedVTAdminServer) GetTablet(context.Context, *GetTabletRequest)
func (UnimplementedVTAdminServer) GetTablets(context.Context, *GetTabletsRequest) (*GetTabletsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetTablets not implemented")
}
+func (UnimplementedVTAdminServer) GetTopologyPath(context.Context, *GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTopologyPath not implemented")
+}
func (UnimplementedVTAdminServer) GetVSchema(context.Context, *GetVSchemaRequest) (*VSchema, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetVSchema not implemented")
}
@@ -816,15 +889,24 @@ func (UnimplementedVTAdminServer) StopReplication(context.Context, *StopReplicat
func (UnimplementedVTAdminServer) TabletExternallyPromoted(context.Context, *TabletExternallyPromotedRequest) (*TabletExternallyPromotedResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method TabletExternallyPromoted not implemented")
}
+func (UnimplementedVTAdminServer) Validate(context.Context, *ValidateRequest) (*vtctldata.ValidateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Validate not implemented")
+}
func (UnimplementedVTAdminServer) ValidateKeyspace(context.Context, *ValidateKeyspaceRequest) (*vtctldata.ValidateKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateKeyspace not implemented")
}
func (UnimplementedVTAdminServer) ValidateSchemaKeyspace(context.Context, *ValidateSchemaKeyspaceRequest) (*vtctldata.ValidateSchemaKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateSchemaKeyspace not implemented")
}
+func (UnimplementedVTAdminServer) ValidateShard(context.Context, *ValidateShardRequest) (*vtctldata.ValidateShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ValidateShard not implemented")
+}
func (UnimplementedVTAdminServer) ValidateVersionKeyspace(context.Context, *ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionKeyspace not implemented")
}
+func (UnimplementedVTAdminServer) ValidateVersionShard(context.Context, *ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionShard not implemented")
+}
func (UnimplementedVTAdminServer) VTExplain(context.Context, *VTExplainRequest) (*VTExplainResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method VTExplain not implemented")
}
@@ -1039,6 +1121,24 @@ func _VTAdmin_GetClusters_Handler(srv interface{}, ctx context.Context, dec func
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_GetFullStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetFullStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).GetFullStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/GetFullStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).GetFullStatus(ctx, req.(*GetFullStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_GetGates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetGatesRequest)
if err := dec(in); err != nil {
@@ -1219,6 +1319,24 @@ func _VTAdmin_GetTablets_Handler(srv interface{}, ctx context.Context, dec func(
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_GetTopologyPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetTopologyPathRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).GetTopologyPath(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/GetTopologyPath",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).GetTopologyPath(ctx, req.(*GetTopologyPathRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_GetVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetVSchemaRequest)
if err := dec(in); err != nil {
@@ -1561,6 +1679,24 @@ func _VTAdmin_TabletExternallyPromoted_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_Validate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ValidateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).Validate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/Validate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).Validate(ctx, req.(*ValidateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_ValidateKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ValidateKeyspaceRequest)
if err := dec(in); err != nil {
@@ -1597,6 +1733,24 @@ func _VTAdmin_ValidateSchemaKeyspace_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_ValidateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ValidateShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).ValidateShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/ValidateShard",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).ValidateShard(ctx, req.(*ValidateShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_ValidateVersionKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ValidateVersionKeyspaceRequest)
if err := dec(in); err != nil {
@@ -1615,6 +1769,24 @@ func _VTAdmin_ValidateVersionKeyspace_Handler(srv interface{}, ctx context.Conte
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_ValidateVersionShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ValidateVersionShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).ValidateVersionShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/ValidateVersionShard",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).ValidateVersionShard(ctx, req.(*ValidateVersionShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_VTExplain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VTExplainRequest)
if err := dec(in); err != nil {
@@ -1684,6 +1856,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetClusters",
Handler: _VTAdmin_GetClusters_Handler,
},
+ {
+ MethodName: "GetFullStatus",
+ Handler: _VTAdmin_GetFullStatus_Handler,
+ },
{
MethodName: "GetGates",
Handler: _VTAdmin_GetGates_Handler,
@@ -1724,6 +1900,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetTablets",
Handler: _VTAdmin_GetTablets_Handler,
},
+ {
+ MethodName: "GetTopologyPath",
+ Handler: _VTAdmin_GetTopologyPath_Handler,
+ },
{
MethodName: "GetVSchema",
Handler: _VTAdmin_GetVSchema_Handler,
@@ -1800,6 +1980,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "TabletExternallyPromoted",
Handler: _VTAdmin_TabletExternallyPromoted_Handler,
},
+ {
+ MethodName: "Validate",
+ Handler: _VTAdmin_Validate_Handler,
+ },
{
MethodName: "ValidateKeyspace",
Handler: _VTAdmin_ValidateKeyspace_Handler,
@@ -1808,10 +1992,18 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "ValidateSchemaKeyspace",
Handler: _VTAdmin_ValidateSchemaKeyspace_Handler,
},
+ {
+ MethodName: "ValidateShard",
+ Handler: _VTAdmin_ValidateShard_Handler,
+ },
{
MethodName: "ValidateVersionKeyspace",
Handler: _VTAdmin_ValidateVersionKeyspace_Handler,
},
+ {
+ MethodName: "ValidateVersionShard",
+ Handler: _VTAdmin_ValidateVersionShard_Handler,
+ },
{
MethodName: "VTExplain",
Handler: _VTAdmin_VTExplain_Handler,
diff --git a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go
index e95dc2e6e11..9706f67e07f 100644
--- a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go
+++ b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: vtadmin.proto
package vtadmin
@@ -2001,6 +2001,56 @@ func (m *GetClustersResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *GetFullStatusRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetFullStatusRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetFullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Alias != nil {
+ size, err := m.Alias.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *GetGatesRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -2823,6 +2873,53 @@ func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarint(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -5006,6 +5103,56 @@ func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte)
return len(dAtA) - i, nil
}
+func (m *ValidateRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.PingTablets {
+ i--
+ if m.PingTablets {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -5110,6 +5257,70 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int
return len(dAtA) - i, nil
}
+func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.PingTablets {
+ i--
+ if m.PingTablets {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Shard) > 0 {
+ i -= len(m.Shard)
+ copy(dAtA[i:], m.Shard)
+ i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -5157,6 +5368,60 @@ func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (in
return len(dAtA) - i, nil
}
+func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Shard) > 0 {
+ i -= len(m.Shard)
+ copy(dAtA[i:], m.Shard)
+ i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *VTExplainRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -5276,9 +5541,7 @@ func (m *Cluster) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5296,9 +5559,7 @@ func (m *ClusterBackup) SizeVT() (n int) {
l = m.Backup.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5325,9 +5586,7 @@ func (m *ClusterCellsAliases) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5349,9 +5608,7 @@ func (m *ClusterCellInfo) SizeVT() (n int) {
l = m.CellInfo.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5377,9 +5634,7 @@ func (m *ClusterShardReplicationPosition) SizeVT() (n int) {
l = m.PositionInfo.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5401,9 +5656,7 @@ func (m *ClusterWorkflows) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5434,9 +5687,7 @@ func (m *Keyspace) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5452,9 +5703,7 @@ func (m *Schema_ShardTableSize) SizeVT() (n int) {
if m.DataLength != 0 {
n += 1 + sov(uint64(m.DataLength))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5483,9 +5732,7 @@ func (m *Schema_TableSize) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5522,9 +5769,7 @@ func (m *Schema) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5542,9 +5787,7 @@ func (m *Shard) SizeVT() (n int) {
l = m.Shard.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5566,9 +5809,7 @@ func (m *SrvVSchema) SizeVT() (n int) {
l = m.SrvVSchema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5593,9 +5834,7 @@ func (m *Tablet) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5617,9 +5856,7 @@ func (m *VSchema) SizeVT() (n int) {
l = m.VSchema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5641,9 +5878,7 @@ func (m *Vtctld) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5679,9 +5914,7 @@ func (m *VTGate) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5703,9 +5936,7 @@ func (m *Workflow) SizeVT() (n int) {
l = m.Workflow.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5723,9 +5954,7 @@ func (m *CreateKeyspaceRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5739,9 +5968,7 @@ func (m *CreateKeyspaceResponse) SizeVT() (n int) {
l = m.Keyspace.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5759,9 +5986,7 @@ func (m *CreateShardRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5779,9 +6004,7 @@ func (m *DeleteKeyspaceRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5799,9 +6022,7 @@ func (m *DeleteShardsRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5824,9 +6045,7 @@ func (m *DeleteTabletRequest) SizeVT() (n int) {
if m.AllowPrimary {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5844,9 +6063,7 @@ func (m *DeleteTabletResponse) SizeVT() (n int) {
l = m.Cluster.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5864,9 +6081,7 @@ func (m *EmergencyFailoverShardRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5898,9 +6113,7 @@ func (m *EmergencyFailoverShardResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5924,9 +6137,7 @@ func (m *FindSchemaRequest) SizeVT() (n int) {
l = m.TableSizeOptions.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5958,9 +6169,7 @@ func (m *GetBackupsRequest) SizeVT() (n int) {
l = m.RequestOptions.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -5976,9 +6185,7 @@ func (m *GetBackupsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6003,9 +6210,7 @@ func (m *GetCellInfosRequest) SizeVT() (n int) {
if m.NamesOnly {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6021,9 +6226,7 @@ func (m *GetCellInfosResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6039,9 +6242,7 @@ func (m *GetCellsAliasesRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6057,9 +6258,7 @@ func (m *GetCellsAliasesResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6069,9 +6268,7 @@ func (m *GetClustersRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6087,9 +6284,25 @@ func (m *GetClustersResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *GetFullStatusRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
}
+ var l int
+ _ = l
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.Alias != nil {
+ l = m.Alias.SizeVT()
+ n += 1 + l + sov(uint64(l))
+ }
+ n += len(m.unknownFields)
return n
}
@@ -6105,9 +6318,7 @@ func (m *GetGatesRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6123,9 +6334,7 @@ func (m *GetGatesResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6143,9 +6352,7 @@ func (m *GetKeyspaceRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6161,9 +6368,7 @@ func (m *GetKeyspacesRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6179,9 +6384,7 @@ func (m *GetKeyspacesResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6207,9 +6410,7 @@ func (m *GetSchemaRequest) SizeVT() (n int) {
l = m.TableSizeOptions.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6229,9 +6430,7 @@ func (m *GetSchemasRequest) SizeVT() (n int) {
l = m.TableSizeOptions.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6247,9 +6446,7 @@ func (m *GetSchemasResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6277,9 +6474,7 @@ func (m *GetShardReplicationPositionsRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6295,9 +6490,7 @@ func (m *GetShardReplicationPositionsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6315,9 +6508,7 @@ func (m *GetSrvVSchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6339,9 +6530,7 @@ func (m *GetSrvVSchemasRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6357,9 +6546,7 @@ func (m *GetSrvVSchemasResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6375,9 +6562,7 @@ func (m *GetSchemaTableSizeOptions) SizeVT() (n int) {
if m.IncludeNonServingShards {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6397,9 +6582,7 @@ func (m *GetTabletRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6415,9 +6598,7 @@ func (m *GetTabletsRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6433,9 +6614,25 @@ func (m *GetTabletsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *GetTopologyPathRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
}
+ var l int
+ _ = l
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ n += len(m.unknownFields)
return n
}
@@ -6453,9 +6650,7 @@ func (m *GetVSchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6471,9 +6666,7 @@ func (m *GetVSchemasRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6489,9 +6682,7 @@ func (m *GetVSchemasResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6507,9 +6698,7 @@ func (m *GetVtctldsRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6525,9 +6714,7 @@ func (m *GetVtctldsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6552,9 +6739,7 @@ func (m *GetWorkflowRequest) SizeVT() (n int) {
if m.ActiveOnly {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6585,9 +6770,7 @@ func (m *GetWorkflowsRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6610,9 +6793,7 @@ func (m *GetWorkflowsResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6632,9 +6813,7 @@ func (m *PingTabletRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6652,9 +6831,7 @@ func (m *PingTabletResponse) SizeVT() (n int) {
l = m.Cluster.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6672,9 +6849,7 @@ func (m *PlannedFailoverShardRequest) SizeVT() (n int) {
l = m.Options.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6706,9 +6881,7 @@ func (m *PlannedFailoverShardResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6735,9 +6908,7 @@ func (m *RebuildKeyspaceGraphRequest) SizeVT() (n int) {
if m.AllowPartial {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6751,9 +6922,7 @@ func (m *RebuildKeyspaceGraphResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6773,9 +6942,7 @@ func (m *RefreshStateRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6793,9 +6960,7 @@ func (m *RefreshStateResponse) SizeVT() (n int) {
l = m.Cluster.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6839,9 +7004,7 @@ func (m *ReloadSchemasRequest) SizeVT() (n int) {
if m.IncludePrimary {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6861,9 +7024,7 @@ func (m *ReloadSchemasResponse_KeyspaceResult) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6883,9 +7044,7 @@ func (m *ReloadSchemasResponse_ShardResult) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6903,9 +7062,7 @@ func (m *ReloadSchemasResponse_TabletResult) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6933,9 +7090,7 @@ func (m *ReloadSchemasResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6967,9 +7122,7 @@ func (m *ReloadSchemaShardRequest) SizeVT() (n int) {
if m.Concurrency != 0 {
n += 1 + sov(uint64(m.Concurrency))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -6985,9 +7138,7 @@ func (m *ReloadSchemaShardResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7007,9 +7158,7 @@ func (m *RefreshTabletReplicationSourceRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7035,9 +7184,7 @@ func (m *RefreshTabletReplicationSourceResponse) SizeVT() (n int) {
l = m.Cluster.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7065,9 +7212,7 @@ func (m *RemoveKeyspaceCellRequest) SizeVT() (n int) {
if m.Recursive {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7081,9 +7226,7 @@ func (m *RemoveKeyspaceCellResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7103,9 +7246,7 @@ func (m *RunHealthCheckRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7123,9 +7264,7 @@ func (m *RunHealthCheckResponse) SizeVT() (n int) {
l = m.Cluster.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7145,9 +7284,7 @@ func (m *SetReadOnlyRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7157,9 +7294,7 @@ func (m *SetReadOnlyResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7179,9 +7314,7 @@ func (m *SetReadWriteRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7191,9 +7324,7 @@ func (m *SetReadWriteResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7213,9 +7344,7 @@ func (m *StartReplicationRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7233,9 +7362,7 @@ func (m *StartReplicationResponse) SizeVT() (n int) {
l = m.Cluster.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7255,9 +7382,7 @@ func (m *StopReplicationRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7275,9 +7400,7 @@ func (m *StopReplicationResponse) SizeVT() (n int) {
l = m.Cluster.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7297,9 +7420,7 @@ func (m *TabletExternallyPromotedRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7329,9 +7450,7 @@ func (m *TabletExternallyPromotedResponse) SizeVT() (n int) {
l = m.OldPrimary.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7351,9 +7470,24 @@ func (m *TabletExternallyReparentedRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ValidateRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.PingTablets {
+ n += 2
}
+ n += len(m.unknownFields)
return n
}
@@ -7374,9 +7508,7 @@ func (m *ValidateKeyspaceRequest) SizeVT() (n int) {
if m.PingTablets {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -7394,13 +7526,11 @@ func (m *ValidateSchemaKeyspaceRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
-func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) {
+func (m *ValidateShardRequest) SizeVT() (n int) {
if m == nil {
return 0
}
@@ -7414,19 +7544,24 @@ func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.Shard)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.PingTablets {
+ n += 2
}
+ n += len(m.unknownFields)
return n
}
-func (m *VTExplainRequest) SizeVT() (n int) {
+func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Cluster)
+ l = len(m.ClusterId)
if l > 0 {
n += 1 + l + sov(uint64(l))
}
@@ -7434,29 +7569,65 @@ func (m *VTExplainRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- l = len(m.Sql)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ValidateVersionShardRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClusterId)
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.Keyspace)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Shard)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
return n
}
-func (m *VTExplainResponse) SizeVT() (n int) {
+func (m *VTExplainRequest) SizeVT() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.Response)
+ l = len(m.Cluster)
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.Keyspace)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ l = len(m.Sql)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *VTExplainResponse) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Response)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ n += len(m.unknownFields)
return n
}
@@ -12484,6 +12655,125 @@ func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Alias == nil {
+ m.Alias = &topodata.TabletAlias{}
+ }
+ if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -14247,6 +14537,121 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -19438,7 +19843,7 @@ func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
+func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -19461,10 +19866,10 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ValidateKeyspaceRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: ValidateRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ValidateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ValidateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -19500,10 +19905,10 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
m.ClusterId = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType)
}
- var stringLen uint64
+ var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -19513,13 +19918,116 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ m.PingTablets = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateKeyspaceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLength
}
postIndex := iNdEx + intStringLen
@@ -19688,6 +20196,173 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateShardRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PingTablets = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -19803,6 +20478,153 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *VTExplainRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -20033,6 +20855,7 @@ func (m *VTExplainResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go
index 85617f9a409..c6419bf796b 100644
--- a/go/vt/proto/vtctldata/vtctldata.pb.go
+++ b/go/vt/proto/vtctldata/vtctldata.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vtctldata.proto
@@ -301,6 +301,10 @@ type MaterializeSettings struct {
// and to the SourceTimeZone in reverse workflows
TargetTimeZone string `protobuf:"bytes,11,opt,name=target_time_zone,json=targetTimeZone,proto3" json:"target_time_zone,omitempty"`
SourceShards []string `protobuf:"bytes,12,rep,name=source_shards,json=sourceShards,proto3" json:"source_shards,omitempty"`
+ // OnDdl specifies the action to be taken when a DDL is encountered.
+ OnDdl string `protobuf:"bytes,13,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"`
+ // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes.
+ DeferSecondaryKeys bool `protobuf:"varint,14,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"`
}
func (x *MaterializeSettings) Reset() {
@@ -419,6 +423,20 @@ func (x *MaterializeSettings) GetSourceShards() []string {
return nil
}
+func (x *MaterializeSettings) GetOnDdl() string {
+ if x != nil {
+ return x.OnDdl
+ }
+ return ""
+}
+
+func (x *MaterializeSettings) GetDeferSecondaryKeys() bool {
+ if x != nil {
+ return x.DeferSecondaryKeys
+ }
+ return false
+}
+
type Keyspace struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -548,6 +566,8 @@ type Workflow struct {
Target *Workflow_ReplicationLocation `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
MaxVReplicationLag int64 `protobuf:"varint,4,opt,name=max_v_replication_lag,json=maxVReplicationLag,proto3" json:"max_v_replication_lag,omitempty"`
ShardStreams map[string]*Workflow_ShardStream `protobuf:"bytes,5,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ WorkflowType string `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"`
+ WorkflowSubType string `protobuf:"bytes,7,opt,name=workflow_sub_type,json=workflowSubType,proto3" json:"workflow_sub_type,omitempty"`
}
func (x *Workflow) Reset() {
@@ -617,6 +637,20 @@ func (x *Workflow) GetShardStreams() map[string]*Workflow_ShardStream {
return nil
}
+func (x *Workflow) GetWorkflowType() string {
+ if x != nil {
+ return x.WorkflowType
+ }
+ return ""
+}
+
+func (x *Workflow) GetWorkflowSubType() string {
+ if x != nil {
+ return x.WorkflowSubType
+ }
+ return ""
+}
+
type AddCellInfoRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1335,6 +1369,9 @@ type BackupRequest struct {
// Concurrency specifies the number of compression/checksum jobs to run
// simultaneously.
Concurrency uint64 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
+ // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty
+ // then the backup becomes incremental and applies as of given position.
+ IncrementalFromPos string `protobuf:"bytes,4,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"`
}
func (x *BackupRequest) Reset() {
@@ -1390,6 +1427,13 @@ func (x *BackupRequest) GetConcurrency() uint64 {
return 0
}
+func (x *BackupRequest) GetIncrementalFromPos() string {
+ if x != nil {
+ return x.IncrementalFromPos
+ }
+ return ""
+}
+
type BackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -4582,6 +4626,154 @@ func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeys
return nil
}
+type UpdateThrottlerConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ // Enable instructs to enable the throttler
+ Enable bool `protobuf:"varint,2,opt,name=enable,proto3" json:"enable,omitempty"`
+ // Disable instructs to disable the throttler
+ Disable bool `protobuf:"varint,3,opt,name=disable,proto3" json:"disable,omitempty"`
+ // Threshold for throttler (with no custom query, ie using default query, only positive values are considered)
+ Threshold float64 `protobuf:"fixed64,4,opt,name=threshold,proto3" json:"threshold,omitempty"`
+ // CustomQuery replaces the default replication lag query
+ CustomQuery string `protobuf:"bytes,5,opt,name=custom_query,json=customQuery,proto3" json:"custom_query,omitempty"`
+ // CustomQuerySet indicates that the value of CustomQuery has changed
+ CustomQuerySet bool `protobuf:"varint,6,opt,name=custom_query_set,json=customQuerySet,proto3" json:"custom_query_set,omitempty"`
+ // CheckAsCheckSelf instructs the throttler to respond to /check requests by checking the tablet's own health
+ CheckAsCheckSelf bool `protobuf:"varint,7,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"`
+ // CheckAsCheckShard instructs the throttler to respond to /check requests by checking the shard's health (this is the default behavior)
+ CheckAsCheckShard bool `protobuf:"varint,8,opt,name=check_as_check_shard,json=checkAsCheckShard,proto3" json:"check_as_check_shard,omitempty"`
+}
+
+func (x *UpdateThrottlerConfigRequest) Reset() {
+ *x = UpdateThrottlerConfigRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[78]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateThrottlerConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateThrottlerConfigRequest) ProtoMessage() {}
+
+func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[78]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateThrottlerConfigRequest.ProtoReflect.Descriptor instead.
+func (*UpdateThrottlerConfigRequest) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{78}
+}
+
+func (x *UpdateThrottlerConfigRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+func (x *UpdateThrottlerConfigRequest) GetEnable() bool {
+ if x != nil {
+ return x.Enable
+ }
+ return false
+}
+
+func (x *UpdateThrottlerConfigRequest) GetDisable() bool {
+ if x != nil {
+ return x.Disable
+ }
+ return false
+}
+
+func (x *UpdateThrottlerConfigRequest) GetThreshold() float64 {
+ if x != nil {
+ return x.Threshold
+ }
+ return 0
+}
+
+func (x *UpdateThrottlerConfigRequest) GetCustomQuery() string {
+ if x != nil {
+ return x.CustomQuery
+ }
+ return ""
+}
+
+func (x *UpdateThrottlerConfigRequest) GetCustomQuerySet() bool {
+ if x != nil {
+ return x.CustomQuerySet
+ }
+ return false
+}
+
+func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckSelf() bool {
+ if x != nil {
+ return x.CheckAsCheckSelf
+ }
+ return false
+}
+
+func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckShard() bool {
+ if x != nil {
+ return x.CheckAsCheckShard
+ }
+ return false
+}
+
+type UpdateThrottlerConfigResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *UpdateThrottlerConfigResponse) Reset() {
+ *x = UpdateThrottlerConfigResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[79]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateThrottlerConfigResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateThrottlerConfigResponse) ProtoMessage() {}
+
+func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[79]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateThrottlerConfigResponse.ProtoReflect.Descriptor instead.
+func (*UpdateThrottlerConfigResponse) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{79}
+}
+
type GetSrvVSchemaRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -4593,7 +4785,7 @@ type GetSrvVSchemaRequest struct {
func (x *GetSrvVSchemaRequest) Reset() {
*x = GetSrvVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[78]
+ mi := &file_vtctldata_proto_msgTypes[80]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4606,7 +4798,7 @@ func (x *GetSrvVSchemaRequest) String() string {
func (*GetSrvVSchemaRequest) ProtoMessage() {}
func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[78]
+ mi := &file_vtctldata_proto_msgTypes[80]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4619,7 +4811,7 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{78}
+ return file_vtctldata_proto_rawDescGZIP(), []int{80}
}
func (x *GetSrvVSchemaRequest) GetCell() string {
@@ -4640,7 +4832,7 @@ type GetSrvVSchemaResponse struct {
func (x *GetSrvVSchemaResponse) Reset() {
*x = GetSrvVSchemaResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[79]
+ mi := &file_vtctldata_proto_msgTypes[81]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4653,7 +4845,7 @@ func (x *GetSrvVSchemaResponse) String() string {
func (*GetSrvVSchemaResponse) ProtoMessage() {}
func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[79]
+ mi := &file_vtctldata_proto_msgTypes[81]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4666,7 +4858,7 @@ func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemaResponse.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{79}
+ return file_vtctldata_proto_rawDescGZIP(), []int{81}
}
func (x *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema {
@@ -4687,7 +4879,7 @@ type GetSrvVSchemasRequest struct {
func (x *GetSrvVSchemasRequest) Reset() {
*x = GetSrvVSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[80]
+ mi := &file_vtctldata_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4700,7 +4892,7 @@ func (x *GetSrvVSchemasRequest) String() string {
func (*GetSrvVSchemasRequest) ProtoMessage() {}
func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[80]
+ mi := &file_vtctldata_proto_msgTypes[82]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4713,7 +4905,7 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{80}
+ return file_vtctldata_proto_rawDescGZIP(), []int{82}
}
func (x *GetSrvVSchemasRequest) GetCells() []string {
@@ -4735,7 +4927,7 @@ type GetSrvVSchemasResponse struct {
func (x *GetSrvVSchemasResponse) Reset() {
*x = GetSrvVSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[81]
+ mi := &file_vtctldata_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4748,7 +4940,7 @@ func (x *GetSrvVSchemasResponse) String() string {
func (*GetSrvVSchemasResponse) ProtoMessage() {}
func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[81]
+ mi := &file_vtctldata_proto_msgTypes[83]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4761,7 +4953,7 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{81}
+ return file_vtctldata_proto_rawDescGZIP(), []int{83}
}
func (x *GetSrvVSchemasResponse) GetSrvVSchemas() map[string]*vschema.SrvVSchema {
@@ -4782,7 +4974,7 @@ type GetTabletRequest struct {
func (x *GetTabletRequest) Reset() {
*x = GetTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[82]
+ mi := &file_vtctldata_proto_msgTypes[84]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4795,7 +4987,7 @@ func (x *GetTabletRequest) String() string {
func (*GetTabletRequest) ProtoMessage() {}
func (x *GetTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[82]
+ mi := &file_vtctldata_proto_msgTypes[84]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4808,7 +5000,7 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead.
func (*GetTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{82}
+ return file_vtctldata_proto_rawDescGZIP(), []int{84}
}
func (x *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -4829,7 +5021,7 @@ type GetTabletResponse struct {
func (x *GetTabletResponse) Reset() {
*x = GetTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[83]
+ mi := &file_vtctldata_proto_msgTypes[85]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4842,7 +5034,7 @@ func (x *GetTabletResponse) String() string {
func (*GetTabletResponse) ProtoMessage() {}
func (x *GetTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[83]
+ mi := &file_vtctldata_proto_msgTypes[85]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4855,7 +5047,7 @@ func (x *GetTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletResponse.ProtoReflect.Descriptor instead.
func (*GetTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{83}
+ return file_vtctldata_proto_rawDescGZIP(), []int{85}
}
func (x *GetTabletResponse) GetTablet() *topodata.Tablet {
@@ -4897,7 +5089,7 @@ type GetTabletsRequest struct {
func (x *GetTabletsRequest) Reset() {
*x = GetTabletsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[84]
+ mi := &file_vtctldata_proto_msgTypes[86]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4910,7 +5102,7 @@ func (x *GetTabletsRequest) String() string {
func (*GetTabletsRequest) ProtoMessage() {}
func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[84]
+ mi := &file_vtctldata_proto_msgTypes[86]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4923,7 +5115,7 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead.
func (*GetTabletsRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{84}
+ return file_vtctldata_proto_rawDescGZIP(), []int{86}
}
func (x *GetTabletsRequest) GetKeyspace() string {
@@ -4979,7 +5171,7 @@ type GetTabletsResponse struct {
func (x *GetTabletsResponse) Reset() {
*x = GetTabletsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[85]
+ mi := &file_vtctldata_proto_msgTypes[87]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4992,7 +5184,7 @@ func (x *GetTabletsResponse) String() string {
func (*GetTabletsResponse) ProtoMessage() {}
func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[85]
+ mi := &file_vtctldata_proto_msgTypes[87]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5005,7 +5197,7 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead.
func (*GetTabletsResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{85}
+ return file_vtctldata_proto_rawDescGZIP(), []int{87}
}
func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet {
@@ -5015,31 +5207,31 @@ func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet {
return nil
}
-type GetVSchemaRequest struct {
+type GetTopologyPathRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
}
-func (x *GetVSchemaRequest) Reset() {
- *x = GetVSchemaRequest{}
+func (x *GetTopologyPathRequest) Reset() {
+ *x = GetTopologyPathRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[86]
+ mi := &file_vtctldata_proto_msgTypes[88]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *GetVSchemaRequest) String() string {
+func (x *GetTopologyPathRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetVSchemaRequest) ProtoMessage() {}
+func (*GetTopologyPathRequest) ProtoMessage() {}
-func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[86]
+func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[88]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5050,43 +5242,43 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead.
-func (*GetVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{86}
+// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead.
+func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{88}
}
-func (x *GetVSchemaRequest) GetKeyspace() string {
+func (x *GetTopologyPathRequest) GetPath() string {
if x != nil {
- return x.Keyspace
+ return x.Path
}
return ""
}
-type GetVersionRequest struct {
+type GetTopologyPathResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"`
+ Cell *TopologyCell `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"`
}
-func (x *GetVersionRequest) Reset() {
- *x = GetVersionRequest{}
+func (x *GetTopologyPathResponse) Reset() {
+ *x = GetTopologyPathResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[87]
+ mi := &file_vtctldata_proto_msgTypes[89]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *GetVersionRequest) String() string {
+func (x *GetTopologyPathResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetVersionRequest) ProtoMessage() {}
+func (*GetTopologyPathResponse) ProtoMessage() {}
-func (x *GetVersionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[87]
+func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[89]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5097,43 +5289,48 @@ func (x *GetVersionRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead.
-func (*GetVersionRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{87}
+// Deprecated: Use GetTopologyPathResponse.ProtoReflect.Descriptor instead.
+func (*GetTopologyPathResponse) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{89}
}
-func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias {
+func (x *GetTopologyPathResponse) GetCell() *TopologyCell {
if x != nil {
- return x.TabletAlias
+ return x.Cell
}
return nil
}
-type GetVersionResponse struct {
+type TopologyCell struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // Data is the file contents of the cell located at path.
+ // It is only populated if the cell is a terminal node.
+ Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+ Children []string `protobuf:"bytes,4,rep,name=children,proto3" json:"children,omitempty"`
}
-func (x *GetVersionResponse) Reset() {
- *x = GetVersionResponse{}
+func (x *TopologyCell) Reset() {
+ *x = TopologyCell{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[88]
+ mi := &file_vtctldata_proto_msgTypes[90]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *GetVersionResponse) String() string {
+func (x *TopologyCell) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetVersionResponse) ProtoMessage() {}
+func (*TopologyCell) ProtoMessage() {}
-func (x *GetVersionResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[88]
+func (x *TopologyCell) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[90]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5144,43 +5341,205 @@ func (x *GetVersionResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead.
-func (*GetVersionResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{88}
+// Deprecated: Use TopologyCell.ProtoReflect.Descriptor instead.
+func (*TopologyCell) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{90}
}
-func (x *GetVersionResponse) GetVersion() string {
+func (x *TopologyCell) GetName() string {
if x != nil {
- return x.Version
+ return x.Name
}
return ""
}
-type GetVSchemaResponse struct {
+func (x *TopologyCell) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *TopologyCell) GetData() string {
+ if x != nil {
+ return x.Data
+ }
+ return ""
+}
+
+func (x *TopologyCell) GetChildren() []string {
+ if x != nil {
+ return x.Children
+ }
+ return nil
+}
+
+type GetVSchemaRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"`
+ Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
}
-func (x *GetVSchemaResponse) Reset() {
- *x = GetVSchemaResponse{}
+func (x *GetVSchemaRequest) Reset() {
+ *x = GetVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[89]
+ mi := &file_vtctldata_proto_msgTypes[91]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *GetVSchemaResponse) String() string {
+func (x *GetVSchemaRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetVSchemaResponse) ProtoMessage() {}
+func (*GetVSchemaRequest) ProtoMessage() {}
+
+func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[91]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead.
+func (*GetVSchemaRequest) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{91}
+}
+
+func (x *GetVSchemaRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+type GetVersionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"`
+}
+
+func (x *GetVersionRequest) Reset() {
+ *x = GetVersionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[92]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetVersionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetVersionRequest) ProtoMessage() {}
+
+func (x *GetVersionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[92]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead.
+func (*GetVersionRequest) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{92}
+}
+
+func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias {
+ if x != nil {
+ return x.TabletAlias
+ }
+ return nil
+}
+
+type GetVersionResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *GetVersionResponse) Reset() {
+ *x = GetVersionResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[93]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetVersionResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetVersionResponse) ProtoMessage() {}
+
+func (x *GetVersionResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[93]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead.
+func (*GetVersionResponse) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{93}
+}
+
+func (x *GetVersionResponse) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+type GetVSchemaResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"`
+}
+
+func (x *GetVSchemaResponse) Reset() {
+ *x = GetVSchemaResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[94]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetVSchemaResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetVSchemaResponse) ProtoMessage() {}
func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[89]
+ mi := &file_vtctldata_proto_msgTypes[94]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5193,7 +5552,7 @@ func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead.
func (*GetVSchemaResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{89}
+ return file_vtctldata_proto_rawDescGZIP(), []int{94}
}
func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace {
@@ -5215,7 +5574,7 @@ type GetWorkflowsRequest struct {
func (x *GetWorkflowsRequest) Reset() {
*x = GetWorkflowsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[90]
+ mi := &file_vtctldata_proto_msgTypes[95]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5228,7 +5587,7 @@ func (x *GetWorkflowsRequest) String() string {
func (*GetWorkflowsRequest) ProtoMessage() {}
func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[90]
+ mi := &file_vtctldata_proto_msgTypes[95]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5241,7 +5600,7 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead.
func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{90}
+ return file_vtctldata_proto_rawDescGZIP(), []int{95}
}
func (x *GetWorkflowsRequest) GetKeyspace() string {
@@ -5269,7 +5628,7 @@ type GetWorkflowsResponse struct {
func (x *GetWorkflowsResponse) Reset() {
*x = GetWorkflowsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[91]
+ mi := &file_vtctldata_proto_msgTypes[96]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5282,7 +5641,7 @@ func (x *GetWorkflowsResponse) String() string {
func (*GetWorkflowsResponse) ProtoMessage() {}
func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[91]
+ mi := &file_vtctldata_proto_msgTypes[96]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5295,7 +5654,7 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead.
func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{91}
+ return file_vtctldata_proto_rawDescGZIP(), []int{96}
}
func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow {
@@ -5320,7 +5679,7 @@ type InitShardPrimaryRequest struct {
func (x *InitShardPrimaryRequest) Reset() {
*x = InitShardPrimaryRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[92]
+ mi := &file_vtctldata_proto_msgTypes[97]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5333,7 +5692,7 @@ func (x *InitShardPrimaryRequest) String() string {
func (*InitShardPrimaryRequest) ProtoMessage() {}
func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[92]
+ mi := &file_vtctldata_proto_msgTypes[97]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5346,7 +5705,7 @@ func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead.
func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{92}
+ return file_vtctldata_proto_rawDescGZIP(), []int{97}
}
func (x *InitShardPrimaryRequest) GetKeyspace() string {
@@ -5395,7 +5754,7 @@ type InitShardPrimaryResponse struct {
func (x *InitShardPrimaryResponse) Reset() {
*x = InitShardPrimaryResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[93]
+ mi := &file_vtctldata_proto_msgTypes[98]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5408,7 +5767,7 @@ func (x *InitShardPrimaryResponse) String() string {
func (*InitShardPrimaryResponse) ProtoMessage() {}
func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[93]
+ mi := &file_vtctldata_proto_msgTypes[98]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5421,7 +5780,7 @@ func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead.
func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{93}
+ return file_vtctldata_proto_rawDescGZIP(), []int{98}
}
func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event {
@@ -5442,7 +5801,7 @@ type PingTabletRequest struct {
func (x *PingTabletRequest) Reset() {
*x = PingTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[94]
+ mi := &file_vtctldata_proto_msgTypes[99]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5455,7 +5814,7 @@ func (x *PingTabletRequest) String() string {
func (*PingTabletRequest) ProtoMessage() {}
func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[94]
+ mi := &file_vtctldata_proto_msgTypes[99]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5468,7 +5827,7 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead.
func (*PingTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{94}
+ return file_vtctldata_proto_rawDescGZIP(), []int{99}
}
func (x *PingTabletRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -5487,7 +5846,7 @@ type PingTabletResponse struct {
func (x *PingTabletResponse) Reset() {
*x = PingTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[95]
+ mi := &file_vtctldata_proto_msgTypes[100]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5500,7 +5859,7 @@ func (x *PingTabletResponse) String() string {
func (*PingTabletResponse) ProtoMessage() {}
func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[95]
+ mi := &file_vtctldata_proto_msgTypes[100]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5513,7 +5872,7 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead.
func (*PingTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{95}
+ return file_vtctldata_proto_rawDescGZIP(), []int{100}
}
type PlannedReparentShardRequest struct {
@@ -5548,7 +5907,7 @@ type PlannedReparentShardRequest struct {
func (x *PlannedReparentShardRequest) Reset() {
*x = PlannedReparentShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[96]
+ mi := &file_vtctldata_proto_msgTypes[101]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5561,7 +5920,7 @@ func (x *PlannedReparentShardRequest) String() string {
func (*PlannedReparentShardRequest) ProtoMessage() {}
func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[96]
+ mi := &file_vtctldata_proto_msgTypes[101]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5574,7 +5933,7 @@ func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead.
func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{96}
+ return file_vtctldata_proto_rawDescGZIP(), []int{101}
}
func (x *PlannedReparentShardRequest) GetKeyspace() string {
@@ -5632,7 +5991,7 @@ type PlannedReparentShardResponse struct {
func (x *PlannedReparentShardResponse) Reset() {
*x = PlannedReparentShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[97]
+ mi := &file_vtctldata_proto_msgTypes[102]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5645,7 +6004,7 @@ func (x *PlannedReparentShardResponse) String() string {
func (*PlannedReparentShardResponse) ProtoMessage() {}
func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[97]
+ mi := &file_vtctldata_proto_msgTypes[102]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5658,7 +6017,7 @@ func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead.
func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{97}
+ return file_vtctldata_proto_rawDescGZIP(), []int{102}
}
func (x *PlannedReparentShardResponse) GetKeyspace() string {
@@ -5704,7 +6063,7 @@ type RebuildKeyspaceGraphRequest struct {
func (x *RebuildKeyspaceGraphRequest) Reset() {
*x = RebuildKeyspaceGraphRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[98]
+ mi := &file_vtctldata_proto_msgTypes[103]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5717,7 +6076,7 @@ func (x *RebuildKeyspaceGraphRequest) String() string {
func (*RebuildKeyspaceGraphRequest) ProtoMessage() {}
func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[98]
+ mi := &file_vtctldata_proto_msgTypes[103]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5730,7 +6089,7 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{98}
+ return file_vtctldata_proto_rawDescGZIP(), []int{103}
}
func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string {
@@ -5763,7 +6122,7 @@ type RebuildKeyspaceGraphResponse struct {
func (x *RebuildKeyspaceGraphResponse) Reset() {
*x = RebuildKeyspaceGraphResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[99]
+ mi := &file_vtctldata_proto_msgTypes[104]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5776,7 +6135,7 @@ func (x *RebuildKeyspaceGraphResponse) String() string {
func (*RebuildKeyspaceGraphResponse) ProtoMessage() {}
func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[99]
+ mi := &file_vtctldata_proto_msgTypes[104]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5789,7 +6148,7 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{99}
+ return file_vtctldata_proto_rawDescGZIP(), []int{104}
}
type RebuildVSchemaGraphRequest struct {
@@ -5805,7 +6164,7 @@ type RebuildVSchemaGraphRequest struct {
func (x *RebuildVSchemaGraphRequest) Reset() {
*x = RebuildVSchemaGraphRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[100]
+ mi := &file_vtctldata_proto_msgTypes[105]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5818,7 +6177,7 @@ func (x *RebuildVSchemaGraphRequest) String() string {
func (*RebuildVSchemaGraphRequest) ProtoMessage() {}
func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[100]
+ mi := &file_vtctldata_proto_msgTypes[105]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5831,7 +6190,7 @@ func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead.
func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{100}
+ return file_vtctldata_proto_rawDescGZIP(), []int{105}
}
func (x *RebuildVSchemaGraphRequest) GetCells() []string {
@@ -5850,7 +6209,7 @@ type RebuildVSchemaGraphResponse struct {
func (x *RebuildVSchemaGraphResponse) Reset() {
*x = RebuildVSchemaGraphResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[101]
+ mi := &file_vtctldata_proto_msgTypes[106]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5863,7 +6222,7 @@ func (x *RebuildVSchemaGraphResponse) String() string {
func (*RebuildVSchemaGraphResponse) ProtoMessage() {}
func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[101]
+ mi := &file_vtctldata_proto_msgTypes[106]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5876,7 +6235,7 @@ func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead.
func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{101}
+ return file_vtctldata_proto_rawDescGZIP(), []int{106}
}
type RefreshStateRequest struct {
@@ -5890,7 +6249,7 @@ type RefreshStateRequest struct {
func (x *RefreshStateRequest) Reset() {
*x = RefreshStateRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[102]
+ mi := &file_vtctldata_proto_msgTypes[107]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5903,7 +6262,7 @@ func (x *RefreshStateRequest) String() string {
func (*RefreshStateRequest) ProtoMessage() {}
func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[102]
+ mi := &file_vtctldata_proto_msgTypes[107]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5916,7 +6275,7 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead.
func (*RefreshStateRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{102}
+ return file_vtctldata_proto_rawDescGZIP(), []int{107}
}
func (x *RefreshStateRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -5935,7 +6294,7 @@ type RefreshStateResponse struct {
func (x *RefreshStateResponse) Reset() {
*x = RefreshStateResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[103]
+ mi := &file_vtctldata_proto_msgTypes[108]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5948,7 +6307,7 @@ func (x *RefreshStateResponse) String() string {
func (*RefreshStateResponse) ProtoMessage() {}
func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[103]
+ mi := &file_vtctldata_proto_msgTypes[108]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5961,7 +6320,7 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead.
func (*RefreshStateResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{103}
+ return file_vtctldata_proto_rawDescGZIP(), []int{108}
}
type RefreshStateByShardRequest struct {
@@ -5977,7 +6336,7 @@ type RefreshStateByShardRequest struct {
func (x *RefreshStateByShardRequest) Reset() {
*x = RefreshStateByShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[104]
+ mi := &file_vtctldata_proto_msgTypes[109]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5990,7 +6349,7 @@ func (x *RefreshStateByShardRequest) String() string {
func (*RefreshStateByShardRequest) ProtoMessage() {}
func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[104]
+ mi := &file_vtctldata_proto_msgTypes[109]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6003,7 +6362,7 @@ func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateByShardRequest.ProtoReflect.Descriptor instead.
func (*RefreshStateByShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{104}
+ return file_vtctldata_proto_rawDescGZIP(), []int{109}
}
func (x *RefreshStateByShardRequest) GetKeyspace() string {
@@ -6040,7 +6399,7 @@ type RefreshStateByShardResponse struct {
func (x *RefreshStateByShardResponse) Reset() {
*x = RefreshStateByShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[105]
+ mi := &file_vtctldata_proto_msgTypes[110]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6053,7 +6412,7 @@ func (x *RefreshStateByShardResponse) String() string {
func (*RefreshStateByShardResponse) ProtoMessage() {}
func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[105]
+ mi := &file_vtctldata_proto_msgTypes[110]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6066,7 +6425,7 @@ func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateByShardResponse.ProtoReflect.Descriptor instead.
func (*RefreshStateByShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{105}
+ return file_vtctldata_proto_rawDescGZIP(), []int{110}
}
func (x *RefreshStateByShardResponse) GetIsPartialRefresh() bool {
@@ -6094,7 +6453,7 @@ type ReloadSchemaRequest struct {
func (x *ReloadSchemaRequest) Reset() {
*x = ReloadSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[106]
+ mi := &file_vtctldata_proto_msgTypes[111]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6107,7 +6466,7 @@ func (x *ReloadSchemaRequest) String() string {
func (*ReloadSchemaRequest) ProtoMessage() {}
func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[106]
+ mi := &file_vtctldata_proto_msgTypes[111]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6120,7 +6479,7 @@ func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{106}
+ return file_vtctldata_proto_rawDescGZIP(), []int{111}
}
func (x *ReloadSchemaRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -6139,7 +6498,7 @@ type ReloadSchemaResponse struct {
func (x *ReloadSchemaResponse) Reset() {
*x = ReloadSchemaResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[107]
+ mi := &file_vtctldata_proto_msgTypes[112]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6152,7 +6511,7 @@ func (x *ReloadSchemaResponse) String() string {
func (*ReloadSchemaResponse) ProtoMessage() {}
func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[107]
+ mi := &file_vtctldata_proto_msgTypes[112]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6165,7 +6524,7 @@ func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{107}
+ return file_vtctldata_proto_rawDescGZIP(), []int{112}
}
type ReloadSchemaKeyspaceRequest struct {
@@ -6185,7 +6544,7 @@ type ReloadSchemaKeyspaceRequest struct {
func (x *ReloadSchemaKeyspaceRequest) Reset() {
*x = ReloadSchemaKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[108]
+ mi := &file_vtctldata_proto_msgTypes[113]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6198,7 +6557,7 @@ func (x *ReloadSchemaKeyspaceRequest) String() string {
func (*ReloadSchemaKeyspaceRequest) ProtoMessage() {}
func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[108]
+ mi := &file_vtctldata_proto_msgTypes[113]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6211,7 +6570,7 @@ func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{108}
+ return file_vtctldata_proto_rawDescGZIP(), []int{113}
}
func (x *ReloadSchemaKeyspaceRequest) GetKeyspace() string {
@@ -6253,7 +6612,7 @@ type ReloadSchemaKeyspaceResponse struct {
func (x *ReloadSchemaKeyspaceResponse) Reset() {
*x = ReloadSchemaKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[109]
+ mi := &file_vtctldata_proto_msgTypes[114]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6266,7 +6625,7 @@ func (x *ReloadSchemaKeyspaceResponse) String() string {
func (*ReloadSchemaKeyspaceResponse) ProtoMessage() {}
func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[109]
+ mi := &file_vtctldata_proto_msgTypes[114]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6279,7 +6638,7 @@ func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{109}
+ return file_vtctldata_proto_rawDescGZIP(), []int{114}
}
func (x *ReloadSchemaKeyspaceResponse) GetEvents() []*logutil.Event {
@@ -6305,7 +6664,7 @@ type ReloadSchemaShardRequest struct {
func (x *ReloadSchemaShardRequest) Reset() {
*x = ReloadSchemaShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[110]
+ mi := &file_vtctldata_proto_msgTypes[115]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6318,7 +6677,7 @@ func (x *ReloadSchemaShardRequest) String() string {
func (*ReloadSchemaShardRequest) ProtoMessage() {}
func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[110]
+ mi := &file_vtctldata_proto_msgTypes[115]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6331,7 +6690,7 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{110}
+ return file_vtctldata_proto_rawDescGZIP(), []int{115}
}
func (x *ReloadSchemaShardRequest) GetKeyspace() string {
@@ -6380,7 +6739,7 @@ type ReloadSchemaShardResponse struct {
func (x *ReloadSchemaShardResponse) Reset() {
*x = ReloadSchemaShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[111]
+ mi := &file_vtctldata_proto_msgTypes[116]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6393,7 +6752,7 @@ func (x *ReloadSchemaShardResponse) String() string {
func (*ReloadSchemaShardResponse) ProtoMessage() {}
func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[111]
+ mi := &file_vtctldata_proto_msgTypes[116]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6406,7 +6765,7 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{111}
+ return file_vtctldata_proto_rawDescGZIP(), []int{116}
}
func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event {
@@ -6429,7 +6788,7 @@ type RemoveBackupRequest struct {
func (x *RemoveBackupRequest) Reset() {
*x = RemoveBackupRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[112]
+ mi := &file_vtctldata_proto_msgTypes[117]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6442,7 +6801,7 @@ func (x *RemoveBackupRequest) String() string {
func (*RemoveBackupRequest) ProtoMessage() {}
func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[112]
+ mi := &file_vtctldata_proto_msgTypes[117]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6455,7 +6814,7 @@ func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveBackupRequest.ProtoReflect.Descriptor instead.
func (*RemoveBackupRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{112}
+ return file_vtctldata_proto_rawDescGZIP(), []int{117}
}
func (x *RemoveBackupRequest) GetKeyspace() string {
@@ -6488,7 +6847,7 @@ type RemoveBackupResponse struct {
func (x *RemoveBackupResponse) Reset() {
*x = RemoveBackupResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[113]
+ mi := &file_vtctldata_proto_msgTypes[118]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6501,7 +6860,7 @@ func (x *RemoveBackupResponse) String() string {
func (*RemoveBackupResponse) ProtoMessage() {}
func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[113]
+ mi := &file_vtctldata_proto_msgTypes[118]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6514,7 +6873,7 @@ func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveBackupResponse.ProtoReflect.Descriptor instead.
func (*RemoveBackupResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{113}
+ return file_vtctldata_proto_rawDescGZIP(), []int{118}
}
type RemoveKeyspaceCellRequest struct {
@@ -6536,7 +6895,7 @@ type RemoveKeyspaceCellRequest struct {
func (x *RemoveKeyspaceCellRequest) Reset() {
*x = RemoveKeyspaceCellRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[114]
+ mi := &file_vtctldata_proto_msgTypes[119]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6549,7 +6908,7 @@ func (x *RemoveKeyspaceCellRequest) String() string {
func (*RemoveKeyspaceCellRequest) ProtoMessage() {}
func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[114]
+ mi := &file_vtctldata_proto_msgTypes[119]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6562,7 +6921,7 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{114}
+ return file_vtctldata_proto_rawDescGZIP(), []int{119}
}
func (x *RemoveKeyspaceCellRequest) GetKeyspace() string {
@@ -6602,7 +6961,7 @@ type RemoveKeyspaceCellResponse struct {
func (x *RemoveKeyspaceCellResponse) Reset() {
*x = RemoveKeyspaceCellResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[115]
+ mi := &file_vtctldata_proto_msgTypes[120]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6615,7 +6974,7 @@ func (x *RemoveKeyspaceCellResponse) String() string {
func (*RemoveKeyspaceCellResponse) ProtoMessage() {}
func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[115]
+ mi := &file_vtctldata_proto_msgTypes[120]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6628,7 +6987,7 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{115}
+ return file_vtctldata_proto_rawDescGZIP(), []int{120}
}
type RemoveShardCellRequest struct {
@@ -6651,7 +7010,7 @@ type RemoveShardCellRequest struct {
func (x *RemoveShardCellRequest) Reset() {
*x = RemoveShardCellRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[116]
+ mi := &file_vtctldata_proto_msgTypes[121]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6664,7 +7023,7 @@ func (x *RemoveShardCellRequest) String() string {
func (*RemoveShardCellRequest) ProtoMessage() {}
func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[116]
+ mi := &file_vtctldata_proto_msgTypes[121]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6677,7 +7036,7 @@ func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead.
func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{116}
+ return file_vtctldata_proto_rawDescGZIP(), []int{121}
}
func (x *RemoveShardCellRequest) GetKeyspace() string {
@@ -6724,7 +7083,7 @@ type RemoveShardCellResponse struct {
func (x *RemoveShardCellResponse) Reset() {
*x = RemoveShardCellResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[117]
+ mi := &file_vtctldata_proto_msgTypes[122]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6737,7 +7096,7 @@ func (x *RemoveShardCellResponse) String() string {
func (*RemoveShardCellResponse) ProtoMessage() {}
func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[117]
+ mi := &file_vtctldata_proto_msgTypes[122]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6750,7 +7109,7 @@ func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead.
func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{117}
+ return file_vtctldata_proto_rawDescGZIP(), []int{122}
}
type ReparentTabletRequest struct {
@@ -6766,7 +7125,7 @@ type ReparentTabletRequest struct {
func (x *ReparentTabletRequest) Reset() {
*x = ReparentTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[118]
+ mi := &file_vtctldata_proto_msgTypes[123]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6779,7 +7138,7 @@ func (x *ReparentTabletRequest) String() string {
func (*ReparentTabletRequest) ProtoMessage() {}
func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[118]
+ mi := &file_vtctldata_proto_msgTypes[123]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6792,7 +7151,7 @@ func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead.
func (*ReparentTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{118}
+ return file_vtctldata_proto_rawDescGZIP(), []int{123}
}
func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias {
@@ -6818,7 +7177,7 @@ type ReparentTabletResponse struct {
func (x *ReparentTabletResponse) Reset() {
*x = ReparentTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[119]
+ mi := &file_vtctldata_proto_msgTypes[124]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6831,7 +7190,7 @@ func (x *ReparentTabletResponse) String() string {
func (*ReparentTabletResponse) ProtoMessage() {}
func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[119]
+ mi := &file_vtctldata_proto_msgTypes[124]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6844,7 +7203,7 @@ func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead.
func (*ReparentTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{119}
+ return file_vtctldata_proto_rawDescGZIP(), []int{124}
}
func (x *ReparentTabletResponse) GetKeyspace() string {
@@ -6877,12 +7236,18 @@ type RestoreFromBackupRequest struct {
// BackupTime, if set, will use the backup taken most closely at or before
// this time. If nil, the latest backup will be restored on the tablet.
BackupTime *vttime.Time `protobuf:"bytes,2,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"`
+ // RestoreToPos indicates a position for a point-in-time recovery. The recovery
+ // is expected to utilize one full backup, followed by zero or more incremental backups,
+ // that reach the precise desired position
+ RestoreToPos string `protobuf:"bytes,3,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"`
+ // Dry run does not actually performs the restore, but validates the steps and availability of backups
+ DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"`
}
func (x *RestoreFromBackupRequest) Reset() {
*x = RestoreFromBackupRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[120]
+ mi := &file_vtctldata_proto_msgTypes[125]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6895,7 +7260,7 @@ func (x *RestoreFromBackupRequest) String() string {
func (*RestoreFromBackupRequest) ProtoMessage() {}
func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[120]
+ mi := &file_vtctldata_proto_msgTypes[125]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6908,7 +7273,7 @@ func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead.
func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{120}
+ return file_vtctldata_proto_rawDescGZIP(), []int{125}
}
func (x *RestoreFromBackupRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -6925,6 +7290,20 @@ func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time {
return nil
}
+func (x *RestoreFromBackupRequest) GetRestoreToPos() string {
+ if x != nil {
+ return x.RestoreToPos
+ }
+ return ""
+}
+
+func (x *RestoreFromBackupRequest) GetDryRun() bool {
+ if x != nil {
+ return x.DryRun
+ }
+ return false
+}
+
type RestoreFromBackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -6940,7 +7319,7 @@ type RestoreFromBackupResponse struct {
func (x *RestoreFromBackupResponse) Reset() {
*x = RestoreFromBackupResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[121]
+ mi := &file_vtctldata_proto_msgTypes[126]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6953,7 +7332,7 @@ func (x *RestoreFromBackupResponse) String() string {
func (*RestoreFromBackupResponse) ProtoMessage() {}
func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[121]
+ mi := &file_vtctldata_proto_msgTypes[126]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6966,7 +7345,7 @@ func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead.
func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{121}
+ return file_vtctldata_proto_rawDescGZIP(), []int{126}
}
func (x *RestoreFromBackupResponse) GetTabletAlias() *topodata.TabletAlias {
@@ -7008,7 +7387,7 @@ type RunHealthCheckRequest struct {
func (x *RunHealthCheckRequest) Reset() {
*x = RunHealthCheckRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[122]
+ mi := &file_vtctldata_proto_msgTypes[127]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7021,7 +7400,7 @@ func (x *RunHealthCheckRequest) String() string {
func (*RunHealthCheckRequest) ProtoMessage() {}
func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[122]
+ mi := &file_vtctldata_proto_msgTypes[127]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7034,7 +7413,7 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead.
func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{122}
+ return file_vtctldata_proto_rawDescGZIP(), []int{127}
}
func (x *RunHealthCheckRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -7053,7 +7432,7 @@ type RunHealthCheckResponse struct {
func (x *RunHealthCheckResponse) Reset() {
*x = RunHealthCheckResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[123]
+ mi := &file_vtctldata_proto_msgTypes[128]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7066,7 +7445,7 @@ func (x *RunHealthCheckResponse) String() string {
func (*RunHealthCheckResponse) ProtoMessage() {}
func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[123]
+ mi := &file_vtctldata_proto_msgTypes[128]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7079,7 +7458,7 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead.
func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{123}
+ return file_vtctldata_proto_rawDescGZIP(), []int{128}
}
type SetKeyspaceDurabilityPolicyRequest struct {
@@ -7094,7 +7473,7 @@ type SetKeyspaceDurabilityPolicyRequest struct {
func (x *SetKeyspaceDurabilityPolicyRequest) Reset() {
*x = SetKeyspaceDurabilityPolicyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[124]
+ mi := &file_vtctldata_proto_msgTypes[129]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7107,7 +7486,7 @@ func (x *SetKeyspaceDurabilityPolicyRequest) String() string {
func (*SetKeyspaceDurabilityPolicyRequest) ProtoMessage() {}
func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[124]
+ mi := &file_vtctldata_proto_msgTypes[129]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7120,7 +7499,7 @@ func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message
// Deprecated: Use SetKeyspaceDurabilityPolicyRequest.ProtoReflect.Descriptor instead.
func (*SetKeyspaceDurabilityPolicyRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{124}
+ return file_vtctldata_proto_rawDescGZIP(), []int{129}
}
func (x *SetKeyspaceDurabilityPolicyRequest) GetKeyspace() string {
@@ -7149,7 +7528,7 @@ type SetKeyspaceDurabilityPolicyResponse struct {
func (x *SetKeyspaceDurabilityPolicyResponse) Reset() {
*x = SetKeyspaceDurabilityPolicyResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[125]
+ mi := &file_vtctldata_proto_msgTypes[130]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7162,7 +7541,7 @@ func (x *SetKeyspaceDurabilityPolicyResponse) String() string {
func (*SetKeyspaceDurabilityPolicyResponse) ProtoMessage() {}
func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[125]
+ mi := &file_vtctldata_proto_msgTypes[130]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7175,7 +7554,7 @@ func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Messag
// Deprecated: Use SetKeyspaceDurabilityPolicyResponse.ProtoReflect.Descriptor instead.
func (*SetKeyspaceDurabilityPolicyResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{125}
+ return file_vtctldata_proto_rawDescGZIP(), []int{130}
}
func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace {
@@ -7200,7 +7579,7 @@ type SetKeyspaceServedFromRequest struct {
func (x *SetKeyspaceServedFromRequest) Reset() {
*x = SetKeyspaceServedFromRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[126]
+ mi := &file_vtctldata_proto_msgTypes[131]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7213,7 +7592,7 @@ func (x *SetKeyspaceServedFromRequest) String() string {
func (*SetKeyspaceServedFromRequest) ProtoMessage() {}
func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[126]
+ mi := &file_vtctldata_proto_msgTypes[131]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7226,7 +7605,7 @@ func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceServedFromRequest.ProtoReflect.Descriptor instead.
func (*SetKeyspaceServedFromRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{126}
+ return file_vtctldata_proto_rawDescGZIP(), []int{131}
}
func (x *SetKeyspaceServedFromRequest) GetKeyspace() string {
@@ -7276,7 +7655,7 @@ type SetKeyspaceServedFromResponse struct {
func (x *SetKeyspaceServedFromResponse) Reset() {
*x = SetKeyspaceServedFromResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[127]
+ mi := &file_vtctldata_proto_msgTypes[132]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7289,7 +7668,7 @@ func (x *SetKeyspaceServedFromResponse) String() string {
func (*SetKeyspaceServedFromResponse) ProtoMessage() {}
func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[127]
+ mi := &file_vtctldata_proto_msgTypes[132]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7302,7 +7681,7 @@ func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceServedFromResponse.ProtoReflect.Descriptor instead.
func (*SetKeyspaceServedFromResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{127}
+ return file_vtctldata_proto_rawDescGZIP(), []int{132}
}
func (x *SetKeyspaceServedFromResponse) GetKeyspace() *topodata.Keyspace {
@@ -7324,7 +7703,7 @@ type SetKeyspaceShardingInfoRequest struct {
func (x *SetKeyspaceShardingInfoRequest) Reset() {
*x = SetKeyspaceShardingInfoRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[128]
+ mi := &file_vtctldata_proto_msgTypes[133]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7337,7 +7716,7 @@ func (x *SetKeyspaceShardingInfoRequest) String() string {
func (*SetKeyspaceShardingInfoRequest) ProtoMessage() {}
func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[128]
+ mi := &file_vtctldata_proto_msgTypes[133]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7350,7 +7729,7 @@ func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceShardingInfoRequest.ProtoReflect.Descriptor instead.
func (*SetKeyspaceShardingInfoRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{128}
+ return file_vtctldata_proto_rawDescGZIP(), []int{133}
}
func (x *SetKeyspaceShardingInfoRequest) GetKeyspace() string {
@@ -7379,7 +7758,7 @@ type SetKeyspaceShardingInfoResponse struct {
func (x *SetKeyspaceShardingInfoResponse) Reset() {
*x = SetKeyspaceShardingInfoResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[129]
+ mi := &file_vtctldata_proto_msgTypes[134]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7392,7 +7771,7 @@ func (x *SetKeyspaceShardingInfoResponse) String() string {
func (*SetKeyspaceShardingInfoResponse) ProtoMessage() {}
func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[129]
+ mi := &file_vtctldata_proto_msgTypes[134]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7405,7 +7784,7 @@ func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceShardingInfoResponse.ProtoReflect.Descriptor instead.
func (*SetKeyspaceShardingInfoResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{129}
+ return file_vtctldata_proto_rawDescGZIP(), []int{134}
}
func (x *SetKeyspaceShardingInfoResponse) GetKeyspace() *topodata.Keyspace {
@@ -7428,7 +7807,7 @@ type SetShardIsPrimaryServingRequest struct {
func (x *SetShardIsPrimaryServingRequest) Reset() {
*x = SetShardIsPrimaryServingRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[130]
+ mi := &file_vtctldata_proto_msgTypes[135]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7441,7 +7820,7 @@ func (x *SetShardIsPrimaryServingRequest) String() string {
func (*SetShardIsPrimaryServingRequest) ProtoMessage() {}
func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[130]
+ mi := &file_vtctldata_proto_msgTypes[135]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7454,7 +7833,7 @@ func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardIsPrimaryServingRequest.ProtoReflect.Descriptor instead.
func (*SetShardIsPrimaryServingRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{130}
+ return file_vtctldata_proto_rawDescGZIP(), []int{135}
}
func (x *SetShardIsPrimaryServingRequest) GetKeyspace() string {
@@ -7490,7 +7869,7 @@ type SetShardIsPrimaryServingResponse struct {
func (x *SetShardIsPrimaryServingResponse) Reset() {
*x = SetShardIsPrimaryServingResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[131]
+ mi := &file_vtctldata_proto_msgTypes[136]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7503,7 +7882,7 @@ func (x *SetShardIsPrimaryServingResponse) String() string {
func (*SetShardIsPrimaryServingResponse) ProtoMessage() {}
func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[131]
+ mi := &file_vtctldata_proto_msgTypes[136]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7516,7 +7895,7 @@ func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardIsPrimaryServingResponse.ProtoReflect.Descriptor instead.
func (*SetShardIsPrimaryServingResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{131}
+ return file_vtctldata_proto_rawDescGZIP(), []int{136}
}
func (x *SetShardIsPrimaryServingResponse) GetShard() *topodata.Shard {
@@ -7557,7 +7936,7 @@ type SetShardTabletControlRequest struct {
func (x *SetShardTabletControlRequest) Reset() {
*x = SetShardTabletControlRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[132]
+ mi := &file_vtctldata_proto_msgTypes[137]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7570,7 +7949,7 @@ func (x *SetShardTabletControlRequest) String() string {
func (*SetShardTabletControlRequest) ProtoMessage() {}
func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[132]
+ mi := &file_vtctldata_proto_msgTypes[137]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7583,7 +7962,7 @@ func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardTabletControlRequest.ProtoReflect.Descriptor instead.
func (*SetShardTabletControlRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{132}
+ return file_vtctldata_proto_rawDescGZIP(), []int{137}
}
func (x *SetShardTabletControlRequest) GetKeyspace() string {
@@ -7647,7 +8026,7 @@ type SetShardTabletControlResponse struct {
func (x *SetShardTabletControlResponse) Reset() {
*x = SetShardTabletControlResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[133]
+ mi := &file_vtctldata_proto_msgTypes[138]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7660,7 +8039,7 @@ func (x *SetShardTabletControlResponse) String() string {
func (*SetShardTabletControlResponse) ProtoMessage() {}
func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[133]
+ mi := &file_vtctldata_proto_msgTypes[138]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7673,7 +8052,7 @@ func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardTabletControlResponse.ProtoReflect.Descriptor instead.
func (*SetShardTabletControlResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{133}
+ return file_vtctldata_proto_rawDescGZIP(), []int{138}
}
func (x *SetShardTabletControlResponse) GetShard() *topodata.Shard {
@@ -7695,7 +8074,7 @@ type SetWritableRequest struct {
func (x *SetWritableRequest) Reset() {
*x = SetWritableRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[134]
+ mi := &file_vtctldata_proto_msgTypes[139]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7708,7 +8087,7 @@ func (x *SetWritableRequest) String() string {
func (*SetWritableRequest) ProtoMessage() {}
func (x *SetWritableRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[134]
+ mi := &file_vtctldata_proto_msgTypes[139]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7721,7 +8100,7 @@ func (x *SetWritableRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetWritableRequest.ProtoReflect.Descriptor instead.
func (*SetWritableRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{134}
+ return file_vtctldata_proto_rawDescGZIP(), []int{139}
}
func (x *SetWritableRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -7747,7 +8126,7 @@ type SetWritableResponse struct {
func (x *SetWritableResponse) Reset() {
*x = SetWritableResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[135]
+ mi := &file_vtctldata_proto_msgTypes[140]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7760,7 +8139,7 @@ func (x *SetWritableResponse) String() string {
func (*SetWritableResponse) ProtoMessage() {}
func (x *SetWritableResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[135]
+ mi := &file_vtctldata_proto_msgTypes[140]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7773,7 +8152,7 @@ func (x *SetWritableResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetWritableResponse.ProtoReflect.Descriptor instead.
func (*SetWritableResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{135}
+ return file_vtctldata_proto_rawDescGZIP(), []int{140}
}
type ShardReplicationAddRequest struct {
@@ -7789,7 +8168,7 @@ type ShardReplicationAddRequest struct {
func (x *ShardReplicationAddRequest) Reset() {
*x = ShardReplicationAddRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[136]
+ mi := &file_vtctldata_proto_msgTypes[141]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7802,7 +8181,7 @@ func (x *ShardReplicationAddRequest) String() string {
func (*ShardReplicationAddRequest) ProtoMessage() {}
func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[136]
+ mi := &file_vtctldata_proto_msgTypes[141]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7815,7 +8194,7 @@ func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationAddRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationAddRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{136}
+ return file_vtctldata_proto_rawDescGZIP(), []int{141}
}
func (x *ShardReplicationAddRequest) GetKeyspace() string {
@@ -7848,7 +8227,7 @@ type ShardReplicationAddResponse struct {
func (x *ShardReplicationAddResponse) Reset() {
*x = ShardReplicationAddResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[137]
+ mi := &file_vtctldata_proto_msgTypes[142]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7861,7 +8240,7 @@ func (x *ShardReplicationAddResponse) String() string {
func (*ShardReplicationAddResponse) ProtoMessage() {}
func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[137]
+ mi := &file_vtctldata_proto_msgTypes[142]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7874,7 +8253,7 @@ func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationAddResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationAddResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{137}
+ return file_vtctldata_proto_rawDescGZIP(), []int{142}
}
type ShardReplicationFixRequest struct {
@@ -7890,7 +8269,7 @@ type ShardReplicationFixRequest struct {
func (x *ShardReplicationFixRequest) Reset() {
*x = ShardReplicationFixRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[138]
+ mi := &file_vtctldata_proto_msgTypes[143]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7903,7 +8282,7 @@ func (x *ShardReplicationFixRequest) String() string {
func (*ShardReplicationFixRequest) ProtoMessage() {}
func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[138]
+ mi := &file_vtctldata_proto_msgTypes[143]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7916,7 +8295,7 @@ func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationFixRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationFixRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{138}
+ return file_vtctldata_proto_rawDescGZIP(), []int{143}
}
func (x *ShardReplicationFixRequest) GetKeyspace() string {
@@ -7954,7 +8333,7 @@ type ShardReplicationFixResponse struct {
func (x *ShardReplicationFixResponse) Reset() {
*x = ShardReplicationFixResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[139]
+ mi := &file_vtctldata_proto_msgTypes[144]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7967,7 +8346,7 @@ func (x *ShardReplicationFixResponse) String() string {
func (*ShardReplicationFixResponse) ProtoMessage() {}
func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[139]
+ mi := &file_vtctldata_proto_msgTypes[144]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7980,7 +8359,7 @@ func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationFixResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationFixResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{139}
+ return file_vtctldata_proto_rawDescGZIP(), []int{144}
}
func (x *ShardReplicationFixResponse) GetError() *topodata.ShardReplicationError {
@@ -8002,7 +8381,7 @@ type ShardReplicationPositionsRequest struct {
func (x *ShardReplicationPositionsRequest) Reset() {
*x = ShardReplicationPositionsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[140]
+ mi := &file_vtctldata_proto_msgTypes[145]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8015,7 +8394,7 @@ func (x *ShardReplicationPositionsRequest) String() string {
func (*ShardReplicationPositionsRequest) ProtoMessage() {}
func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[140]
+ mi := &file_vtctldata_proto_msgTypes[145]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8028,7 +8407,7 @@ func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{140}
+ return file_vtctldata_proto_rawDescGZIP(), []int{145}
}
func (x *ShardReplicationPositionsRequest) GetKeyspace() string {
@@ -8061,7 +8440,7 @@ type ShardReplicationPositionsResponse struct {
func (x *ShardReplicationPositionsResponse) Reset() {
*x = ShardReplicationPositionsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[141]
+ mi := &file_vtctldata_proto_msgTypes[146]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8074,7 +8453,7 @@ func (x *ShardReplicationPositionsResponse) String() string {
func (*ShardReplicationPositionsResponse) ProtoMessage() {}
func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[141]
+ mi := &file_vtctldata_proto_msgTypes[146]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8087,7 +8466,7 @@ func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message
// Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{141}
+ return file_vtctldata_proto_rawDescGZIP(), []int{146}
}
func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status {
@@ -8117,7 +8496,7 @@ type ShardReplicationRemoveRequest struct {
func (x *ShardReplicationRemoveRequest) Reset() {
*x = ShardReplicationRemoveRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[142]
+ mi := &file_vtctldata_proto_msgTypes[147]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8130,7 +8509,7 @@ func (x *ShardReplicationRemoveRequest) String() string {
func (*ShardReplicationRemoveRequest) ProtoMessage() {}
func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[142]
+ mi := &file_vtctldata_proto_msgTypes[147]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8143,7 +8522,7 @@ func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationRemoveRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationRemoveRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{142}
+ return file_vtctldata_proto_rawDescGZIP(), []int{147}
}
func (x *ShardReplicationRemoveRequest) GetKeyspace() string {
@@ -8176,7 +8555,7 @@ type ShardReplicationRemoveResponse struct {
func (x *ShardReplicationRemoveResponse) Reset() {
*x = ShardReplicationRemoveResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[143]
+ mi := &file_vtctldata_proto_msgTypes[148]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8189,7 +8568,7 @@ func (x *ShardReplicationRemoveResponse) String() string {
func (*ShardReplicationRemoveResponse) ProtoMessage() {}
func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[143]
+ mi := &file_vtctldata_proto_msgTypes[148]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8202,7 +8581,7 @@ func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationRemoveResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationRemoveResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{143}
+ return file_vtctldata_proto_rawDescGZIP(), []int{148}
}
type SleepTabletRequest struct {
@@ -8217,7 +8596,7 @@ type SleepTabletRequest struct {
func (x *SleepTabletRequest) Reset() {
*x = SleepTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[144]
+ mi := &file_vtctldata_proto_msgTypes[149]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8230,7 +8609,7 @@ func (x *SleepTabletRequest) String() string {
func (*SleepTabletRequest) ProtoMessage() {}
func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[144]
+ mi := &file_vtctldata_proto_msgTypes[149]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8243,7 +8622,7 @@ func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SleepTabletRequest.ProtoReflect.Descriptor instead.
func (*SleepTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{144}
+ return file_vtctldata_proto_rawDescGZIP(), []int{149}
}
func (x *SleepTabletRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -8269,7 +8648,7 @@ type SleepTabletResponse struct {
func (x *SleepTabletResponse) Reset() {
*x = SleepTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[145]
+ mi := &file_vtctldata_proto_msgTypes[150]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8282,7 +8661,7 @@ func (x *SleepTabletResponse) String() string {
func (*SleepTabletResponse) ProtoMessage() {}
func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[145]
+ mi := &file_vtctldata_proto_msgTypes[150]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8295,7 +8674,7 @@ func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SleepTabletResponse.ProtoReflect.Descriptor instead.
func (*SleepTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{145}
+ return file_vtctldata_proto_rawDescGZIP(), []int{150}
}
type SourceShardAddRequest struct {
@@ -8319,7 +8698,7 @@ type SourceShardAddRequest struct {
func (x *SourceShardAddRequest) Reset() {
*x = SourceShardAddRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[146]
+ mi := &file_vtctldata_proto_msgTypes[151]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8332,7 +8711,7 @@ func (x *SourceShardAddRequest) String() string {
func (*SourceShardAddRequest) ProtoMessage() {}
func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[146]
+ mi := &file_vtctldata_proto_msgTypes[151]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8345,7 +8724,7 @@ func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardAddRequest.ProtoReflect.Descriptor instead.
func (*SourceShardAddRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{146}
+ return file_vtctldata_proto_rawDescGZIP(), []int{151}
}
func (x *SourceShardAddRequest) GetKeyspace() string {
@@ -8409,7 +8788,7 @@ type SourceShardAddResponse struct {
func (x *SourceShardAddResponse) Reset() {
*x = SourceShardAddResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[147]
+ mi := &file_vtctldata_proto_msgTypes[152]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8422,7 +8801,7 @@ func (x *SourceShardAddResponse) String() string {
func (*SourceShardAddResponse) ProtoMessage() {}
func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[147]
+ mi := &file_vtctldata_proto_msgTypes[152]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8435,7 +8814,7 @@ func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardAddResponse.ProtoReflect.Descriptor instead.
func (*SourceShardAddResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{147}
+ return file_vtctldata_proto_rawDescGZIP(), []int{152}
}
func (x *SourceShardAddResponse) GetShard() *topodata.Shard {
@@ -8458,7 +8837,7 @@ type SourceShardDeleteRequest struct {
func (x *SourceShardDeleteRequest) Reset() {
*x = SourceShardDeleteRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[148]
+ mi := &file_vtctldata_proto_msgTypes[153]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8471,7 +8850,7 @@ func (x *SourceShardDeleteRequest) String() string {
func (*SourceShardDeleteRequest) ProtoMessage() {}
func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[148]
+ mi := &file_vtctldata_proto_msgTypes[153]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8484,7 +8863,7 @@ func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardDeleteRequest.ProtoReflect.Descriptor instead.
func (*SourceShardDeleteRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{148}
+ return file_vtctldata_proto_rawDescGZIP(), []int{153}
}
func (x *SourceShardDeleteRequest) GetKeyspace() string {
@@ -8520,7 +8899,7 @@ type SourceShardDeleteResponse struct {
func (x *SourceShardDeleteResponse) Reset() {
*x = SourceShardDeleteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[149]
+ mi := &file_vtctldata_proto_msgTypes[154]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8533,7 +8912,7 @@ func (x *SourceShardDeleteResponse) String() string {
func (*SourceShardDeleteResponse) ProtoMessage() {}
func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[149]
+ mi := &file_vtctldata_proto_msgTypes[154]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8546,7 +8925,7 @@ func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardDeleteResponse.ProtoReflect.Descriptor instead.
func (*SourceShardDeleteResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{149}
+ return file_vtctldata_proto_rawDescGZIP(), []int{154}
}
func (x *SourceShardDeleteResponse) GetShard() *topodata.Shard {
@@ -8567,7 +8946,7 @@ type StartReplicationRequest struct {
func (x *StartReplicationRequest) Reset() {
*x = StartReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[150]
+ mi := &file_vtctldata_proto_msgTypes[155]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8580,7 +8959,7 @@ func (x *StartReplicationRequest) String() string {
func (*StartReplicationRequest) ProtoMessage() {}
func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[150]
+ mi := &file_vtctldata_proto_msgTypes[155]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8593,7 +8972,7 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead.
func (*StartReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{150}
+ return file_vtctldata_proto_rawDescGZIP(), []int{155}
}
func (x *StartReplicationRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -8612,7 +8991,7 @@ type StartReplicationResponse struct {
func (x *StartReplicationResponse) Reset() {
*x = StartReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[151]
+ mi := &file_vtctldata_proto_msgTypes[156]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8625,7 +9004,7 @@ func (x *StartReplicationResponse) String() string {
func (*StartReplicationResponse) ProtoMessage() {}
func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[151]
+ mi := &file_vtctldata_proto_msgTypes[156]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8638,7 +9017,7 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead.
func (*StartReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{151}
+ return file_vtctldata_proto_rawDescGZIP(), []int{156}
}
type StopReplicationRequest struct {
@@ -8652,7 +9031,7 @@ type StopReplicationRequest struct {
func (x *StopReplicationRequest) Reset() {
*x = StopReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[152]
+ mi := &file_vtctldata_proto_msgTypes[157]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8665,7 +9044,7 @@ func (x *StopReplicationRequest) String() string {
func (*StopReplicationRequest) ProtoMessage() {}
func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[152]
+ mi := &file_vtctldata_proto_msgTypes[157]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8678,7 +9057,7 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead.
func (*StopReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{152}
+ return file_vtctldata_proto_rawDescGZIP(), []int{157}
}
func (x *StopReplicationRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -8697,7 +9076,7 @@ type StopReplicationResponse struct {
func (x *StopReplicationResponse) Reset() {
*x = StopReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[153]
+ mi := &file_vtctldata_proto_msgTypes[158]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8710,7 +9089,7 @@ func (x *StopReplicationResponse) String() string {
func (*StopReplicationResponse) ProtoMessage() {}
func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[153]
+ mi := &file_vtctldata_proto_msgTypes[158]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8723,7 +9102,7 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead.
func (*StopReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{153}
+ return file_vtctldata_proto_rawDescGZIP(), []int{158}
}
type TabletExternallyReparentedRequest struct {
@@ -8739,7 +9118,7 @@ type TabletExternallyReparentedRequest struct {
func (x *TabletExternallyReparentedRequest) Reset() {
*x = TabletExternallyReparentedRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[154]
+ mi := &file_vtctldata_proto_msgTypes[159]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8752,7 +9131,7 @@ func (x *TabletExternallyReparentedRequest) String() string {
func (*TabletExternallyReparentedRequest) ProtoMessage() {}
func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[154]
+ mi := &file_vtctldata_proto_msgTypes[159]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8765,7 +9144,7 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message
// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead.
func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{154}
+ return file_vtctldata_proto_rawDescGZIP(), []int{159}
}
func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias {
@@ -8789,7 +9168,7 @@ type TabletExternallyReparentedResponse struct {
func (x *TabletExternallyReparentedResponse) Reset() {
*x = TabletExternallyReparentedResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[155]
+ mi := &file_vtctldata_proto_msgTypes[160]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8802,7 +9181,7 @@ func (x *TabletExternallyReparentedResponse) String() string {
func (*TabletExternallyReparentedResponse) ProtoMessage() {}
func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[155]
+ mi := &file_vtctldata_proto_msgTypes[160]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8815,7 +9194,7 @@ func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message
// Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead.
func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{155}
+ return file_vtctldata_proto_rawDescGZIP(), []int{160}
}
func (x *TabletExternallyReparentedResponse) GetKeyspace() string {
@@ -8858,7 +9237,7 @@ type UpdateCellInfoRequest struct {
func (x *UpdateCellInfoRequest) Reset() {
*x = UpdateCellInfoRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[156]
+ mi := &file_vtctldata_proto_msgTypes[161]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8871,7 +9250,7 @@ func (x *UpdateCellInfoRequest) String() string {
func (*UpdateCellInfoRequest) ProtoMessage() {}
func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[156]
+ mi := &file_vtctldata_proto_msgTypes[161]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8884,7 +9263,7 @@ func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead.
func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{156}
+ return file_vtctldata_proto_rawDescGZIP(), []int{161}
}
func (x *UpdateCellInfoRequest) GetName() string {
@@ -8913,7 +9292,7 @@ type UpdateCellInfoResponse struct {
func (x *UpdateCellInfoResponse) Reset() {
*x = UpdateCellInfoResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[157]
+ mi := &file_vtctldata_proto_msgTypes[162]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8926,7 +9305,7 @@ func (x *UpdateCellInfoResponse) String() string {
func (*UpdateCellInfoResponse) ProtoMessage() {}
func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[157]
+ mi := &file_vtctldata_proto_msgTypes[162]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8939,7 +9318,7 @@ func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellInfoResponse.ProtoReflect.Descriptor instead.
func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{157}
+ return file_vtctldata_proto_rawDescGZIP(), []int{162}
}
func (x *UpdateCellInfoResponse) GetName() string {
@@ -8968,7 +9347,7 @@ type UpdateCellsAliasRequest struct {
func (x *UpdateCellsAliasRequest) Reset() {
*x = UpdateCellsAliasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[158]
+ mi := &file_vtctldata_proto_msgTypes[163]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8981,7 +9360,7 @@ func (x *UpdateCellsAliasRequest) String() string {
func (*UpdateCellsAliasRequest) ProtoMessage() {}
func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[158]
+ mi := &file_vtctldata_proto_msgTypes[163]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8994,7 +9373,7 @@ func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead.
func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{158}
+ return file_vtctldata_proto_rawDescGZIP(), []int{163}
}
func (x *UpdateCellsAliasRequest) GetName() string {
@@ -9023,7 +9402,7 @@ type UpdateCellsAliasResponse struct {
func (x *UpdateCellsAliasResponse) Reset() {
*x = UpdateCellsAliasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[159]
+ mi := &file_vtctldata_proto_msgTypes[164]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9036,7 +9415,7 @@ func (x *UpdateCellsAliasResponse) String() string {
func (*UpdateCellsAliasResponse) ProtoMessage() {}
func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[159]
+ mi := &file_vtctldata_proto_msgTypes[164]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9049,7 +9428,7 @@ func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead.
func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{159}
+ return file_vtctldata_proto_rawDescGZIP(), []int{164}
}
func (x *UpdateCellsAliasResponse) GetName() string {
@@ -9077,7 +9456,7 @@ type ValidateRequest struct {
func (x *ValidateRequest) Reset() {
*x = ValidateRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[160]
+ mi := &file_vtctldata_proto_msgTypes[165]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9090,7 +9469,7 @@ func (x *ValidateRequest) String() string {
func (*ValidateRequest) ProtoMessage() {}
func (x *ValidateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[160]
+ mi := &file_vtctldata_proto_msgTypes[165]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9103,7 +9482,7 @@ func (x *ValidateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead.
func (*ValidateRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{160}
+ return file_vtctldata_proto_rawDescGZIP(), []int{165}
}
func (x *ValidateRequest) GetPingTablets() bool {
@@ -9125,7 +9504,7 @@ type ValidateResponse struct {
func (x *ValidateResponse) Reset() {
*x = ValidateResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[161]
+ mi := &file_vtctldata_proto_msgTypes[166]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9138,7 +9517,7 @@ func (x *ValidateResponse) String() string {
func (*ValidateResponse) ProtoMessage() {}
func (x *ValidateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[161]
+ mi := &file_vtctldata_proto_msgTypes[166]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9151,7 +9530,7 @@ func (x *ValidateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateResponse.ProtoReflect.Descriptor instead.
func (*ValidateResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{161}
+ return file_vtctldata_proto_rawDescGZIP(), []int{166}
}
func (x *ValidateResponse) GetResults() []string {
@@ -9180,7 +9559,7 @@ type ValidateKeyspaceRequest struct {
func (x *ValidateKeyspaceRequest) Reset() {
*x = ValidateKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[162]
+ mi := &file_vtctldata_proto_msgTypes[167]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9193,7 +9572,7 @@ func (x *ValidateKeyspaceRequest) String() string {
func (*ValidateKeyspaceRequest) ProtoMessage() {}
func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[162]
+ mi := &file_vtctldata_proto_msgTypes[167]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9206,7 +9585,7 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{162}
+ return file_vtctldata_proto_rawDescGZIP(), []int{167}
}
func (x *ValidateKeyspaceRequest) GetKeyspace() string {
@@ -9235,7 +9614,7 @@ type ValidateKeyspaceResponse struct {
func (x *ValidateKeyspaceResponse) Reset() {
*x = ValidateKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[163]
+ mi := &file_vtctldata_proto_msgTypes[168]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9248,7 +9627,7 @@ func (x *ValidateKeyspaceResponse) String() string {
func (*ValidateKeyspaceResponse) ProtoMessage() {}
func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[163]
+ mi := &file_vtctldata_proto_msgTypes[168]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9261,7 +9640,7 @@ func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ValidateKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{163}
+ return file_vtctldata_proto_rawDescGZIP(), []int{168}
}
func (x *ValidateKeyspaceResponse) GetResults() []string {
@@ -9293,7 +9672,7 @@ type ValidateSchemaKeyspaceRequest struct {
func (x *ValidateSchemaKeyspaceRequest) Reset() {
*x = ValidateSchemaKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[164]
+ mi := &file_vtctldata_proto_msgTypes[169]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9306,7 +9685,7 @@ func (x *ValidateSchemaKeyspaceRequest) String() string {
func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {}
func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[164]
+ mi := &file_vtctldata_proto_msgTypes[169]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9319,7 +9698,7 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{164}
+ return file_vtctldata_proto_rawDescGZIP(), []int{169}
}
func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string {
@@ -9369,7 +9748,7 @@ type ValidateSchemaKeyspaceResponse struct {
func (x *ValidateSchemaKeyspaceResponse) Reset() {
*x = ValidateSchemaKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[165]
+ mi := &file_vtctldata_proto_msgTypes[170]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9382,7 +9761,7 @@ func (x *ValidateSchemaKeyspaceResponse) String() string {
func (*ValidateSchemaKeyspaceResponse) ProtoMessage() {}
func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[165]
+ mi := &file_vtctldata_proto_msgTypes[170]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9395,7 +9774,7 @@ func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateSchemaKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ValidateSchemaKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{165}
+ return file_vtctldata_proto_rawDescGZIP(), []int{170}
}
func (x *ValidateSchemaKeyspaceResponse) GetResults() []string {
@@ -9425,7 +9804,7 @@ type ValidateShardRequest struct {
func (x *ValidateShardRequest) Reset() {
*x = ValidateShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[166]
+ mi := &file_vtctldata_proto_msgTypes[171]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9438,7 +9817,7 @@ func (x *ValidateShardRequest) String() string {
func (*ValidateShardRequest) ProtoMessage() {}
func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[166]
+ mi := &file_vtctldata_proto_msgTypes[171]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9451,7 +9830,7 @@ func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead.
func (*ValidateShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{166}
+ return file_vtctldata_proto_rawDescGZIP(), []int{171}
}
func (x *ValidateShardRequest) GetKeyspace() string {
@@ -9486,7 +9865,7 @@ type ValidateShardResponse struct {
func (x *ValidateShardResponse) Reset() {
*x = ValidateShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[167]
+ mi := &file_vtctldata_proto_msgTypes[172]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9499,7 +9878,7 @@ func (x *ValidateShardResponse) String() string {
func (*ValidateShardResponse) ProtoMessage() {}
func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[167]
+ mi := &file_vtctldata_proto_msgTypes[172]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9512,7 +9891,7 @@ func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateShardResponse.ProtoReflect.Descriptor instead.
func (*ValidateShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{167}
+ return file_vtctldata_proto_rawDescGZIP(), []int{172}
}
func (x *ValidateShardResponse) GetResults() []string {
@@ -9533,7 +9912,7 @@ type ValidateVersionKeyspaceRequest struct {
func (x *ValidateVersionKeyspaceRequest) Reset() {
*x = ValidateVersionKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[168]
+ mi := &file_vtctldata_proto_msgTypes[173]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9546,7 +9925,7 @@ func (x *ValidateVersionKeyspaceRequest) String() string {
func (*ValidateVersionKeyspaceRequest) ProtoMessage() {}
func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[168]
+ mi := &file_vtctldata_proto_msgTypes[173]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9559,7 +9938,7 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{168}
+ return file_vtctldata_proto_rawDescGZIP(), []int{173}
}
func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string {
@@ -9581,7 +9960,7 @@ type ValidateVersionKeyspaceResponse struct {
func (x *ValidateVersionKeyspaceResponse) Reset() {
*x = ValidateVersionKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[169]
+ mi := &file_vtctldata_proto_msgTypes[174]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9594,7 +9973,7 @@ func (x *ValidateVersionKeyspaceResponse) String() string {
func (*ValidateVersionKeyspaceResponse) ProtoMessage() {}
func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[169]
+ mi := &file_vtctldata_proto_msgTypes[174]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9607,7 +9986,7 @@ func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVersionKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ValidateVersionKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{169}
+ return file_vtctldata_proto_rawDescGZIP(), []int{174}
}
func (x *ValidateVersionKeyspaceResponse) GetResults() []string {
@@ -9624,6 +10003,108 @@ func (x *ValidateVersionKeyspaceResponse) GetResultsByShard() map[string]*Valida
return nil
}
+type ValidateVersionShardRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"`
+}
+
+func (x *ValidateVersionShardRequest) Reset() {
+ *x = ValidateVersionShardRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[175]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateVersionShardRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateVersionShardRequest) ProtoMessage() {}
+
+func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[175]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead.
+func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{175}
+}
+
+func (x *ValidateVersionShardRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+func (x *ValidateVersionShardRequest) GetShard() string {
+ if x != nil {
+ return x.Shard
+ }
+ return ""
+}
+
+type ValidateVersionShardResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *ValidateVersionShardResponse) Reset() {
+ *x = ValidateVersionShardResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[176]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateVersionShardResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateVersionShardResponse) ProtoMessage() {}
+
+func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[176]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionShardResponse.ProtoReflect.Descriptor instead.
+func (*ValidateVersionShardResponse) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{176}
+}
+
+func (x *ValidateVersionShardResponse) GetResults() []string {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
type ValidateVSchemaRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -9638,7 +10119,7 @@ type ValidateVSchemaRequest struct {
func (x *ValidateVSchemaRequest) Reset() {
*x = ValidateVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[170]
+ mi := &file_vtctldata_proto_msgTypes[177]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9651,7 +10132,7 @@ func (x *ValidateVSchemaRequest) String() string {
func (*ValidateVSchemaRequest) ProtoMessage() {}
func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[170]
+ mi := &file_vtctldata_proto_msgTypes[177]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9664,7 +10145,7 @@ func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVSchemaRequest.ProtoReflect.Descriptor instead.
func (*ValidateVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{170}
+ return file_vtctldata_proto_rawDescGZIP(), []int{177}
}
func (x *ValidateVSchemaRequest) GetKeyspace() string {
@@ -9707,7 +10188,7 @@ type ValidateVSchemaResponse struct {
func (x *ValidateVSchemaResponse) Reset() {
*x = ValidateVSchemaResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[171]
+ mi := &file_vtctldata_proto_msgTypes[178]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9720,7 +10201,7 @@ func (x *ValidateVSchemaResponse) String() string {
func (*ValidateVSchemaResponse) ProtoMessage() {}
func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[171]
+ mi := &file_vtctldata_proto_msgTypes[178]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9733,7 +10214,7 @@ func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVSchemaResponse.ProtoReflect.Descriptor instead.
func (*ValidateVSchemaResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{171}
+ return file_vtctldata_proto_rawDescGZIP(), []int{178}
}
func (x *ValidateVSchemaResponse) GetResults() []string {
@@ -9762,7 +10243,7 @@ type Workflow_ReplicationLocation struct {
func (x *Workflow_ReplicationLocation) Reset() {
*x = Workflow_ReplicationLocation{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[173]
+ mi := &file_vtctldata_proto_msgTypes[180]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9775,7 +10256,7 @@ func (x *Workflow_ReplicationLocation) String() string {
func (*Workflow_ReplicationLocation) ProtoMessage() {}
func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[173]
+ mi := &file_vtctldata_proto_msgTypes[180]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9818,7 +10299,7 @@ type Workflow_ShardStream struct {
func (x *Workflow_ShardStream) Reset() {
*x = Workflow_ShardStream{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[174]
+ mi := &file_vtctldata_proto_msgTypes[181]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9831,7 +10312,7 @@ func (x *Workflow_ShardStream) String() string {
func (*Workflow_ShardStream) ProtoMessage() {}
func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[174]
+ mi := &file_vtctldata_proto_msgTypes[181]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9901,7 +10382,7 @@ type Workflow_Stream struct {
func (x *Workflow_Stream) Reset() {
*x = Workflow_Stream{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[175]
+ mi := &file_vtctldata_proto_msgTypes[182]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9914,7 +10395,7 @@ func (x *Workflow_Stream) String() string {
func (*Workflow_Stream) ProtoMessage() {}
func (x *Workflow_Stream) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[175]
+ mi := &file_vtctldata_proto_msgTypes[182]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10047,7 +10528,7 @@ type Workflow_Stream_CopyState struct {
func (x *Workflow_Stream_CopyState) Reset() {
*x = Workflow_Stream_CopyState{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[176]
+ mi := &file_vtctldata_proto_msgTypes[183]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -10060,7 +10541,7 @@ func (x *Workflow_Stream_CopyState) String() string {
func (*Workflow_Stream_CopyState) ProtoMessage() {}
func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[176]
+ mi := &file_vtctldata_proto_msgTypes[183]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10108,7 +10589,7 @@ type Workflow_Stream_Log struct {
func (x *Workflow_Stream_Log) Reset() {
*x = Workflow_Stream_Log{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[177]
+ mi := &file_vtctldata_proto_msgTypes[184]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -10121,7 +10602,7 @@ func (x *Workflow_Stream_Log) String() string {
func (*Workflow_Stream_Log) ProtoMessage() {}
func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[177]
+ mi := &file_vtctldata_proto_msgTypes[184]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10204,7 +10685,7 @@ type GetSrvKeyspaceNamesResponse_NameList struct {
func (x *GetSrvKeyspaceNamesResponse_NameList) Reset() {
*x = GetSrvKeyspaceNamesResponse_NameList{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[181]
+ mi := &file_vtctldata_proto_msgTypes[188]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -10217,7 +10698,7 @@ func (x *GetSrvKeyspaceNamesResponse_NameList) String() string {
func (*GetSrvKeyspaceNamesResponse_NameList) ProtoMessage() {}
func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[181]
+ mi := &file_vtctldata_proto_msgTypes[188]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10273,7 +10754,7 @@ var file_vtctldata_proto_rawDesc = []byte{
0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12,
0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0xab,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0xf4,
0x04, 0x0a, 0x13, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65,
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
@@ -10308,1246 +10789,1307 @@ var file_vtctldata_proto_rawDesc = []byte{
0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54,
0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x4e, 0x0a, 0x08,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x05,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x81, 0x0c, 0x0a,
- 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a,
- 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
- 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f,
- 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x15, 0x0a, 0x06,
+ 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e,
+ 0x44, 0x64, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72,
+ 0x79, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x4e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a,
+ 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xd2, 0x0c, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f,
+ 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61,
+ 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x4a, 0x0a, 0x0d, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57,
+ 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66,
+ 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+ 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11,
+ 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
+ 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x60, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12,
- 0x31, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12,
- 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x61, 0x67, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0x60,
- 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
- 0x1a, 0x49, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0b,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x07, 0x73,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
- 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f,
- 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x1a, 0xf6, 0x06, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
- 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
- 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f,
- 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18,
- 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c,
- 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50,
- 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a,
- 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d,
- 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74,
- 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52,
- 0x0a, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x6c,
- 0x6f, 0x67, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12,
- 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, 0x74,
- 0x63, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18,
- 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x3a, 0x0a, 0x09, 0x43,
- 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17,
- 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x6b, 0x1a, 0xe6, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12,
- 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12,
- 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74,
- 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
- 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74,
- 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x22, 0x59, 0x0a, 0x12, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65,
- 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66,
- 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x41,
- 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x40, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63,
- 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x01,
- 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75,
- 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f,
- 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74,
- 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
- 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72,
- 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b,
- 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62,
- 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b,
- 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75,
- 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1d,
- 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e,
- 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a,
- 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72,
- 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e,
- 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75,
- 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69,
- 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d,
- 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c,
- 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x86, 0x03, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f,
- 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c,
- 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
- 0x4c, 0x6f, 0x6e, 0x67, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69,
- 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x03, 0x73, 0x71, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x64, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61,
- 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x64, 0x6c, 0x53,
- 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f,
- 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64,
- 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78,
- 0x74, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73,
- 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f,
- 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c,
- 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x22, 0x32, 0x0a, 0x13,
- 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74,
- 0x22, 0xc3, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62,
- 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70,
- 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72,
- 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e,
- 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x44, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c,
- 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x90, 0x01, 0x0a,
- 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
- 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f,
- 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a,
- 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22,
- 0xa2, 0x01, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
- 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24,
- 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
- 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x22, 0x8d, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a,
- 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63,
- 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x63, 0x79, 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70,
- 0x65, 0x52, 0x06, 0x64, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79,
- 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52,
- 0x75, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x35, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74,
- 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b,
- 0x61, 0x66, 0x74, 0x65, 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77,
- 0x61, 0x73, 0x5f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x09, 0x77, 0x61, 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xf1, 0x02, 0x0a, 0x15,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12,
- 0x2f, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76,
- 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61,
- 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73,
- 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f,
- 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23,
- 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74,
- 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69,
- 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22,
- 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a,
- 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72,
- 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x43, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73,
- 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41,
- 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x15,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22,
- 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66,
- 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x13, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e,
+ 0x67, 0x1a, 0xf6, 0x06, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0e, 0x0a, 0x02,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f,
+ 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d,
+ 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69,
+ 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x45,
+ 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e,
+ 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x70, 0x79, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x0d, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e,
+ 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67,
+ 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x3a, 0x0a, 0x09, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74,
+ 0x5f, 0x70, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50,
+ 0x6b, 0x1a, 0xe6, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a,
+ 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52,
+ 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x12, 0x41, 0x64,
+ 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c,
+ 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c,
+ 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, 0x0a, 0x14,
+ 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63,
- 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65,
- 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a,
- 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x28, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63,
- 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65,
- 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x5f,
- 0x69, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12,
- 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05,
- 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a,
- 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x1a, 0x0a, 0x18,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
- 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23,
- 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd0, 0x02, 0x0a,
- 0x1d, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65,
- 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f,
- 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65,
- 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74,
- 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
- 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65,
- 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f,
- 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f,
- 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x6f,
- 0x73, 0x73, 0x43, 0x65, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0xbc, 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64,
- 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x17,
+ 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c,
+ 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f,
+ 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c,
+ 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63,
+ 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c,
+ 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73,
+ 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52,
+ 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c,
+ 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72,
+ 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x41,
+ 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
+ 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x03,
+ 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x75,
+ 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4c, 0x6f, 0x6e, 0x67, 0x55, 0x6e,
+ 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x21,
+ 0x0a, 0x0c, 0x64, 0x64, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x64, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67,
+ 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b,
+ 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x44, 0x0a, 0x15, 0x77,
+ 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74,
+ 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61,
+ 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69,
+ 0x67, 0x68, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x50,
+ 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a,
+ 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x22, 0xc3, 0x01, 0x0a, 0x13, 0x41,
+ 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21,
+ 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c,
+ 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65,
+ 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73,
+ 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10,
+ 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c,
+ 0x22, 0x44, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xc2, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
- 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c,
- 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa0,
- 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41,
- 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d,
- 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d,
- 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f,
- 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x6f,
- 0x6c, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63,
- 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a,
- 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x18, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72,
- 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f,
- 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69,
- 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73,
- 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72,
- 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68,
- 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a,
- 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x78,
- 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63,
- 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x22, 0x5e, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x68, 0x6f, 0x6f, 0x6b,
- 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
- 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
- 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65,
- 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69,
- 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c,
- 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65,
- 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c,
- 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c,
- 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52,
- 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74,
- 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49,
- 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c,
- 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c,
- 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x65, 0x74,
- 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x47,
- 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47,
- 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a,
- 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f,
+ 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63,
+ 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f,
+ 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0e,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38,
0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50,
- 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69,
- 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55,
- 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75,
- 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69,
- 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
- 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e,
- 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76,
- 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e,
- 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
- 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c,
- 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a,
- 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x22, 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75,
- 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22,
- 0x32, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a,
- 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65,
- 0x6c, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47,
- 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d,
- 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, 0x0a,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x4c,
- 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74,
- 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c,
- 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11,
- 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72,
- 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
- 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c,
- 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76,
- 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x22, 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c,
- 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22,
- 0xc5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72,
- 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c,
- 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54,
- 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22,
- 0x40, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x22, 0x2f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x22, 0x42, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x52, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b,
- 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69,
- 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74,
- 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c,
- 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15,
- 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74,
- 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77,
- 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f,
- 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26,
- 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e,
- 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
- 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x1b,
- 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a,
- 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75,
+ 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x22, 0x8d, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20,
+ 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79,
+ 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x70,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
- 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73,
- 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e,
- 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72,
- 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f,
- 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06,
- 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c,
- 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76,
- 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70,
- 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c,
- 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65,
- 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61,
- 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, 0x65,
- 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70,
- 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1d,
- 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a,
- 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16,
- 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73,
- 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18,
- 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, 0x0a,
- 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x12,
- 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65,
- 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65,
- 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, 0x72,
- 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, 0x69,
- 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x1b,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f,
- 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f,
- 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63,
- 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, 0x61,
- 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69,
- 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22,
- 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23,
- 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e,
- 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b,
- 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43,
- 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f,
- 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x64,
+ 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xa6,
+ 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0d, 0x62,
+ 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65,
+ 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, 0x5f, 0x64,
+ 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, 0x61,
+ 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xf1, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x0c,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f,
+ 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x2a,
+ 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61,
+ 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
+ 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64,
+ 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a,
+ 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x49, 0x0a, 0x16, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
+ 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x25,
+ 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a,
+ 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f,
+ 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x6c, 0x72, 0x65, 0x61,
+ 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43,
+ 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65,
+ 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69,
+ 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
+ 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x06, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x06, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69,
+ 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
+ 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x5f, 0x69, 0x66, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x76,
+ 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x66,
+ 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63,
+ 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0e,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22,
+ 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd0, 0x02, 0x0a, 0x1d, 0x45, 0x6d, 0x65,
+ 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72,
- 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
- 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d,
- 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c,
- 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72,
- 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75,
- 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b,
+ 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x63, 0x65, 0x6c, 0x6c,
+ 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x65,
+ 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, 0x0a, 0x1e,
+ 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a,
+ 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x18, 0x45,
+ 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72,
+ 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f,
+ 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0x47, 0x0a,
+ 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41,
+ 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75,
+ 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c,
+ 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a,
+ 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75,
+ 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27,
+ 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
+ 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
+ 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
+ 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, 0x19,
+ 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42,
+ 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
+ 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48,
+ 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5e, 0x0a,
+ 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78,
+ 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3c, 0x0a,
+ 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f,
+ 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64,
+ 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a,
+ 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a,
+ 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
- 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65,
+ 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65,
+ 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
+ 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d,
+ 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a,
+ 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e,
+ 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a,
+ 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c,
+ 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c,
+ 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a,
+ 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61,
+ 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61,
+ 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a,
+ 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x75,
+ 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14,
+ 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
+ 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69,
+ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40,
+ 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55, 0x0a, 0x17, 0x47, 0x65,
+ 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
+ 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65,
+ 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12,
+ 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56,
+ 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x28,
+ 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x5f, 0x6f, 0x6e,
+ 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22,
+ 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
+ 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x1a, 0x47,
+ 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d,
+ 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c,
+ 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22,
+ 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72,
+ 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12,
+ 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
+ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63,
+ 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c,
+ 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a,
+ 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, 0x76, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11, 0x53, 0x72, 0x76, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x22, 0xb7, 0x02, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74,
+ 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65,
+ 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12,
+ 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a,
+ 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x12, 0x28, 0x0a, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74,
+ 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c,
+ 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x2f, 0x0a, 0x14, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x0a, 0x14, 0x47,
+ 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72,
+ 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72,
+ 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72,
+ 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72,
+ 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c,
+ 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69,
0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
- 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x0b,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52,
- 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xad, 0x01, 0x0a, 0x19,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x51, 0x0a, 0x15, 0x52,
- 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x18,
- 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, 0x65, 0x74, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
- 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75,
- 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
- 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, 0x74, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xc8,
- 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a, 0x1d, 0x53, 0x65, 0x74,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72,
- 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74,
- 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x1e, 0x53, 0x65,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e,
- 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63,
- 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4a, 0x04,
- 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, 0x1f, 0x53, 0x65,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e,
- 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x72, 0x0a,
- 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11,
+ 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11,
+ 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72,
+ 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63,
+ 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61,
+ 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12,
+ 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x2c, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54,
+ 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70,
+ 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f,
+ 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x66,
+ 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68,
+ 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68,
+ 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x2f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08,
+ 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11,
+ 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x52, 0x0a, 0x13, 0x47, 0x65,
+ 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x49,
+ 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09,
+ 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e,
+ 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f,
+ 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
+ 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66,
+ 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63,
+ 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73,
+ 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50,
+ 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69,
+ 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x89, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e,
- 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x8e, 0x02, 0x0a,
- 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
+ 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a,
+ 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76,
+ 0x6f, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a,
+ 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a,
0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61,
0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12,
- 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18,
- 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d,
- 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x46, 0x0a,
- 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
- 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62,
+ 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70,
+ 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22,
+ 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x32, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a,
+ 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65,
+ 0x6c, 0x6c, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74,
+ 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63,
+ 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c,
+ 0x73, 0x22, 0x83, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f,
+ 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69,
+ 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12,
+ 0x36, 0x0a, 0x17, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65,
+ 0x73, 0x68, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x15, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61,
+ 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
+ 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f,
+ 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0xa9, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x54, 0x0a, 0x20,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d,
+ 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f,
+ 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c,
+ 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f,
+ 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a,
+ 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43,
+ 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f,
+ 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65,
+ 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c,
+ 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a,
+ 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09,
+ 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d,
+ 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a,
+ 0x16, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xc2, 0x01, 0x0a, 0x18, 0x52,
+ 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65,
+ 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70,
+ 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22,
+ 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a,
+ 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65,
+ 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74,
+ 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22,
+ 0x51, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22,
+ 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62,
+ 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b,
+ 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62,
+ 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53,
+ 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69,
+ 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06,
+ 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a,
+ 0x1d, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e,
+ 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e,
+ 0x0a, 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x70,
- 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, 0x5f,
- 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a,
- 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72,
+ 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51,
+ 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e,
+ 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x22, 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c,
- 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x20, 0x0a,
- 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12,
- 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a,
- 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75,
- 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f,
- 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71,
+ 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65,
+ 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73,
+ 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d,
+ 0x6f, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x22, 0x46, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74,
+ 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a,
+ 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a,
+ 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x53, 0x0a, 0x17,
- 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a,
- 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
+ 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x22, 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14,
+ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d,
+ 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d,
+ 0x61, 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x21,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79,
- 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77,
- 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
- 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f,
- 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69,
- 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63,
- 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e,
- 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65,
- 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, 0x18,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c,
- 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69,
- 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x69, 0x0a, 0x16,
- 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64,
- 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21,
- 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x22, 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a,
+ 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72,
+ 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a,
+ 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65,
+ 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66,
+ 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c,
+ 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
+ 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c,
+ 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c,
+ 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69,
+ 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01,
+ 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x1a, 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56,
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42,
- 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73,
- 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
- 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a,
+ 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63,
+ 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26,
+ 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22,
+ 0x88, 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x25,
- 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73,
- 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65,
- 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e,
- 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b,
- 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x73,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63,
- 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, 0x02, 0x0a, 0x1e,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
- 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
- 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79,
- 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
- 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12,
+ 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69,
+ 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73,
- 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64,
- 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75,
- 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
- 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a,
- 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79,
- 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42,
- 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74,
- 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65,
- 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e,
- 0x0a, 0x0a, 0x4d, 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15,
- 0x0a, 0x11, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e,
- 0x44, 0x45, 0x58, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e,
- 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65,
+ 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x4d, 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a,
+ 0x11, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44,
+ 0x45, 0x58, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69,
+ 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -11563,7 +12105,7 @@ func file_vtctldata_proto_rawDescGZIP() []byte {
}
var file_vtctldata_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 191)
+var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 198)
var file_vtctldata_proto_goTypes = []interface{}{
(MaterializationIntent)(0), // 0: vtctldata.MaterializationIntent
(*ExecuteVtctlCommandRequest)(nil), // 1: vtctldata.ExecuteVtctlCommandRequest
@@ -11644,300 +12186,308 @@ var file_vtctldata_proto_goTypes = []interface{}{
(*GetSrvKeyspaceNamesResponse)(nil), // 76: vtctldata.GetSrvKeyspaceNamesResponse
(*GetSrvKeyspacesRequest)(nil), // 77: vtctldata.GetSrvKeyspacesRequest
(*GetSrvKeyspacesResponse)(nil), // 78: vtctldata.GetSrvKeyspacesResponse
- (*GetSrvVSchemaRequest)(nil), // 79: vtctldata.GetSrvVSchemaRequest
- (*GetSrvVSchemaResponse)(nil), // 80: vtctldata.GetSrvVSchemaResponse
- (*GetSrvVSchemasRequest)(nil), // 81: vtctldata.GetSrvVSchemasRequest
- (*GetSrvVSchemasResponse)(nil), // 82: vtctldata.GetSrvVSchemasResponse
- (*GetTabletRequest)(nil), // 83: vtctldata.GetTabletRequest
- (*GetTabletResponse)(nil), // 84: vtctldata.GetTabletResponse
- (*GetTabletsRequest)(nil), // 85: vtctldata.GetTabletsRequest
- (*GetTabletsResponse)(nil), // 86: vtctldata.GetTabletsResponse
- (*GetVSchemaRequest)(nil), // 87: vtctldata.GetVSchemaRequest
- (*GetVersionRequest)(nil), // 88: vtctldata.GetVersionRequest
- (*GetVersionResponse)(nil), // 89: vtctldata.GetVersionResponse
- (*GetVSchemaResponse)(nil), // 90: vtctldata.GetVSchemaResponse
- (*GetWorkflowsRequest)(nil), // 91: vtctldata.GetWorkflowsRequest
- (*GetWorkflowsResponse)(nil), // 92: vtctldata.GetWorkflowsResponse
- (*InitShardPrimaryRequest)(nil), // 93: vtctldata.InitShardPrimaryRequest
- (*InitShardPrimaryResponse)(nil), // 94: vtctldata.InitShardPrimaryResponse
- (*PingTabletRequest)(nil), // 95: vtctldata.PingTabletRequest
- (*PingTabletResponse)(nil), // 96: vtctldata.PingTabletResponse
- (*PlannedReparentShardRequest)(nil), // 97: vtctldata.PlannedReparentShardRequest
- (*PlannedReparentShardResponse)(nil), // 98: vtctldata.PlannedReparentShardResponse
- (*RebuildKeyspaceGraphRequest)(nil), // 99: vtctldata.RebuildKeyspaceGraphRequest
- (*RebuildKeyspaceGraphResponse)(nil), // 100: vtctldata.RebuildKeyspaceGraphResponse
- (*RebuildVSchemaGraphRequest)(nil), // 101: vtctldata.RebuildVSchemaGraphRequest
- (*RebuildVSchemaGraphResponse)(nil), // 102: vtctldata.RebuildVSchemaGraphResponse
- (*RefreshStateRequest)(nil), // 103: vtctldata.RefreshStateRequest
- (*RefreshStateResponse)(nil), // 104: vtctldata.RefreshStateResponse
- (*RefreshStateByShardRequest)(nil), // 105: vtctldata.RefreshStateByShardRequest
- (*RefreshStateByShardResponse)(nil), // 106: vtctldata.RefreshStateByShardResponse
- (*ReloadSchemaRequest)(nil), // 107: vtctldata.ReloadSchemaRequest
- (*ReloadSchemaResponse)(nil), // 108: vtctldata.ReloadSchemaResponse
- (*ReloadSchemaKeyspaceRequest)(nil), // 109: vtctldata.ReloadSchemaKeyspaceRequest
- (*ReloadSchemaKeyspaceResponse)(nil), // 110: vtctldata.ReloadSchemaKeyspaceResponse
- (*ReloadSchemaShardRequest)(nil), // 111: vtctldata.ReloadSchemaShardRequest
- (*ReloadSchemaShardResponse)(nil), // 112: vtctldata.ReloadSchemaShardResponse
- (*RemoveBackupRequest)(nil), // 113: vtctldata.RemoveBackupRequest
- (*RemoveBackupResponse)(nil), // 114: vtctldata.RemoveBackupResponse
- (*RemoveKeyspaceCellRequest)(nil), // 115: vtctldata.RemoveKeyspaceCellRequest
- (*RemoveKeyspaceCellResponse)(nil), // 116: vtctldata.RemoveKeyspaceCellResponse
- (*RemoveShardCellRequest)(nil), // 117: vtctldata.RemoveShardCellRequest
- (*RemoveShardCellResponse)(nil), // 118: vtctldata.RemoveShardCellResponse
- (*ReparentTabletRequest)(nil), // 119: vtctldata.ReparentTabletRequest
- (*ReparentTabletResponse)(nil), // 120: vtctldata.ReparentTabletResponse
- (*RestoreFromBackupRequest)(nil), // 121: vtctldata.RestoreFromBackupRequest
- (*RestoreFromBackupResponse)(nil), // 122: vtctldata.RestoreFromBackupResponse
- (*RunHealthCheckRequest)(nil), // 123: vtctldata.RunHealthCheckRequest
- (*RunHealthCheckResponse)(nil), // 124: vtctldata.RunHealthCheckResponse
- (*SetKeyspaceDurabilityPolicyRequest)(nil), // 125: vtctldata.SetKeyspaceDurabilityPolicyRequest
- (*SetKeyspaceDurabilityPolicyResponse)(nil), // 126: vtctldata.SetKeyspaceDurabilityPolicyResponse
- (*SetKeyspaceServedFromRequest)(nil), // 127: vtctldata.SetKeyspaceServedFromRequest
- (*SetKeyspaceServedFromResponse)(nil), // 128: vtctldata.SetKeyspaceServedFromResponse
- (*SetKeyspaceShardingInfoRequest)(nil), // 129: vtctldata.SetKeyspaceShardingInfoRequest
- (*SetKeyspaceShardingInfoResponse)(nil), // 130: vtctldata.SetKeyspaceShardingInfoResponse
- (*SetShardIsPrimaryServingRequest)(nil), // 131: vtctldata.SetShardIsPrimaryServingRequest
- (*SetShardIsPrimaryServingResponse)(nil), // 132: vtctldata.SetShardIsPrimaryServingResponse
- (*SetShardTabletControlRequest)(nil), // 133: vtctldata.SetShardTabletControlRequest
- (*SetShardTabletControlResponse)(nil), // 134: vtctldata.SetShardTabletControlResponse
- (*SetWritableRequest)(nil), // 135: vtctldata.SetWritableRequest
- (*SetWritableResponse)(nil), // 136: vtctldata.SetWritableResponse
- (*ShardReplicationAddRequest)(nil), // 137: vtctldata.ShardReplicationAddRequest
- (*ShardReplicationAddResponse)(nil), // 138: vtctldata.ShardReplicationAddResponse
- (*ShardReplicationFixRequest)(nil), // 139: vtctldata.ShardReplicationFixRequest
- (*ShardReplicationFixResponse)(nil), // 140: vtctldata.ShardReplicationFixResponse
- (*ShardReplicationPositionsRequest)(nil), // 141: vtctldata.ShardReplicationPositionsRequest
- (*ShardReplicationPositionsResponse)(nil), // 142: vtctldata.ShardReplicationPositionsResponse
- (*ShardReplicationRemoveRequest)(nil), // 143: vtctldata.ShardReplicationRemoveRequest
- (*ShardReplicationRemoveResponse)(nil), // 144: vtctldata.ShardReplicationRemoveResponse
- (*SleepTabletRequest)(nil), // 145: vtctldata.SleepTabletRequest
- (*SleepTabletResponse)(nil), // 146: vtctldata.SleepTabletResponse
- (*SourceShardAddRequest)(nil), // 147: vtctldata.SourceShardAddRequest
- (*SourceShardAddResponse)(nil), // 148: vtctldata.SourceShardAddResponse
- (*SourceShardDeleteRequest)(nil), // 149: vtctldata.SourceShardDeleteRequest
- (*SourceShardDeleteResponse)(nil), // 150: vtctldata.SourceShardDeleteResponse
- (*StartReplicationRequest)(nil), // 151: vtctldata.StartReplicationRequest
- (*StartReplicationResponse)(nil), // 152: vtctldata.StartReplicationResponse
- (*StopReplicationRequest)(nil), // 153: vtctldata.StopReplicationRequest
- (*StopReplicationResponse)(nil), // 154: vtctldata.StopReplicationResponse
- (*TabletExternallyReparentedRequest)(nil), // 155: vtctldata.TabletExternallyReparentedRequest
- (*TabletExternallyReparentedResponse)(nil), // 156: vtctldata.TabletExternallyReparentedResponse
- (*UpdateCellInfoRequest)(nil), // 157: vtctldata.UpdateCellInfoRequest
- (*UpdateCellInfoResponse)(nil), // 158: vtctldata.UpdateCellInfoResponse
- (*UpdateCellsAliasRequest)(nil), // 159: vtctldata.UpdateCellsAliasRequest
- (*UpdateCellsAliasResponse)(nil), // 160: vtctldata.UpdateCellsAliasResponse
- (*ValidateRequest)(nil), // 161: vtctldata.ValidateRequest
- (*ValidateResponse)(nil), // 162: vtctldata.ValidateResponse
- (*ValidateKeyspaceRequest)(nil), // 163: vtctldata.ValidateKeyspaceRequest
- (*ValidateKeyspaceResponse)(nil), // 164: vtctldata.ValidateKeyspaceResponse
- (*ValidateSchemaKeyspaceRequest)(nil), // 165: vtctldata.ValidateSchemaKeyspaceRequest
- (*ValidateSchemaKeyspaceResponse)(nil), // 166: vtctldata.ValidateSchemaKeyspaceResponse
- (*ValidateShardRequest)(nil), // 167: vtctldata.ValidateShardRequest
- (*ValidateShardResponse)(nil), // 168: vtctldata.ValidateShardResponse
- (*ValidateVersionKeyspaceRequest)(nil), // 169: vtctldata.ValidateVersionKeyspaceRequest
- (*ValidateVersionKeyspaceResponse)(nil), // 170: vtctldata.ValidateVersionKeyspaceResponse
- (*ValidateVSchemaRequest)(nil), // 171: vtctldata.ValidateVSchemaRequest
- (*ValidateVSchemaResponse)(nil), // 172: vtctldata.ValidateVSchemaResponse
- nil, // 173: vtctldata.Workflow.ShardStreamsEntry
- (*Workflow_ReplicationLocation)(nil), // 174: vtctldata.Workflow.ReplicationLocation
- (*Workflow_ShardStream)(nil), // 175: vtctldata.Workflow.ShardStream
- (*Workflow_Stream)(nil), // 176: vtctldata.Workflow.Stream
- (*Workflow_Stream_CopyState)(nil), // 177: vtctldata.Workflow.Stream.CopyState
- (*Workflow_Stream_Log)(nil), // 178: vtctldata.Workflow.Stream.Log
- nil, // 179: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
- nil, // 180: vtctldata.GetCellsAliasesResponse.AliasesEntry
- nil, // 181: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
- (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 182: vtctldata.GetSrvKeyspaceNamesResponse.NameList
- nil, // 183: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
- nil, // 184: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
- nil, // 185: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
- nil, // 186: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
- nil, // 187: vtctldata.ValidateResponse.ResultsByKeyspaceEntry
- nil, // 188: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
- nil, // 189: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
- nil, // 190: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
- nil, // 191: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
- (*logutil.Event)(nil), // 192: logutil.Event
- (*topodata.Keyspace)(nil), // 193: topodata.Keyspace
- (*topodata.Shard)(nil), // 194: topodata.Shard
- (*topodata.CellInfo)(nil), // 195: topodata.CellInfo
- (*vschema.RoutingRules)(nil), // 196: vschema.RoutingRules
- (*vschema.ShardRoutingRules)(nil), // 197: vschema.ShardRoutingRules
- (*vttime.Duration)(nil), // 198: vttime.Duration
- (*vtrpc.CallerID)(nil), // 199: vtrpc.CallerID
- (*vschema.Keyspace)(nil), // 200: vschema.Keyspace
- (*topodata.TabletAlias)(nil), // 201: topodata.TabletAlias
- (topodata.TabletType)(0), // 202: topodata.TabletType
- (*topodata.Tablet)(nil), // 203: topodata.Tablet
- (*topodata.Keyspace_ServedFrom)(nil), // 204: topodata.Keyspace.ServedFrom
- (topodata.KeyspaceType)(0), // 205: topodata.KeyspaceType
- (*vttime.Time)(nil), // 206: vttime.Time
- (*query.QueryResult)(nil), // 207: query.QueryResult
- (*tabletmanagerdata.ExecuteHookRequest)(nil), // 208: tabletmanagerdata.ExecuteHookRequest
- (*tabletmanagerdata.ExecuteHookResponse)(nil), // 209: tabletmanagerdata.ExecuteHookResponse
- (*mysqlctl.BackupInfo)(nil), // 210: mysqlctl.BackupInfo
- (*replicationdata.FullStatus)(nil), // 211: replicationdata.FullStatus
- (*tabletmanagerdata.Permissions)(nil), // 212: tabletmanagerdata.Permissions
- (*tabletmanagerdata.SchemaDefinition)(nil), // 213: tabletmanagerdata.SchemaDefinition
- (*vschema.SrvVSchema)(nil), // 214: vschema.SrvVSchema
- (*topodata.ShardReplicationError)(nil), // 215: topodata.ShardReplicationError
- (*topodata.KeyRange)(nil), // 216: topodata.KeyRange
- (*topodata.CellsAlias)(nil), // 217: topodata.CellsAlias
- (*topodata.Shard_TabletControl)(nil), // 218: topodata.Shard.TabletControl
- (*binlogdata.BinlogSource)(nil), // 219: binlogdata.BinlogSource
- (*topodata.SrvKeyspace)(nil), // 220: topodata.SrvKeyspace
- (*replicationdata.Status)(nil), // 221: replicationdata.Status
+ (*UpdateThrottlerConfigRequest)(nil), // 79: vtctldata.UpdateThrottlerConfigRequest
+ (*UpdateThrottlerConfigResponse)(nil), // 80: vtctldata.UpdateThrottlerConfigResponse
+ (*GetSrvVSchemaRequest)(nil), // 81: vtctldata.GetSrvVSchemaRequest
+ (*GetSrvVSchemaResponse)(nil), // 82: vtctldata.GetSrvVSchemaResponse
+ (*GetSrvVSchemasRequest)(nil), // 83: vtctldata.GetSrvVSchemasRequest
+ (*GetSrvVSchemasResponse)(nil), // 84: vtctldata.GetSrvVSchemasResponse
+ (*GetTabletRequest)(nil), // 85: vtctldata.GetTabletRequest
+ (*GetTabletResponse)(nil), // 86: vtctldata.GetTabletResponse
+ (*GetTabletsRequest)(nil), // 87: vtctldata.GetTabletsRequest
+ (*GetTabletsResponse)(nil), // 88: vtctldata.GetTabletsResponse
+ (*GetTopologyPathRequest)(nil), // 89: vtctldata.GetTopologyPathRequest
+ (*GetTopologyPathResponse)(nil), // 90: vtctldata.GetTopologyPathResponse
+ (*TopologyCell)(nil), // 91: vtctldata.TopologyCell
+ (*GetVSchemaRequest)(nil), // 92: vtctldata.GetVSchemaRequest
+ (*GetVersionRequest)(nil), // 93: vtctldata.GetVersionRequest
+ (*GetVersionResponse)(nil), // 94: vtctldata.GetVersionResponse
+ (*GetVSchemaResponse)(nil), // 95: vtctldata.GetVSchemaResponse
+ (*GetWorkflowsRequest)(nil), // 96: vtctldata.GetWorkflowsRequest
+ (*GetWorkflowsResponse)(nil), // 97: vtctldata.GetWorkflowsResponse
+ (*InitShardPrimaryRequest)(nil), // 98: vtctldata.InitShardPrimaryRequest
+ (*InitShardPrimaryResponse)(nil), // 99: vtctldata.InitShardPrimaryResponse
+ (*PingTabletRequest)(nil), // 100: vtctldata.PingTabletRequest
+ (*PingTabletResponse)(nil), // 101: vtctldata.PingTabletResponse
+ (*PlannedReparentShardRequest)(nil), // 102: vtctldata.PlannedReparentShardRequest
+ (*PlannedReparentShardResponse)(nil), // 103: vtctldata.PlannedReparentShardResponse
+ (*RebuildKeyspaceGraphRequest)(nil), // 104: vtctldata.RebuildKeyspaceGraphRequest
+ (*RebuildKeyspaceGraphResponse)(nil), // 105: vtctldata.RebuildKeyspaceGraphResponse
+ (*RebuildVSchemaGraphRequest)(nil), // 106: vtctldata.RebuildVSchemaGraphRequest
+ (*RebuildVSchemaGraphResponse)(nil), // 107: vtctldata.RebuildVSchemaGraphResponse
+ (*RefreshStateRequest)(nil), // 108: vtctldata.RefreshStateRequest
+ (*RefreshStateResponse)(nil), // 109: vtctldata.RefreshStateResponse
+ (*RefreshStateByShardRequest)(nil), // 110: vtctldata.RefreshStateByShardRequest
+ (*RefreshStateByShardResponse)(nil), // 111: vtctldata.RefreshStateByShardResponse
+ (*ReloadSchemaRequest)(nil), // 112: vtctldata.ReloadSchemaRequest
+ (*ReloadSchemaResponse)(nil), // 113: vtctldata.ReloadSchemaResponse
+ (*ReloadSchemaKeyspaceRequest)(nil), // 114: vtctldata.ReloadSchemaKeyspaceRequest
+ (*ReloadSchemaKeyspaceResponse)(nil), // 115: vtctldata.ReloadSchemaKeyspaceResponse
+ (*ReloadSchemaShardRequest)(nil), // 116: vtctldata.ReloadSchemaShardRequest
+ (*ReloadSchemaShardResponse)(nil), // 117: vtctldata.ReloadSchemaShardResponse
+ (*RemoveBackupRequest)(nil), // 118: vtctldata.RemoveBackupRequest
+ (*RemoveBackupResponse)(nil), // 119: vtctldata.RemoveBackupResponse
+ (*RemoveKeyspaceCellRequest)(nil), // 120: vtctldata.RemoveKeyspaceCellRequest
+ (*RemoveKeyspaceCellResponse)(nil), // 121: vtctldata.RemoveKeyspaceCellResponse
+ (*RemoveShardCellRequest)(nil), // 122: vtctldata.RemoveShardCellRequest
+ (*RemoveShardCellResponse)(nil), // 123: vtctldata.RemoveShardCellResponse
+ (*ReparentTabletRequest)(nil), // 124: vtctldata.ReparentTabletRequest
+ (*ReparentTabletResponse)(nil), // 125: vtctldata.ReparentTabletResponse
+ (*RestoreFromBackupRequest)(nil), // 126: vtctldata.RestoreFromBackupRequest
+ (*RestoreFromBackupResponse)(nil), // 127: vtctldata.RestoreFromBackupResponse
+ (*RunHealthCheckRequest)(nil), // 128: vtctldata.RunHealthCheckRequest
+ (*RunHealthCheckResponse)(nil), // 129: vtctldata.RunHealthCheckResponse
+ (*SetKeyspaceDurabilityPolicyRequest)(nil), // 130: vtctldata.SetKeyspaceDurabilityPolicyRequest
+ (*SetKeyspaceDurabilityPolicyResponse)(nil), // 131: vtctldata.SetKeyspaceDurabilityPolicyResponse
+ (*SetKeyspaceServedFromRequest)(nil), // 132: vtctldata.SetKeyspaceServedFromRequest
+ (*SetKeyspaceServedFromResponse)(nil), // 133: vtctldata.SetKeyspaceServedFromResponse
+ (*SetKeyspaceShardingInfoRequest)(nil), // 134: vtctldata.SetKeyspaceShardingInfoRequest
+ (*SetKeyspaceShardingInfoResponse)(nil), // 135: vtctldata.SetKeyspaceShardingInfoResponse
+ (*SetShardIsPrimaryServingRequest)(nil), // 136: vtctldata.SetShardIsPrimaryServingRequest
+ (*SetShardIsPrimaryServingResponse)(nil), // 137: vtctldata.SetShardIsPrimaryServingResponse
+ (*SetShardTabletControlRequest)(nil), // 138: vtctldata.SetShardTabletControlRequest
+ (*SetShardTabletControlResponse)(nil), // 139: vtctldata.SetShardTabletControlResponse
+ (*SetWritableRequest)(nil), // 140: vtctldata.SetWritableRequest
+ (*SetWritableResponse)(nil), // 141: vtctldata.SetWritableResponse
+ (*ShardReplicationAddRequest)(nil), // 142: vtctldata.ShardReplicationAddRequest
+ (*ShardReplicationAddResponse)(nil), // 143: vtctldata.ShardReplicationAddResponse
+ (*ShardReplicationFixRequest)(nil), // 144: vtctldata.ShardReplicationFixRequest
+ (*ShardReplicationFixResponse)(nil), // 145: vtctldata.ShardReplicationFixResponse
+ (*ShardReplicationPositionsRequest)(nil), // 146: vtctldata.ShardReplicationPositionsRequest
+ (*ShardReplicationPositionsResponse)(nil), // 147: vtctldata.ShardReplicationPositionsResponse
+ (*ShardReplicationRemoveRequest)(nil), // 148: vtctldata.ShardReplicationRemoveRequest
+ (*ShardReplicationRemoveResponse)(nil), // 149: vtctldata.ShardReplicationRemoveResponse
+ (*SleepTabletRequest)(nil), // 150: vtctldata.SleepTabletRequest
+ (*SleepTabletResponse)(nil), // 151: vtctldata.SleepTabletResponse
+ (*SourceShardAddRequest)(nil), // 152: vtctldata.SourceShardAddRequest
+ (*SourceShardAddResponse)(nil), // 153: vtctldata.SourceShardAddResponse
+ (*SourceShardDeleteRequest)(nil), // 154: vtctldata.SourceShardDeleteRequest
+ (*SourceShardDeleteResponse)(nil), // 155: vtctldata.SourceShardDeleteResponse
+ (*StartReplicationRequest)(nil), // 156: vtctldata.StartReplicationRequest
+ (*StartReplicationResponse)(nil), // 157: vtctldata.StartReplicationResponse
+ (*StopReplicationRequest)(nil), // 158: vtctldata.StopReplicationRequest
+ (*StopReplicationResponse)(nil), // 159: vtctldata.StopReplicationResponse
+ (*TabletExternallyReparentedRequest)(nil), // 160: vtctldata.TabletExternallyReparentedRequest
+ (*TabletExternallyReparentedResponse)(nil), // 161: vtctldata.TabletExternallyReparentedResponse
+ (*UpdateCellInfoRequest)(nil), // 162: vtctldata.UpdateCellInfoRequest
+ (*UpdateCellInfoResponse)(nil), // 163: vtctldata.UpdateCellInfoResponse
+ (*UpdateCellsAliasRequest)(nil), // 164: vtctldata.UpdateCellsAliasRequest
+ (*UpdateCellsAliasResponse)(nil), // 165: vtctldata.UpdateCellsAliasResponse
+ (*ValidateRequest)(nil), // 166: vtctldata.ValidateRequest
+ (*ValidateResponse)(nil), // 167: vtctldata.ValidateResponse
+ (*ValidateKeyspaceRequest)(nil), // 168: vtctldata.ValidateKeyspaceRequest
+ (*ValidateKeyspaceResponse)(nil), // 169: vtctldata.ValidateKeyspaceResponse
+ (*ValidateSchemaKeyspaceRequest)(nil), // 170: vtctldata.ValidateSchemaKeyspaceRequest
+ (*ValidateSchemaKeyspaceResponse)(nil), // 171: vtctldata.ValidateSchemaKeyspaceResponse
+ (*ValidateShardRequest)(nil), // 172: vtctldata.ValidateShardRequest
+ (*ValidateShardResponse)(nil), // 173: vtctldata.ValidateShardResponse
+ (*ValidateVersionKeyspaceRequest)(nil), // 174: vtctldata.ValidateVersionKeyspaceRequest
+ (*ValidateVersionKeyspaceResponse)(nil), // 175: vtctldata.ValidateVersionKeyspaceResponse
+ (*ValidateVersionShardRequest)(nil), // 176: vtctldata.ValidateVersionShardRequest
+ (*ValidateVersionShardResponse)(nil), // 177: vtctldata.ValidateVersionShardResponse
+ (*ValidateVSchemaRequest)(nil), // 178: vtctldata.ValidateVSchemaRequest
+ (*ValidateVSchemaResponse)(nil), // 179: vtctldata.ValidateVSchemaResponse
+ nil, // 180: vtctldata.Workflow.ShardStreamsEntry
+ (*Workflow_ReplicationLocation)(nil), // 181: vtctldata.Workflow.ReplicationLocation
+ (*Workflow_ShardStream)(nil), // 182: vtctldata.Workflow.ShardStream
+ (*Workflow_Stream)(nil), // 183: vtctldata.Workflow.Stream
+ (*Workflow_Stream_CopyState)(nil), // 184: vtctldata.Workflow.Stream.CopyState
+ (*Workflow_Stream_Log)(nil), // 185: vtctldata.Workflow.Stream.Log
+ nil, // 186: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
+ nil, // 187: vtctldata.GetCellsAliasesResponse.AliasesEntry
+ nil, // 188: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
+ (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 189: vtctldata.GetSrvKeyspaceNamesResponse.NameList
+ nil, // 190: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
+ nil, // 191: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
+ nil, // 192: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
+ nil, // 193: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
+ nil, // 194: vtctldata.ValidateResponse.ResultsByKeyspaceEntry
+ nil, // 195: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
+ nil, // 196: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
+ nil, // 197: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
+ nil, // 198: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
+ (*logutil.Event)(nil), // 199: logutil.Event
+ (*topodata.Keyspace)(nil), // 200: topodata.Keyspace
+ (*topodata.Shard)(nil), // 201: topodata.Shard
+ (*topodata.CellInfo)(nil), // 202: topodata.CellInfo
+ (*vschema.RoutingRules)(nil), // 203: vschema.RoutingRules
+ (*vschema.ShardRoutingRules)(nil), // 204: vschema.ShardRoutingRules
+ (*vttime.Duration)(nil), // 205: vttime.Duration
+ (*vtrpc.CallerID)(nil), // 206: vtrpc.CallerID
+ (*vschema.Keyspace)(nil), // 207: vschema.Keyspace
+ (*topodata.TabletAlias)(nil), // 208: topodata.TabletAlias
+ (topodata.TabletType)(0), // 209: topodata.TabletType
+ (*topodata.Tablet)(nil), // 210: topodata.Tablet
+ (*topodata.Keyspace_ServedFrom)(nil), // 211: topodata.Keyspace.ServedFrom
+ (topodata.KeyspaceType)(0), // 212: topodata.KeyspaceType
+ (*vttime.Time)(nil), // 213: vttime.Time
+ (*query.QueryResult)(nil), // 214: query.QueryResult
+ (*tabletmanagerdata.ExecuteHookRequest)(nil), // 215: tabletmanagerdata.ExecuteHookRequest
+ (*tabletmanagerdata.ExecuteHookResponse)(nil), // 216: tabletmanagerdata.ExecuteHookResponse
+ (*mysqlctl.BackupInfo)(nil), // 217: mysqlctl.BackupInfo
+ (*replicationdata.FullStatus)(nil), // 218: replicationdata.FullStatus
+ (*tabletmanagerdata.Permissions)(nil), // 219: tabletmanagerdata.Permissions
+ (*tabletmanagerdata.SchemaDefinition)(nil), // 220: tabletmanagerdata.SchemaDefinition
+ (*vschema.SrvVSchema)(nil), // 221: vschema.SrvVSchema
+ (*topodata.ShardReplicationError)(nil), // 222: topodata.ShardReplicationError
+ (*topodata.KeyRange)(nil), // 223: topodata.KeyRange
+ (*topodata.CellsAlias)(nil), // 224: topodata.CellsAlias
+ (*topodata.Shard_TabletControl)(nil), // 225: topodata.Shard.TabletControl
+ (*binlogdata.BinlogSource)(nil), // 226: binlogdata.BinlogSource
+ (*topodata.SrvKeyspace)(nil), // 227: topodata.SrvKeyspace
+ (*replicationdata.Status)(nil), // 228: replicationdata.Status
}
var file_vtctldata_proto_depIdxs = []int32{
- 192, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event
+ 199, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event
3, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings
0, // 2: vtctldata.MaterializeSettings.materialization_intent:type_name -> vtctldata.MaterializationIntent
- 193, // 3: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace
- 194, // 4: vtctldata.Shard.shard:type_name -> topodata.Shard
- 174, // 5: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation
- 174, // 6: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation
- 173, // 7: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry
- 195, // 8: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo
- 196, // 9: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules
- 197, // 10: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules
- 198, // 11: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 199, // 12: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID
- 200, // 13: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace
- 200, // 14: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace
- 201, // 15: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 16: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias
- 192, // 17: vtctldata.BackupResponse.event:type_name -> logutil.Event
- 201, // 18: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias
- 202, // 19: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType
- 203, // 20: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet
- 203, // 21: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet
- 204, // 22: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom
- 205, // 23: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType
- 206, // 24: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time
+ 200, // 3: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace
+ 201, // 4: vtctldata.Shard.shard:type_name -> topodata.Shard
+ 181, // 5: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation
+ 181, // 6: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation
+ 180, // 7: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry
+ 202, // 8: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo
+ 203, // 9: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules
+ 204, // 10: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules
+ 205, // 11: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 206, // 12: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID
+ 207, // 13: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace
+ 207, // 14: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace
+ 208, // 15: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 16: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 199, // 17: vtctldata.BackupResponse.event:type_name -> logutil.Event
+ 208, // 18: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 209, // 19: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType
+ 210, // 20: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet
+ 210, // 21: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet
+ 211, // 22: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom
+ 212, // 23: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType
+ 213, // 24: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time
5, // 25: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace
5, // 26: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace
6, // 27: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard
6, // 28: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard
- 201, // 29: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
- 201, // 30: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
- 201, // 31: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias
- 198, // 32: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 201, // 33: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 192, // 34: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event
- 201, // 35: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias
- 207, // 36: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult
- 201, // 37: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias
- 207, // 38: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult
- 201, // 39: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias
- 208, // 40: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest
- 209, // 41: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse
- 179, // 42: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
- 210, // 43: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo
- 195, // 44: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo
- 180, // 45: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry
- 201, // 46: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias
- 211, // 47: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus
+ 208, // 29: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
+ 208, // 30: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
+ 208, // 31: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias
+ 205, // 32: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 208, // 33: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 199, // 34: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event
+ 208, // 35: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 214, // 36: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult
+ 208, // 37: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias
+ 214, // 38: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult
+ 208, // 39: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 215, // 40: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest
+ 216, // 41: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse
+ 186, // 42: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
+ 217, // 43: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo
+ 202, // 44: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo
+ 187, // 45: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry
+ 208, // 46: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 218, // 47: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus
5, // 48: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace
5, // 49: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace
- 201, // 50: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias
- 212, // 51: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions
- 196, // 52: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules
- 201, // 53: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
- 213, // 54: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition
+ 208, // 50: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 219, // 51: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions
+ 203, // 52: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules
+ 208, // 53: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 220, // 54: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition
6, // 55: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard
- 197, // 56: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules
- 181, // 57: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
- 183, // 58: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
- 214, // 59: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema
- 184, // 60: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
- 201, // 61: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
- 203, // 62: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet
- 201, // 63: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
- 202, // 64: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType
- 203, // 65: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet
- 201, // 66: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias
- 200, // 67: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace
- 7, // 68: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow
- 201, // 69: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias
- 198, // 70: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 192, // 71: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event
- 201, // 72: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 73: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
- 201, // 74: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias
- 198, // 75: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 201, // 76: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 192, // 77: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event
- 201, // 78: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 79: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
- 192, // 80: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event
- 192, // 81: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event
- 201, // 82: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias
- 201, // 83: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias
- 201, // 84: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias
- 206, // 85: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time
- 201, // 86: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias
- 192, // 87: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event
- 201, // 88: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias
- 193, // 89: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace
- 202, // 90: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType
- 193, // 91: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace
- 193, // 92: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace
- 194, // 93: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard
- 202, // 94: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType
- 194, // 95: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard
- 201, // 96: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 97: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias
- 215, // 98: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError
- 185, // 99: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
- 186, // 100: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
- 201, // 101: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 102: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
- 198, // 103: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration
- 216, // 104: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange
- 194, // 105: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard
- 194, // 106: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard
- 201, // 107: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 108: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 109: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias
- 201, // 110: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias
- 201, // 111: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias
- 195, // 112: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo
- 195, // 113: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo
- 217, // 114: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias
- 217, // 115: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias
- 187, // 116: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry
- 188, // 117: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
- 189, // 118: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
- 190, // 119: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
- 191, // 120: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
- 175, // 121: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream
- 176, // 122: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream
- 218, // 123: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl
- 201, // 124: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias
- 219, // 125: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource
- 206, // 126: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time
- 206, // 127: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time
- 177, // 128: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState
- 178, // 129: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log
- 206, // 130: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time
- 206, // 131: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time
- 6, // 132: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard
- 217, // 133: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias
- 182, // 134: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList
- 220, // 135: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace
- 214, // 136: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema
- 221, // 137: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status
- 203, // 138: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet
- 164, // 139: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse
- 168, // 140: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 168, // 141: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 168, // 142: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 168, // 143: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 144, // [144:144] is the sub-list for method output_type
- 144, // [144:144] is the sub-list for method input_type
- 144, // [144:144] is the sub-list for extension type_name
- 144, // [144:144] is the sub-list for extension extendee
- 0, // [0:144] is the sub-list for field type_name
+ 204, // 56: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules
+ 188, // 57: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
+ 190, // 58: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
+ 221, // 59: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema
+ 191, // 60: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
+ 208, // 61: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 210, // 62: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet
+ 208, // 63: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
+ 209, // 64: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType
+ 210, // 65: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet
+ 91, // 66: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell
+ 208, // 67: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 207, // 68: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace
+ 7, // 69: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow
+ 208, // 70: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias
+ 205, // 71: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 199, // 72: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event
+ 208, // 73: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 74: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
+ 208, // 75: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias
+ 205, // 76: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 208, // 77: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 199, // 78: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event
+ 208, // 79: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 80: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 199, // 81: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event
+ 199, // 82: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event
+ 208, // 83: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias
+ 208, // 84: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias
+ 208, // 85: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 213, // 86: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time
+ 208, // 87: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 199, // 88: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event
+ 208, // 89: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 200, // 90: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace
+ 209, // 91: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType
+ 200, // 92: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace
+ 200, // 93: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace
+ 201, // 94: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard
+ 209, // 95: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType
+ 201, // 96: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard
+ 208, // 97: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 98: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 222, // 99: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError
+ 192, // 100: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
+ 193, // 101: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
+ 208, // 102: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 103: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 205, // 104: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration
+ 223, // 105: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange
+ 201, // 106: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard
+ 201, // 107: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard
+ 208, // 108: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 109: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 110: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias
+ 208, // 111: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias
+ 208, // 112: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias
+ 202, // 113: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo
+ 202, // 114: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo
+ 224, // 115: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias
+ 224, // 116: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias
+ 194, // 117: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry
+ 195, // 118: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
+ 196, // 119: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
+ 197, // 120: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
+ 198, // 121: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
+ 182, // 122: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream
+ 183, // 123: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream
+ 225, // 124: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl
+ 208, // 125: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias
+ 226, // 126: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource
+ 213, // 127: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time
+ 213, // 128: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time
+ 184, // 129: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState
+ 185, // 130: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log
+ 213, // 131: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time
+ 213, // 132: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time
+ 6, // 133: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard
+ 224, // 134: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias
+ 189, // 135: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList
+ 227, // 136: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace
+ 221, // 137: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema
+ 228, // 138: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status
+ 210, // 139: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet
+ 169, // 140: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse
+ 173, // 141: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 173, // 142: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 173, // 143: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 173, // 144: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 145, // [145:145] is the sub-list for method output_type
+ 145, // [145:145] is the sub-list for method input_type
+ 145, // [145:145] is the sub-list for extension type_name
+ 145, // [145:145] is the sub-list for extension extendee
+ 0, // [0:145] is the sub-list for field type_name
}
func init() { file_vtctldata_proto_init() }
@@ -12798,8 +13348,68 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardResponse); i {
+ file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardRoutingRulesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetShardRoutingRulesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvKeyspaceNamesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvKeyspaceNamesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvKeyspacesRequest); i {
case 0:
return &v.state
case 1:
@@ -12810,8 +13420,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardRoutingRulesRequest); i {
+ file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvKeyspacesResponse); i {
case 0:
return &v.state
case 1:
@@ -12822,8 +13432,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardRoutingRulesResponse); i {
+ file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateThrottlerConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -12834,8 +13444,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvKeyspaceNamesRequest); i {
+ file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateThrottlerConfigResponse); i {
case 0:
return &v.state
case 1:
@@ -12846,8 +13456,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvKeyspaceNamesResponse); i {
+ file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvVSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -12858,8 +13468,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvKeyspacesRequest); i {
+ file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvVSchemaResponse); i {
case 0:
return &v.state
case 1:
@@ -12870,8 +13480,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvKeyspacesResponse); i {
+ file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvVSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -12882,8 +13492,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemaRequest); i {
+ file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetSrvVSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -12894,8 +13504,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemaResponse); i {
+ file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -12906,8 +13516,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemasRequest); i {
+ file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTabletResponse); i {
case 0:
return &v.state
case 1:
@@ -12918,8 +13528,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemasResponse); i {
+ file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTabletsRequest); i {
case 0:
return &v.state
case 1:
@@ -12930,8 +13540,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletRequest); i {
+ file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTabletsResponse); i {
case 0:
return &v.state
case 1:
@@ -12942,8 +13552,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletResponse); i {
+ file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTopologyPathRequest); i {
case 0:
return &v.state
case 1:
@@ -12954,8 +13564,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletsRequest); i {
+ file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTopologyPathResponse); i {
case 0:
return &v.state
case 1:
@@ -12966,8 +13576,8 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletsResponse); i {
+ file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TopologyCell); i {
case 0:
return &v.state
case 1:
@@ -12978,7 +13588,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetVSchemaRequest); i {
case 0:
return &v.state
@@ -12990,7 +13600,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetVersionRequest); i {
case 0:
return &v.state
@@ -13002,7 +13612,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetVersionResponse); i {
case 0:
return &v.state
@@ -13014,7 +13624,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetVSchemaResponse); i {
case 0:
return &v.state
@@ -13026,7 +13636,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetWorkflowsRequest); i {
case 0:
return &v.state
@@ -13038,7 +13648,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetWorkflowsResponse); i {
case 0:
return &v.state
@@ -13050,7 +13660,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*InitShardPrimaryRequest); i {
case 0:
return &v.state
@@ -13062,7 +13672,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*InitShardPrimaryResponse); i {
case 0:
return &v.state
@@ -13074,7 +13684,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingTabletRequest); i {
case 0:
return &v.state
@@ -13086,7 +13696,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingTabletResponse); i {
case 0:
return &v.state
@@ -13098,7 +13708,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PlannedReparentShardRequest); i {
case 0:
return &v.state
@@ -13110,7 +13720,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PlannedReparentShardResponse); i {
case 0:
return &v.state
@@ -13122,7 +13732,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RebuildKeyspaceGraphRequest); i {
case 0:
return &v.state
@@ -13134,7 +13744,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RebuildKeyspaceGraphResponse); i {
case 0:
return &v.state
@@ -13146,7 +13756,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RebuildVSchemaGraphRequest); i {
case 0:
return &v.state
@@ -13158,7 +13768,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RebuildVSchemaGraphResponse); i {
case 0:
return &v.state
@@ -13170,7 +13780,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RefreshStateRequest); i {
case 0:
return &v.state
@@ -13182,7 +13792,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RefreshStateResponse); i {
case 0:
return &v.state
@@ -13194,7 +13804,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RefreshStateByShardRequest); i {
case 0:
return &v.state
@@ -13206,7 +13816,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RefreshStateByShardResponse); i {
case 0:
return &v.state
@@ -13218,7 +13828,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemaRequest); i {
case 0:
return &v.state
@@ -13230,7 +13840,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemaResponse); i {
case 0:
return &v.state
@@ -13242,7 +13852,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemaKeyspaceRequest); i {
case 0:
return &v.state
@@ -13254,7 +13864,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemaKeyspaceResponse); i {
case 0:
return &v.state
@@ -13266,7 +13876,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemaShardRequest); i {
case 0:
return &v.state
@@ -13278,7 +13888,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemaShardResponse); i {
case 0:
return &v.state
@@ -13290,7 +13900,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoveBackupRequest); i {
case 0:
return &v.state
@@ -13302,7 +13912,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoveBackupResponse); i {
case 0:
return &v.state
@@ -13314,7 +13924,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoveKeyspaceCellRequest); i {
case 0:
return &v.state
@@ -13326,7 +13936,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoveKeyspaceCellResponse); i {
case 0:
return &v.state
@@ -13338,7 +13948,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoveShardCellRequest); i {
case 0:
return &v.state
@@ -13350,7 +13960,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RemoveShardCellResponse); i {
case 0:
return &v.state
@@ -13362,7 +13972,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReparentTabletRequest); i {
case 0:
return &v.state
@@ -13374,7 +13984,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReparentTabletResponse); i {
case 0:
return &v.state
@@ -13386,7 +13996,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RestoreFromBackupRequest); i {
case 0:
return &v.state
@@ -13398,7 +14008,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RestoreFromBackupResponse); i {
case 0:
return &v.state
@@ -13410,7 +14020,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RunHealthCheckRequest); i {
case 0:
return &v.state
@@ -13422,7 +14032,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RunHealthCheckResponse); i {
case 0:
return &v.state
@@ -13434,7 +14044,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i {
case 0:
return &v.state
@@ -13446,7 +14056,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i {
case 0:
return &v.state
@@ -13458,7 +14068,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetKeyspaceServedFromRequest); i {
case 0:
return &v.state
@@ -13470,7 +14080,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetKeyspaceServedFromResponse); i {
case 0:
return &v.state
@@ -13482,7 +14092,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetKeyspaceShardingInfoRequest); i {
case 0:
return &v.state
@@ -13494,7 +14104,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetKeyspaceShardingInfoResponse); i {
case 0:
return &v.state
@@ -13506,7 +14116,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetShardIsPrimaryServingRequest); i {
case 0:
return &v.state
@@ -13518,7 +14128,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetShardIsPrimaryServingResponse); i {
case 0:
return &v.state
@@ -13530,7 +14140,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetShardTabletControlRequest); i {
case 0:
return &v.state
@@ -13542,7 +14152,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetShardTabletControlResponse); i {
case 0:
return &v.state
@@ -13554,7 +14164,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetWritableRequest); i {
case 0:
return &v.state
@@ -13566,7 +14176,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetWritableResponse); i {
case 0:
return &v.state
@@ -13578,7 +14188,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationAddRequest); i {
case 0:
return &v.state
@@ -13590,7 +14200,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationAddResponse); i {
case 0:
return &v.state
@@ -13602,7 +14212,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationFixRequest); i {
case 0:
return &v.state
@@ -13614,7 +14224,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationFixResponse); i {
case 0:
return &v.state
@@ -13626,7 +14236,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationPositionsRequest); i {
case 0:
return &v.state
@@ -13638,7 +14248,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationPositionsResponse); i {
case 0:
return &v.state
@@ -13650,7 +14260,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationRemoveRequest); i {
case 0:
return &v.state
@@ -13662,7 +14272,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ShardReplicationRemoveResponse); i {
case 0:
return &v.state
@@ -13674,7 +14284,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SleepTabletRequest); i {
case 0:
return &v.state
@@ -13686,7 +14296,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SleepTabletResponse); i {
case 0:
return &v.state
@@ -13698,7 +14308,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SourceShardAddRequest); i {
case 0:
return &v.state
@@ -13710,7 +14320,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SourceShardAddResponse); i {
case 0:
return &v.state
@@ -13722,7 +14332,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SourceShardDeleteRequest); i {
case 0:
return &v.state
@@ -13734,7 +14344,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SourceShardDeleteResponse); i {
case 0:
return &v.state
@@ -13746,7 +14356,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StartReplicationRequest); i {
case 0:
return &v.state
@@ -13758,7 +14368,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StartReplicationResponse); i {
case 0:
return &v.state
@@ -13770,7 +14380,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StopReplicationRequest); i {
case 0:
return &v.state
@@ -13782,7 +14392,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StopReplicationResponse); i {
case 0:
return &v.state
@@ -13794,7 +14404,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TabletExternallyReparentedRequest); i {
case 0:
return &v.state
@@ -13806,7 +14416,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TabletExternallyReparentedResponse); i {
case 0:
return &v.state
@@ -13818,7 +14428,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UpdateCellInfoRequest); i {
case 0:
return &v.state
@@ -13830,7 +14440,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UpdateCellInfoResponse); i {
case 0:
return &v.state
@@ -13842,7 +14452,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UpdateCellsAliasRequest); i {
case 0:
return &v.state
@@ -13854,7 +14464,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UpdateCellsAliasResponse); i {
case 0:
return &v.state
@@ -13866,7 +14476,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateRequest); i {
case 0:
return &v.state
@@ -13878,7 +14488,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateResponse); i {
case 0:
return &v.state
@@ -13890,7 +14500,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateKeyspaceRequest); i {
case 0:
return &v.state
@@ -13902,7 +14512,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateKeyspaceResponse); i {
case 0:
return &v.state
@@ -13914,7 +14524,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateSchemaKeyspaceRequest); i {
case 0:
return &v.state
@@ -13926,7 +14536,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateSchemaKeyspaceResponse); i {
case 0:
return &v.state
@@ -13938,7 +14548,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateShardRequest); i {
case 0:
return &v.state
@@ -13950,7 +14560,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateShardResponse); i {
case 0:
return &v.state
@@ -13962,7 +14572,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateVersionKeyspaceRequest); i {
case 0:
return &v.state
@@ -13974,7 +14584,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateVersionKeyspaceResponse); i {
case 0:
return &v.state
@@ -13986,7 +14596,31 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVersionShardRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVersionShardResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateVSchemaRequest); i {
case 0:
return &v.state
@@ -13998,7 +14632,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ValidateVSchemaResponse); i {
case 0:
return &v.state
@@ -14010,7 +14644,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Workflow_ReplicationLocation); i {
case 0:
return &v.state
@@ -14022,7 +14656,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Workflow_ShardStream); i {
case 0:
return &v.state
@@ -14034,7 +14668,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Workflow_Stream); i {
case 0:
return &v.state
@@ -14046,7 +14680,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Workflow_Stream_CopyState); i {
case 0:
return &v.state
@@ -14058,7 +14692,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Workflow_Stream_Log); i {
case 0:
return &v.state
@@ -14070,7 +14704,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[188].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSrvKeyspaceNamesResponse_NameList); i {
case 0:
return &v.state
@@ -14089,7 +14723,7 @@ func file_vtctldata_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_vtctldata_proto_rawDesc,
NumEnums: 1,
- NumMessages: 191,
+ NumMessages: 198,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go
index 08943dd5ac5..e59cf96bd3d 100644
--- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go
+++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go
@@ -1,13 +1,15 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: vtctldata.proto
package vtctldata
import (
+ binary "encoding/binary"
fmt "fmt"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
+ math "math"
bits "math/bits"
binlogdata "vitess.io/vitess/go/vt/proto/binlogdata"
logutil "vitess.io/vitess/go/vt/proto/logutil"
@@ -202,6 +204,23 @@ func (m *MaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.DeferSecondaryKeys {
+ i--
+ if m.DeferSecondaryKeys {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x70
+ }
+ if len(m.OnDdl) > 0 {
+ i -= len(m.OnDdl)
+ copy(dAtA[i:], m.OnDdl)
+ i = encodeVarint(dAtA, i, uint64(len(m.OnDdl)))
+ i--
+ dAtA[i] = 0x6a
+ }
if len(m.SourceShards) > 0 {
for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.SourceShards[iNdEx])
@@ -846,6 +865,20 @@ func (m *Workflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.WorkflowSubType) > 0 {
+ i -= len(m.WorkflowSubType)
+ copy(dAtA[i:], m.WorkflowSubType)
+ i = encodeVarint(dAtA, i, uint64(len(m.WorkflowSubType)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.WorkflowType) > 0 {
+ i -= len(m.WorkflowType)
+ copy(dAtA[i:], m.WorkflowType)
+ i = encodeVarint(dAtA, i, uint64(len(m.WorkflowType)))
+ i--
+ dAtA[i] = 0x32
+ }
if len(m.ShardStreams) > 0 {
for k := range m.ShardStreams {
v := m.ShardStreams[k]
@@ -1571,6 +1604,13 @@ func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.IncrementalFromPos) > 0 {
+ i -= len(m.IncrementalFromPos)
+ copy(dAtA[i:], m.IncrementalFromPos)
+ i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos)))
+ i--
+ dAtA[i] = 0x22
+ }
if m.Concurrency != 0 {
i = encodeVarint(dAtA, i, uint64(m.Concurrency))
i--
@@ -4536,6 +4576,142 @@ func (m *GetSrvKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro
return len(dAtA) - i, nil
}
+func (m *UpdateThrottlerConfigRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UpdateThrottlerConfigRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *UpdateThrottlerConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.CheckAsCheckShard {
+ i--
+ if m.CheckAsCheckShard {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.CheckAsCheckSelf {
+ i--
+ if m.CheckAsCheckSelf {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.CustomQuerySet {
+ i--
+ if m.CustomQuerySet {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.CustomQuery) > 0 {
+ i -= len(m.CustomQuery)
+ copy(dAtA[i:], m.CustomQuery)
+ i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Threshold != 0 {
+ i -= 8
+ binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold))))
+ i--
+ dAtA[i] = 0x21
+ }
+ if m.Disable {
+ i--
+ if m.Disable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Enable {
+ i--
+ if m.Enable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UpdateThrottlerConfigResponse) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UpdateThrottlerConfigResponse) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *UpdateThrottlerConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *GetSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -4930,6 +5106,152 @@ func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarint(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GetTopologyPathResponse) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetTopologyPathResponse) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetTopologyPathResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Cell != nil {
+ size, err := m.Cell.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TopologyCell) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TopologyCell) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *TopologyCell) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Children) > 0 {
+ for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Children[iNdEx])
+ copy(dAtA[i:], m.Children[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.Children[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarint(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarint(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -6605,6 +6927,23 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.DryRun {
+ i--
+ if m.DryRun {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.RestoreToPos) > 0 {
+ i -= len(m.RestoreToPos)
+ copy(dAtA[i:], m.RestoreToPos)
+ i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos)))
+ i--
+ dAtA[i] = 0x1a
+ }
if m.BackupTime != nil {
size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i])
if err != nil {
@@ -9126,6 +9465,95 @@ func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (i
return len(dAtA) - i, nil
}
+func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Shard) > 0 {
+ i -= len(m.Shard)
+ copy(dAtA[i:], m.Shard)
+ i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateVersionShardResponse) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Results) > 0 {
+ for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Results[iNdEx])
+ copy(dAtA[i:], m.Results[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -9284,9 +9712,7 @@ func (m *ExecuteVtctlCommandRequest) SizeVT() (n int) {
if m.ActionTimeout != 0 {
n += 1 + sov(uint64(m.ActionTimeout))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9300,9 +9726,7 @@ func (m *ExecuteVtctlCommandResponse) SizeVT() (n int) {
l = m.Event.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9324,9 +9748,7 @@ func (m *TableMaterializeSettings) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9386,9 +9808,14 @@ func (m *MaterializeSettings) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.OnDdl)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.DeferSecondaryKeys {
+ n += 2
}
+ n += len(m.unknownFields)
return n
}
@@ -9406,9 +9833,7 @@ func (m *Keyspace) SizeVT() (n int) {
l = m.Keyspace.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9430,9 +9855,7 @@ func (m *Shard) SizeVT() (n int) {
l = m.Shard.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9452,9 +9875,7 @@ func (m *Workflow_ReplicationLocation) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9479,9 +9900,7 @@ func (m *Workflow_ShardStream) SizeVT() (n int) {
if m.IsPrimaryServing {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9499,9 +9918,7 @@ func (m *Workflow_Stream_CopyState) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9540,9 +9957,7 @@ func (m *Workflow_Stream_Log) SizeVT() (n int) {
if m.Count != 0 {
n += 1 + sov(uint64(m.Count))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9617,9 +10032,7 @@ func (m *Workflow_Stream) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9657,9 +10070,15 @@ func (m *Workflow) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.WorkflowType)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.WorkflowSubType)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
return n
}
@@ -9677,9 +10096,7 @@ func (m *AddCellInfoRequest) SizeVT() (n int) {
l = m.CellInfo.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9689,9 +10106,7 @@ func (m *AddCellInfoResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9711,9 +10126,7 @@ func (m *AddCellsAliasRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9723,9 +10136,7 @@ func (m *AddCellsAliasResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9748,9 +10159,7 @@ func (m *ApplyRoutingRulesRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9760,9 +10169,7 @@ func (m *ApplyRoutingRulesResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9785,9 +10192,7 @@ func (m *ApplyShardRoutingRulesRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9797,9 +10202,7 @@ func (m *ApplyShardRoutingRulesResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9847,9 +10250,7 @@ func (m *ApplySchemaRequest) SizeVT() (n int) {
l = m.CallerId.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9865,9 +10266,7 @@ func (m *ApplySchemaResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9901,9 +10300,7 @@ func (m *ApplyVSchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9917,9 +10314,7 @@ func (m *ApplyVSchemaResponse) SizeVT() (n int) {
l = m.VSchema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9939,9 +10334,11 @@ func (m *BackupRequest) SizeVT() (n int) {
if m.Concurrency != 0 {
n += 1 + sov(uint64(m.Concurrency))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.IncrementalFromPos)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
return n
}
@@ -9967,9 +10364,7 @@ func (m *BackupResponse) SizeVT() (n int) {
l = m.Event.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -9993,9 +10388,7 @@ func (m *BackupShardRequest) SizeVT() (n int) {
if m.Concurrency != 0 {
n += 1 + sov(uint64(m.Concurrency))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10015,9 +10408,7 @@ func (m *ChangeTabletTypeRequest) SizeVT() (n int) {
if m.DryRun {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10038,9 +10429,7 @@ func (m *ChangeTabletTypeResponse) SizeVT() (n int) {
if m.WasDryRun {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10081,9 +10470,7 @@ func (m *CreateKeyspaceRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10097,9 +10484,7 @@ func (m *CreateKeyspaceResponse) SizeVT() (n int) {
l = m.Keyspace.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10123,9 +10508,7 @@ func (m *CreateShardRequest) SizeVT() (n int) {
if m.IncludeParent {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10146,9 +10529,7 @@ func (m *CreateShardResponse) SizeVT() (n int) {
if m.ShardAlreadyExists {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10165,9 +10546,7 @@ func (m *DeleteCellInfoRequest) SizeVT() (n int) {
if m.Force {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10177,9 +10556,7 @@ func (m *DeleteCellInfoResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10193,9 +10570,7 @@ func (m *DeleteCellsAliasRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10205,9 +10580,7 @@ func (m *DeleteCellsAliasResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10227,9 +10600,7 @@ func (m *DeleteKeyspaceRequest) SizeVT() (n int) {
if m.Force {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10239,9 +10610,7 @@ func (m *DeleteKeyspaceResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10266,9 +10635,7 @@ func (m *DeleteShardsRequest) SizeVT() (n int) {
if m.Force {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10278,9 +10645,7 @@ func (m *DeleteShardsResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10294,9 +10659,7 @@ func (m *DeleteSrvVSchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10306,9 +10669,7 @@ func (m *DeleteSrvVSchemaResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10327,9 +10688,7 @@ func (m *DeleteTabletsRequest) SizeVT() (n int) {
if m.AllowPrimary {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10339,9 +10698,7 @@ func (m *DeleteTabletsResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10376,9 +10733,7 @@ func (m *EmergencyReparentShardRequest) SizeVT() (n int) {
if m.PreventCrossCellPromotion {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10406,9 +10761,7 @@ func (m *EmergencyReparentShardResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10432,9 +10785,7 @@ func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) {
if m.UsePool {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10448,9 +10799,7 @@ func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10477,9 +10826,7 @@ func (m *ExecuteFetchAsDBARequest) SizeVT() (n int) {
if m.ReloadSchema {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10493,9 +10840,7 @@ func (m *ExecuteFetchAsDBAResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10513,9 +10858,7 @@ func (m *ExecuteHookRequest) SizeVT() (n int) {
l = m.TabletHookRequest.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10529,9 +10872,7 @@ func (m *ExecuteHookResponse) SizeVT() (n int) {
l = m.HookResult.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10545,9 +10886,7 @@ func (m *FindAllShardsInKeyspaceRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10570,9 +10909,7 @@ func (m *FindAllShardsInKeyspaceResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10599,9 +10936,7 @@ func (m *GetBackupsRequest) SizeVT() (n int) {
if m.DetailedLimit != 0 {
n += 1 + sov(uint64(m.DetailedLimit))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10617,9 +10952,7 @@ func (m *GetBackupsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10633,9 +10966,7 @@ func (m *GetCellInfoRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10649,9 +10980,7 @@ func (m *GetCellInfoResponse) SizeVT() (n int) {
l = m.CellInfo.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10661,9 +10990,7 @@ func (m *GetCellInfoNamesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10679,9 +11006,7 @@ func (m *GetCellInfoNamesResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10691,9 +11016,7 @@ func (m *GetCellsAliasesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10716,9 +11039,7 @@ func (m *GetCellsAliasesResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10732,9 +11053,7 @@ func (m *GetFullStatusRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10748,9 +11067,7 @@ func (m *GetFullStatusResponse) SizeVT() (n int) {
l = m.Status.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10760,9 +11077,7 @@ func (m *GetKeyspacesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10778,9 +11093,7 @@ func (m *GetKeyspacesResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10794,9 +11107,7 @@ func (m *GetKeyspaceRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10810,9 +11121,7 @@ func (m *GetKeyspaceResponse) SizeVT() (n int) {
l = m.Keyspace.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10826,9 +11135,7 @@ func (m *GetPermissionsRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10842,9 +11149,7 @@ func (m *GetPermissionsResponse) SizeVT() (n int) {
l = m.Permissions.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10854,9 +11159,7 @@ func (m *GetRoutingRulesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10870,9 +11173,7 @@ func (m *GetRoutingRulesResponse) SizeVT() (n int) {
l = m.RoutingRules.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10910,9 +11211,7 @@ func (m *GetSchemaRequest) SizeVT() (n int) {
if m.TableSchemaOnly {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10926,9 +11225,7 @@ func (m *GetSchemaResponse) SizeVT() (n int) {
l = m.Schema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10946,9 +11243,7 @@ func (m *GetShardRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10962,9 +11257,7 @@ func (m *GetShardResponse) SizeVT() (n int) {
l = m.Shard.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10974,9 +11267,7 @@ func (m *GetShardRoutingRulesRequest) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -10990,9 +11281,7 @@ func (m *GetShardRoutingRulesResponse) SizeVT() (n int) {
l = m.ShardRoutingRules.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11008,9 +11297,7 @@ func (m *GetSrvKeyspaceNamesRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11026,9 +11313,7 @@ func (m *GetSrvKeyspaceNamesResponse_NameList) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11051,9 +11336,7 @@ func (m *GetSrvKeyspaceNamesResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11073,9 +11356,7 @@ func (m *GetSrvKeyspacesRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11098,9 +11379,53 @@ func (m *GetSrvKeyspacesResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *UpdateThrottlerConfigRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Keyspace)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.Enable {
+ n += 2
+ }
+ if m.Disable {
+ n += 2
+ }
+ if m.Threshold != 0 {
+ n += 9
}
+ l = len(m.CustomQuery)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.CustomQuerySet {
+ n += 2
+ }
+ if m.CheckAsCheckSelf {
+ n += 2
+ }
+ if m.CheckAsCheckShard {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *UpdateThrottlerConfigResponse) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += len(m.unknownFields)
return n
}
@@ -11114,9 +11439,7 @@ func (m *GetSrvVSchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11130,9 +11453,7 @@ func (m *GetSrvVSchemaResponse) SizeVT() (n int) {
l = m.SrvVSchema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11148,9 +11469,7 @@ func (m *GetSrvVSchemasRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11173,9 +11492,7 @@ func (m *GetSrvVSchemasResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11189,9 +11506,7 @@ func (m *GetTabletRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11205,9 +11520,7 @@ func (m *GetTabletResponse) SizeVT() (n int) {
l = m.Tablet.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11243,9 +11556,7 @@ func (m *GetTabletsRequest) SizeVT() (n int) {
if m.TabletType != 0 {
n += 1 + sov(uint64(m.TabletType))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11261,9 +11572,63 @@ func (m *GetTabletsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *GetTopologyPathRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *GetTopologyPathResponse) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Cell != nil {
+ l = m.Cell.SizeVT()
+ n += 1 + l + sov(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *TopologyCell) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if len(m.Children) > 0 {
+ for _, s := range m.Children {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
return n
}
@@ -11277,9 +11642,7 @@ func (m *GetVSchemaRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11293,9 +11656,7 @@ func (m *GetVersionRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11309,9 +11670,7 @@ func (m *GetVersionResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11325,9 +11684,7 @@ func (m *GetVSchemaResponse) SizeVT() (n int) {
l = m.VSchema.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11344,9 +11701,7 @@ func (m *GetWorkflowsRequest) SizeVT() (n int) {
if m.ActiveOnly {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11362,9 +11717,7 @@ func (m *GetWorkflowsResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11393,9 +11746,7 @@ func (m *InitShardPrimaryRequest) SizeVT() (n int) {
l = m.WaitReplicasTimeout.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11411,9 +11762,7 @@ func (m *InitShardPrimaryResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11427,9 +11776,7 @@ func (m *PingTabletRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11439,9 +11786,7 @@ func (m *PingTabletResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11471,9 +11816,7 @@ func (m *PlannedReparentShardRequest) SizeVT() (n int) {
l = m.WaitReplicasTimeout.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11501,9 +11844,7 @@ func (m *PlannedReparentShardResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11526,9 +11867,7 @@ func (m *RebuildKeyspaceGraphRequest) SizeVT() (n int) {
if m.AllowPartial {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11538,9 +11877,7 @@ func (m *RebuildKeyspaceGraphResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11556,9 +11893,7 @@ func (m *RebuildVSchemaGraphRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11568,9 +11903,7 @@ func (m *RebuildVSchemaGraphResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11584,9 +11917,7 @@ func (m *RefreshStateRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11596,9 +11927,7 @@ func (m *RefreshStateResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11622,9 +11951,7 @@ func (m *RefreshStateByShardRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11641,9 +11968,7 @@ func (m *RefreshStateByShardResponse) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11657,9 +11982,7 @@ func (m *ReloadSchemaRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11669,9 +11992,7 @@ func (m *ReloadSchemaResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11695,9 +12016,7 @@ func (m *ReloadSchemaKeyspaceRequest) SizeVT() (n int) {
if m.Concurrency != 0 {
n += 1 + sov(uint64(m.Concurrency))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11713,9 +12032,7 @@ func (m *ReloadSchemaKeyspaceResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11743,9 +12060,7 @@ func (m *ReloadSchemaShardRequest) SizeVT() (n int) {
if m.Concurrency != 0 {
n += 1 + sov(uint64(m.Concurrency))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11761,9 +12076,7 @@ func (m *ReloadSchemaShardResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11785,9 +12098,7 @@ func (m *RemoveBackupRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11797,9 +12108,7 @@ func (m *RemoveBackupResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11823,9 +12132,7 @@ func (m *RemoveKeyspaceCellRequest) SizeVT() (n int) {
if m.Recursive {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11835,9 +12142,7 @@ func (m *RemoveKeyspaceCellResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11865,9 +12170,7 @@ func (m *RemoveShardCellRequest) SizeVT() (n int) {
if m.Recursive {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11877,9 +12180,7 @@ func (m *RemoveShardCellResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11893,9 +12194,7 @@ func (m *ReparentTabletRequest) SizeVT() (n int) {
l = m.Tablet.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11917,9 +12216,7 @@ func (m *ReparentTabletResponse) SizeVT() (n int) {
l = m.Primary.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11937,9 +12234,14 @@ func (m *RestoreFromBackupRequest) SizeVT() (n int) {
l = m.BackupTime.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ l = len(m.RestoreToPos)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ if m.DryRun {
+ n += 2
+ }
+ n += len(m.unknownFields)
return n
}
@@ -11965,9 +12267,7 @@ func (m *RestoreFromBackupResponse) SizeVT() (n int) {
l = m.Event.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11981,9 +12281,7 @@ func (m *RunHealthCheckRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -11993,9 +12291,7 @@ func (m *RunHealthCheckResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12013,9 +12309,7 @@ func (m *SetKeyspaceDurabilityPolicyRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12029,9 +12323,7 @@ func (m *SetKeyspaceDurabilityPolicyResponse) SizeVT() (n int) {
l = m.Keyspace.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12061,9 +12353,7 @@ func (m *SetKeyspaceServedFromRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12077,9 +12367,7 @@ func (m *SetKeyspaceServedFromResponse) SizeVT() (n int) {
l = m.Keyspace.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12096,9 +12384,7 @@ func (m *SetKeyspaceShardingInfoRequest) SizeVT() (n int) {
if m.Force {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12112,9 +12398,7 @@ func (m *SetKeyspaceShardingInfoResponse) SizeVT() (n int) {
l = m.Keyspace.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12135,9 +12419,7 @@ func (m *SetShardIsPrimaryServingRequest) SizeVT() (n int) {
if m.IsServing {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12151,9 +12433,7 @@ func (m *SetShardIsPrimaryServingResponse) SizeVT() (n int) {
l = m.Shard.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12192,9 +12472,7 @@ func (m *SetShardTabletControlRequest) SizeVT() (n int) {
if m.Remove {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12208,9 +12486,7 @@ func (m *SetShardTabletControlResponse) SizeVT() (n int) {
l = m.Shard.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12227,9 +12503,7 @@ func (m *SetWritableRequest) SizeVT() (n int) {
if m.Writable {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12239,9 +12513,7 @@ func (m *SetWritableResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12263,9 +12535,7 @@ func (m *ShardReplicationAddRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12275,9 +12545,7 @@ func (m *ShardReplicationAddResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12299,9 +12567,7 @@ func (m *ShardReplicationFixRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12315,9 +12581,7 @@ func (m *ShardReplicationFixResponse) SizeVT() (n int) {
l = m.Error.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12335,9 +12599,7 @@ func (m *ShardReplicationPositionsRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12373,9 +12635,7 @@ func (m *ShardReplicationPositionsResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12397,9 +12657,7 @@ func (m *ShardReplicationRemoveRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12409,9 +12667,7 @@ func (m *ShardReplicationRemoveResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12429,9 +12685,7 @@ func (m *SleepTabletRequest) SizeVT() (n int) {
l = m.Duration.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12441,9 +12695,7 @@ func (m *SleepTabletResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12482,9 +12734,7 @@ func (m *SourceShardAddRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12498,9 +12748,7 @@ func (m *SourceShardAddResponse) SizeVT() (n int) {
l = m.Shard.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12521,9 +12769,7 @@ func (m *SourceShardDeleteRequest) SizeVT() (n int) {
if m.Uid != 0 {
n += 1 + sov(uint64(m.Uid))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12537,9 +12783,7 @@ func (m *SourceShardDeleteResponse) SizeVT() (n int) {
l = m.Shard.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12553,9 +12797,7 @@ func (m *StartReplicationRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12565,9 +12807,7 @@ func (m *StartReplicationResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12581,9 +12821,7 @@ func (m *StopReplicationRequest) SizeVT() (n int) {
l = m.TabletAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12593,9 +12831,7 @@ func (m *StopReplicationResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12609,9 +12845,7 @@ func (m *TabletExternallyReparentedRequest) SizeVT() (n int) {
l = m.Tablet.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12637,9 +12871,7 @@ func (m *TabletExternallyReparentedResponse) SizeVT() (n int) {
l = m.OldPrimary.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12657,9 +12889,7 @@ func (m *UpdateCellInfoRequest) SizeVT() (n int) {
l = m.CellInfo.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12677,9 +12907,7 @@ func (m *UpdateCellInfoResponse) SizeVT() (n int) {
l = m.CellInfo.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12697,9 +12925,7 @@ func (m *UpdateCellsAliasRequest) SizeVT() (n int) {
l = m.CellsAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12717,9 +12943,7 @@ func (m *UpdateCellsAliasResponse) SizeVT() (n int) {
l = m.CellsAlias.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12732,9 +12956,7 @@ func (m *ValidateRequest) SizeVT() (n int) {
if m.PingTablets {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12763,9 +12985,7 @@ func (m *ValidateResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12782,9 +13002,7 @@ func (m *ValidateKeyspaceRequest) SizeVT() (n int) {
if m.PingTablets {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12813,9 +13031,7 @@ func (m *ValidateKeyspaceResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12844,9 +13060,7 @@ func (m *ValidateSchemaKeyspaceRequest) SizeVT() (n int) {
if m.IncludeVschema {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12875,9 +13089,7 @@ func (m *ValidateSchemaKeyspaceResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12898,9 +13110,7 @@ func (m *ValidateShardRequest) SizeVT() (n int) {
if m.PingTablets {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12916,9 +13126,7 @@ func (m *ValidateShardResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12932,9 +13140,7 @@ func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -12963,9 +13169,41 @@ func (m *ValidateVersionKeyspaceResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ValidateVersionShardRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Keyspace)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
+ l = len(m.Shard)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ValidateVersionShardResponse) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Results) > 0 {
+ for _, s := range m.Results {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
return n
}
@@ -12994,9 +13232,7 @@ func (m *ValidateVSchemaRequest) SizeVT() (n int) {
if m.IncludeViews {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -13025,9 +13261,7 @@ func (m *ValidateVSchemaResponse) SizeVT() (n int) {
n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -13763,6 +13997,58 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error {
}
m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OnDdl = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 14:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.DeferSecondaryKeys = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -15519,60 +15805,9 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error {
}
m.ShardStreams[mapkey] = mapvalue
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skip(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLength
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AddCellInfoRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AddCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -15600,149 +15835,264 @@ func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLength
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLength
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.CellInfo == nil {
- m.CellInfo = &topodata.CellInfo{}
- }
- if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.WorkflowType = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skip(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLength
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AddCellInfoResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AddCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skip(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLength
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AddCellsAliasRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AddCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.WorkflowSubType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AddCellInfoRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AddCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CellInfo == nil {
+ m.CellInfo = &topodata.CellInfo{}
+ }
+ if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AddCellInfoResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AddCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AddCellsAliasRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AddCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -17077,6 +17427,38 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error {
break
}
}
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IncrementalFromPos = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -23611,6 +23993,283 @@ func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateThrottlerConfigRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateThrottlerConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enable = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Disable = bool(v != 0)
+ case 4:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Threshold = float64(math.Float64frombits(v))
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CustomQuery", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CustomQuery = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CustomQuerySet", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CustomQuerySet = bool(v != 0)
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckSelf", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CheckAsCheckSelf = bool(v != 0)
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckShard", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CheckAsCheckShard = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateThrottlerConfigResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateThrottlerConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -24523,7 +25182,7 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
+func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24546,15 +25205,15 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -24582,7 +25241,7 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Keyspace = string(dAtA[iNdEx:postIndex])
+ m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -24606,7 +25265,7 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error {
+func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24629,15 +25288,15 @@ func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetVersionRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetTopologyPathResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetVersionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetTopologyPathResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -24664,10 +25323,10 @@ func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.TabletAlias == nil {
- m.TabletAlias = &topodata.TabletAlias{}
+ if m.Cell == nil {
+ m.Cell = &TopologyCell{}
}
- if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Cell.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -24693,7 +25352,7 @@ func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error {
+func (m *TopologyCell) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24716,15 +25375,15 @@ func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetVersionResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: TopologyCell: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetVersionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: TopologyCell: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -24752,7 +25411,103 @@ func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Version = string(dAtA[iNdEx:postIndex])
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Children = append(m.Children, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -24776,7 +25531,7 @@ func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error {
+func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24799,17 +25554,17 @@ func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetVSchemaResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -24819,27 +25574,23 @@ func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.VSchema == nil {
- m.VSchema = &vschema.Keyspace{}
- }
- if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -24863,7 +25614,7 @@ func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error {
+func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24886,17 +25637,17 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetVersionRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetVersionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -24906,44 +25657,28 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Keyspace = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType)
+ if m.TabletAlias == nil {
+ m.TabletAlias = &topodata.TabletAlias{}
}
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.ActiveOnly = bool(v != 0)
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -24966,7 +25701,7 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error {
+func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24989,17 +25724,17 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetVersionResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetVersionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -25009,25 +25744,23 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Workflows = append(m.Workflows, &Workflow{})
- if err := m.Workflows[len(m.Workflows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Version = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -25051,7 +25784,7 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error {
+func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -25074,17 +25807,17 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: InitShardPrimaryRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetVSchemaResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: InitShardPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -25094,27 +25827,302 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Keyspace = string(dAtA[iNdEx:postIndex])
+ if m.VSchema == nil {
+ m.VSchema = &vschema.Keyspace{}
+ }
+ if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 2:
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ActiveOnly = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Workflows = append(m.Workflows, &Workflow{})
+ if err := m.Workflows[len(m.Workflows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InitShardPrimaryRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InitShardPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -28226,6 +29234,58 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RestoreToPos = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.DryRun = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -34277,6 +35337,204 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateVersionShardResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateVersionShardResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Results = append(m.Results, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -34656,6 +35914,7 @@ func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go
index 5981248bde7..615622a97aa 100644
--- a/go/vt/proto/vtctlservice/vtctlservice.pb.go
+++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vtctlservice.proto
@@ -51,7 +51,7 @@ var file_vtctlservice_proto_rawDesc = []byte{
0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63,
0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74,
0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xdc, 0x3a, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c,
+ 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0x91, 0x3d, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c,
0x64, 0x12, 0x4e, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x64, 0x64,
0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
@@ -252,279 +252,298 @@ var file_vtctlservice_proto_rawDesc = []byte{
0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47,
- 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76,
- 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74,
+ 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65,
+ 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65,
- 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47,
- 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
- 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b,
- 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47,
- 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d,
- 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49,
- 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a,
- 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c,
- 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50,
- 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c,
- 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
- 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69,
- 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72,
- 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52,
- 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
- 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26,
- 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61,
- 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47,
+ 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
+ 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61,
+ 0x74, 0x68, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47,
+ 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74,
+ 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47,
+ 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b,
+ 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52,
+ 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64,
+ 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65,
+ 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70,
+ 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65,
+ 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70,
+ 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x25, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a,
+ 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c,
+ 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52,
+ 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c,
+ 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a,
+ 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1e, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76,
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
- 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c,
- 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52,
- 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21,
- 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65,
- 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x62, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72,
- 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
- 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a,
- 0x1b, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61,
- 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a,
- 0x18, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73,
- 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65,
- 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74,
- 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
- 0x78, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
- 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11, 0x52, 0x65,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12,
+ 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57,
+ 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
+ 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75,
+ 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72,
+ 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c,
+ 0x0a, 0x15, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b,
+ 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x12, 0x25, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46,
+ 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
+ 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a,
- 0x0b, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a,
- 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12,
- 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74,
+ 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x6c, 0x65, 0x65, 0x70,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74,
0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72,
- 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61,
- 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74,
- 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65,
- 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e,
- 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x5d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f,
- 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
+ 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a,
+ 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c,
+ 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43,
+ 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c,
+ 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65,
+ 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x54, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x21, 0x2e, 0x76,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76,
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e,
- 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x72, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a,
+ 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69,
+ 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_vtctlservice_proto_goTypes = []interface{}{
@@ -565,132 +584,138 @@ var file_vtctlservice_proto_goTypes = []interface{}{
(*vtctldata.GetShardRoutingRulesRequest)(nil), // 34: vtctldata.GetShardRoutingRulesRequest
(*vtctldata.GetSrvKeyspaceNamesRequest)(nil), // 35: vtctldata.GetSrvKeyspaceNamesRequest
(*vtctldata.GetSrvKeyspacesRequest)(nil), // 36: vtctldata.GetSrvKeyspacesRequest
- (*vtctldata.GetSrvVSchemaRequest)(nil), // 37: vtctldata.GetSrvVSchemaRequest
- (*vtctldata.GetSrvVSchemasRequest)(nil), // 38: vtctldata.GetSrvVSchemasRequest
- (*vtctldata.GetTabletRequest)(nil), // 39: vtctldata.GetTabletRequest
- (*vtctldata.GetTabletsRequest)(nil), // 40: vtctldata.GetTabletsRequest
- (*vtctldata.GetVersionRequest)(nil), // 41: vtctldata.GetVersionRequest
- (*vtctldata.GetVSchemaRequest)(nil), // 42: vtctldata.GetVSchemaRequest
- (*vtctldata.GetWorkflowsRequest)(nil), // 43: vtctldata.GetWorkflowsRequest
- (*vtctldata.InitShardPrimaryRequest)(nil), // 44: vtctldata.InitShardPrimaryRequest
- (*vtctldata.PingTabletRequest)(nil), // 45: vtctldata.PingTabletRequest
- (*vtctldata.PlannedReparentShardRequest)(nil), // 46: vtctldata.PlannedReparentShardRequest
- (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 47: vtctldata.RebuildKeyspaceGraphRequest
- (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 48: vtctldata.RebuildVSchemaGraphRequest
- (*vtctldata.RefreshStateRequest)(nil), // 49: vtctldata.RefreshStateRequest
- (*vtctldata.RefreshStateByShardRequest)(nil), // 50: vtctldata.RefreshStateByShardRequest
- (*vtctldata.ReloadSchemaRequest)(nil), // 51: vtctldata.ReloadSchemaRequest
- (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 52: vtctldata.ReloadSchemaKeyspaceRequest
- (*vtctldata.ReloadSchemaShardRequest)(nil), // 53: vtctldata.ReloadSchemaShardRequest
- (*vtctldata.RemoveBackupRequest)(nil), // 54: vtctldata.RemoveBackupRequest
- (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 55: vtctldata.RemoveKeyspaceCellRequest
- (*vtctldata.RemoveShardCellRequest)(nil), // 56: vtctldata.RemoveShardCellRequest
- (*vtctldata.ReparentTabletRequest)(nil), // 57: vtctldata.ReparentTabletRequest
- (*vtctldata.RestoreFromBackupRequest)(nil), // 58: vtctldata.RestoreFromBackupRequest
- (*vtctldata.RunHealthCheckRequest)(nil), // 59: vtctldata.RunHealthCheckRequest
- (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 60: vtctldata.SetKeyspaceDurabilityPolicyRequest
- (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 61: vtctldata.SetShardIsPrimaryServingRequest
- (*vtctldata.SetShardTabletControlRequest)(nil), // 62: vtctldata.SetShardTabletControlRequest
- (*vtctldata.SetWritableRequest)(nil), // 63: vtctldata.SetWritableRequest
- (*vtctldata.ShardReplicationAddRequest)(nil), // 64: vtctldata.ShardReplicationAddRequest
- (*vtctldata.ShardReplicationFixRequest)(nil), // 65: vtctldata.ShardReplicationFixRequest
- (*vtctldata.ShardReplicationPositionsRequest)(nil), // 66: vtctldata.ShardReplicationPositionsRequest
- (*vtctldata.ShardReplicationRemoveRequest)(nil), // 67: vtctldata.ShardReplicationRemoveRequest
- (*vtctldata.SleepTabletRequest)(nil), // 68: vtctldata.SleepTabletRequest
- (*vtctldata.SourceShardAddRequest)(nil), // 69: vtctldata.SourceShardAddRequest
- (*vtctldata.SourceShardDeleteRequest)(nil), // 70: vtctldata.SourceShardDeleteRequest
- (*vtctldata.StartReplicationRequest)(nil), // 71: vtctldata.StartReplicationRequest
- (*vtctldata.StopReplicationRequest)(nil), // 72: vtctldata.StopReplicationRequest
- (*vtctldata.TabletExternallyReparentedRequest)(nil), // 73: vtctldata.TabletExternallyReparentedRequest
- (*vtctldata.UpdateCellInfoRequest)(nil), // 74: vtctldata.UpdateCellInfoRequest
- (*vtctldata.UpdateCellsAliasRequest)(nil), // 75: vtctldata.UpdateCellsAliasRequest
- (*vtctldata.ValidateRequest)(nil), // 76: vtctldata.ValidateRequest
- (*vtctldata.ValidateKeyspaceRequest)(nil), // 77: vtctldata.ValidateKeyspaceRequest
- (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 78: vtctldata.ValidateSchemaKeyspaceRequest
- (*vtctldata.ValidateShardRequest)(nil), // 79: vtctldata.ValidateShardRequest
- (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 80: vtctldata.ValidateVersionKeyspaceRequest
- (*vtctldata.ValidateVSchemaRequest)(nil), // 81: vtctldata.ValidateVSchemaRequest
- (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 82: vtctldata.ExecuteVtctlCommandResponse
- (*vtctldata.AddCellInfoResponse)(nil), // 83: vtctldata.AddCellInfoResponse
- (*vtctldata.AddCellsAliasResponse)(nil), // 84: vtctldata.AddCellsAliasResponse
- (*vtctldata.ApplyRoutingRulesResponse)(nil), // 85: vtctldata.ApplyRoutingRulesResponse
- (*vtctldata.ApplySchemaResponse)(nil), // 86: vtctldata.ApplySchemaResponse
- (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 87: vtctldata.ApplyShardRoutingRulesResponse
- (*vtctldata.ApplyVSchemaResponse)(nil), // 88: vtctldata.ApplyVSchemaResponse
- (*vtctldata.BackupResponse)(nil), // 89: vtctldata.BackupResponse
- (*vtctldata.ChangeTabletTypeResponse)(nil), // 90: vtctldata.ChangeTabletTypeResponse
- (*vtctldata.CreateKeyspaceResponse)(nil), // 91: vtctldata.CreateKeyspaceResponse
- (*vtctldata.CreateShardResponse)(nil), // 92: vtctldata.CreateShardResponse
- (*vtctldata.DeleteCellInfoResponse)(nil), // 93: vtctldata.DeleteCellInfoResponse
- (*vtctldata.DeleteCellsAliasResponse)(nil), // 94: vtctldata.DeleteCellsAliasResponse
- (*vtctldata.DeleteKeyspaceResponse)(nil), // 95: vtctldata.DeleteKeyspaceResponse
- (*vtctldata.DeleteShardsResponse)(nil), // 96: vtctldata.DeleteShardsResponse
- (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 97: vtctldata.DeleteSrvVSchemaResponse
- (*vtctldata.DeleteTabletsResponse)(nil), // 98: vtctldata.DeleteTabletsResponse
- (*vtctldata.EmergencyReparentShardResponse)(nil), // 99: vtctldata.EmergencyReparentShardResponse
- (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 100: vtctldata.ExecuteFetchAsAppResponse
- (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 101: vtctldata.ExecuteFetchAsDBAResponse
- (*vtctldata.ExecuteHookResponse)(nil), // 102: vtctldata.ExecuteHookResponse
- (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 103: vtctldata.FindAllShardsInKeyspaceResponse
- (*vtctldata.GetBackupsResponse)(nil), // 104: vtctldata.GetBackupsResponse
- (*vtctldata.GetCellInfoResponse)(nil), // 105: vtctldata.GetCellInfoResponse
- (*vtctldata.GetCellInfoNamesResponse)(nil), // 106: vtctldata.GetCellInfoNamesResponse
- (*vtctldata.GetCellsAliasesResponse)(nil), // 107: vtctldata.GetCellsAliasesResponse
- (*vtctldata.GetFullStatusResponse)(nil), // 108: vtctldata.GetFullStatusResponse
- (*vtctldata.GetKeyspaceResponse)(nil), // 109: vtctldata.GetKeyspaceResponse
- (*vtctldata.GetKeyspacesResponse)(nil), // 110: vtctldata.GetKeyspacesResponse
- (*vtctldata.GetPermissionsResponse)(nil), // 111: vtctldata.GetPermissionsResponse
- (*vtctldata.GetRoutingRulesResponse)(nil), // 112: vtctldata.GetRoutingRulesResponse
- (*vtctldata.GetSchemaResponse)(nil), // 113: vtctldata.GetSchemaResponse
- (*vtctldata.GetShardResponse)(nil), // 114: vtctldata.GetShardResponse
- (*vtctldata.GetShardRoutingRulesResponse)(nil), // 115: vtctldata.GetShardRoutingRulesResponse
- (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 116: vtctldata.GetSrvKeyspaceNamesResponse
- (*vtctldata.GetSrvKeyspacesResponse)(nil), // 117: vtctldata.GetSrvKeyspacesResponse
- (*vtctldata.GetSrvVSchemaResponse)(nil), // 118: vtctldata.GetSrvVSchemaResponse
- (*vtctldata.GetSrvVSchemasResponse)(nil), // 119: vtctldata.GetSrvVSchemasResponse
- (*vtctldata.GetTabletResponse)(nil), // 120: vtctldata.GetTabletResponse
- (*vtctldata.GetTabletsResponse)(nil), // 121: vtctldata.GetTabletsResponse
- (*vtctldata.GetVersionResponse)(nil), // 122: vtctldata.GetVersionResponse
- (*vtctldata.GetVSchemaResponse)(nil), // 123: vtctldata.GetVSchemaResponse
- (*vtctldata.GetWorkflowsResponse)(nil), // 124: vtctldata.GetWorkflowsResponse
- (*vtctldata.InitShardPrimaryResponse)(nil), // 125: vtctldata.InitShardPrimaryResponse
- (*vtctldata.PingTabletResponse)(nil), // 126: vtctldata.PingTabletResponse
- (*vtctldata.PlannedReparentShardResponse)(nil), // 127: vtctldata.PlannedReparentShardResponse
- (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 128: vtctldata.RebuildKeyspaceGraphResponse
- (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 129: vtctldata.RebuildVSchemaGraphResponse
- (*vtctldata.RefreshStateResponse)(nil), // 130: vtctldata.RefreshStateResponse
- (*vtctldata.RefreshStateByShardResponse)(nil), // 131: vtctldata.RefreshStateByShardResponse
- (*vtctldata.ReloadSchemaResponse)(nil), // 132: vtctldata.ReloadSchemaResponse
- (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 133: vtctldata.ReloadSchemaKeyspaceResponse
- (*vtctldata.ReloadSchemaShardResponse)(nil), // 134: vtctldata.ReloadSchemaShardResponse
- (*vtctldata.RemoveBackupResponse)(nil), // 135: vtctldata.RemoveBackupResponse
- (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 136: vtctldata.RemoveKeyspaceCellResponse
- (*vtctldata.RemoveShardCellResponse)(nil), // 137: vtctldata.RemoveShardCellResponse
- (*vtctldata.ReparentTabletResponse)(nil), // 138: vtctldata.ReparentTabletResponse
- (*vtctldata.RestoreFromBackupResponse)(nil), // 139: vtctldata.RestoreFromBackupResponse
- (*vtctldata.RunHealthCheckResponse)(nil), // 140: vtctldata.RunHealthCheckResponse
- (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 141: vtctldata.SetKeyspaceDurabilityPolicyResponse
- (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 142: vtctldata.SetShardIsPrimaryServingResponse
- (*vtctldata.SetShardTabletControlResponse)(nil), // 143: vtctldata.SetShardTabletControlResponse
- (*vtctldata.SetWritableResponse)(nil), // 144: vtctldata.SetWritableResponse
- (*vtctldata.ShardReplicationAddResponse)(nil), // 145: vtctldata.ShardReplicationAddResponse
- (*vtctldata.ShardReplicationFixResponse)(nil), // 146: vtctldata.ShardReplicationFixResponse
- (*vtctldata.ShardReplicationPositionsResponse)(nil), // 147: vtctldata.ShardReplicationPositionsResponse
- (*vtctldata.ShardReplicationRemoveResponse)(nil), // 148: vtctldata.ShardReplicationRemoveResponse
- (*vtctldata.SleepTabletResponse)(nil), // 149: vtctldata.SleepTabletResponse
- (*vtctldata.SourceShardAddResponse)(nil), // 150: vtctldata.SourceShardAddResponse
- (*vtctldata.SourceShardDeleteResponse)(nil), // 151: vtctldata.SourceShardDeleteResponse
- (*vtctldata.StartReplicationResponse)(nil), // 152: vtctldata.StartReplicationResponse
- (*vtctldata.StopReplicationResponse)(nil), // 153: vtctldata.StopReplicationResponse
- (*vtctldata.TabletExternallyReparentedResponse)(nil), // 154: vtctldata.TabletExternallyReparentedResponse
- (*vtctldata.UpdateCellInfoResponse)(nil), // 155: vtctldata.UpdateCellInfoResponse
- (*vtctldata.UpdateCellsAliasResponse)(nil), // 156: vtctldata.UpdateCellsAliasResponse
- (*vtctldata.ValidateResponse)(nil), // 157: vtctldata.ValidateResponse
- (*vtctldata.ValidateKeyspaceResponse)(nil), // 158: vtctldata.ValidateKeyspaceResponse
- (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 159: vtctldata.ValidateSchemaKeyspaceResponse
- (*vtctldata.ValidateShardResponse)(nil), // 160: vtctldata.ValidateShardResponse
- (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 161: vtctldata.ValidateVersionKeyspaceResponse
- (*vtctldata.ValidateVSchemaResponse)(nil), // 162: vtctldata.ValidateVSchemaResponse
+ (*vtctldata.UpdateThrottlerConfigRequest)(nil), // 37: vtctldata.UpdateThrottlerConfigRequest
+ (*vtctldata.GetSrvVSchemaRequest)(nil), // 38: vtctldata.GetSrvVSchemaRequest
+ (*vtctldata.GetSrvVSchemasRequest)(nil), // 39: vtctldata.GetSrvVSchemasRequest
+ (*vtctldata.GetTabletRequest)(nil), // 40: vtctldata.GetTabletRequest
+ (*vtctldata.GetTabletsRequest)(nil), // 41: vtctldata.GetTabletsRequest
+ (*vtctldata.GetTopologyPathRequest)(nil), // 42: vtctldata.GetTopologyPathRequest
+ (*vtctldata.GetVersionRequest)(nil), // 43: vtctldata.GetVersionRequest
+ (*vtctldata.GetVSchemaRequest)(nil), // 44: vtctldata.GetVSchemaRequest
+ (*vtctldata.GetWorkflowsRequest)(nil), // 45: vtctldata.GetWorkflowsRequest
+ (*vtctldata.InitShardPrimaryRequest)(nil), // 46: vtctldata.InitShardPrimaryRequest
+ (*vtctldata.PingTabletRequest)(nil), // 47: vtctldata.PingTabletRequest
+ (*vtctldata.PlannedReparentShardRequest)(nil), // 48: vtctldata.PlannedReparentShardRequest
+ (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 49: vtctldata.RebuildKeyspaceGraphRequest
+ (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 50: vtctldata.RebuildVSchemaGraphRequest
+ (*vtctldata.RefreshStateRequest)(nil), // 51: vtctldata.RefreshStateRequest
+ (*vtctldata.RefreshStateByShardRequest)(nil), // 52: vtctldata.RefreshStateByShardRequest
+ (*vtctldata.ReloadSchemaRequest)(nil), // 53: vtctldata.ReloadSchemaRequest
+ (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 54: vtctldata.ReloadSchemaKeyspaceRequest
+ (*vtctldata.ReloadSchemaShardRequest)(nil), // 55: vtctldata.ReloadSchemaShardRequest
+ (*vtctldata.RemoveBackupRequest)(nil), // 56: vtctldata.RemoveBackupRequest
+ (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 57: vtctldata.RemoveKeyspaceCellRequest
+ (*vtctldata.RemoveShardCellRequest)(nil), // 58: vtctldata.RemoveShardCellRequest
+ (*vtctldata.ReparentTabletRequest)(nil), // 59: vtctldata.ReparentTabletRequest
+ (*vtctldata.RestoreFromBackupRequest)(nil), // 60: vtctldata.RestoreFromBackupRequest
+ (*vtctldata.RunHealthCheckRequest)(nil), // 61: vtctldata.RunHealthCheckRequest
+ (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 62: vtctldata.SetKeyspaceDurabilityPolicyRequest
+ (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 63: vtctldata.SetShardIsPrimaryServingRequest
+ (*vtctldata.SetShardTabletControlRequest)(nil), // 64: vtctldata.SetShardTabletControlRequest
+ (*vtctldata.SetWritableRequest)(nil), // 65: vtctldata.SetWritableRequest
+ (*vtctldata.ShardReplicationAddRequest)(nil), // 66: vtctldata.ShardReplicationAddRequest
+ (*vtctldata.ShardReplicationFixRequest)(nil), // 67: vtctldata.ShardReplicationFixRequest
+ (*vtctldata.ShardReplicationPositionsRequest)(nil), // 68: vtctldata.ShardReplicationPositionsRequest
+ (*vtctldata.ShardReplicationRemoveRequest)(nil), // 69: vtctldata.ShardReplicationRemoveRequest
+ (*vtctldata.SleepTabletRequest)(nil), // 70: vtctldata.SleepTabletRequest
+ (*vtctldata.SourceShardAddRequest)(nil), // 71: vtctldata.SourceShardAddRequest
+ (*vtctldata.SourceShardDeleteRequest)(nil), // 72: vtctldata.SourceShardDeleteRequest
+ (*vtctldata.StartReplicationRequest)(nil), // 73: vtctldata.StartReplicationRequest
+ (*vtctldata.StopReplicationRequest)(nil), // 74: vtctldata.StopReplicationRequest
+ (*vtctldata.TabletExternallyReparentedRequest)(nil), // 75: vtctldata.TabletExternallyReparentedRequest
+ (*vtctldata.UpdateCellInfoRequest)(nil), // 76: vtctldata.UpdateCellInfoRequest
+ (*vtctldata.UpdateCellsAliasRequest)(nil), // 77: vtctldata.UpdateCellsAliasRequest
+ (*vtctldata.ValidateRequest)(nil), // 78: vtctldata.ValidateRequest
+ (*vtctldata.ValidateKeyspaceRequest)(nil), // 79: vtctldata.ValidateKeyspaceRequest
+ (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 80: vtctldata.ValidateSchemaKeyspaceRequest
+ (*vtctldata.ValidateShardRequest)(nil), // 81: vtctldata.ValidateShardRequest
+ (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 82: vtctldata.ValidateVersionKeyspaceRequest
+ (*vtctldata.ValidateVersionShardRequest)(nil), // 83: vtctldata.ValidateVersionShardRequest
+ (*vtctldata.ValidateVSchemaRequest)(nil), // 84: vtctldata.ValidateVSchemaRequest
+ (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 85: vtctldata.ExecuteVtctlCommandResponse
+ (*vtctldata.AddCellInfoResponse)(nil), // 86: vtctldata.AddCellInfoResponse
+ (*vtctldata.AddCellsAliasResponse)(nil), // 87: vtctldata.AddCellsAliasResponse
+ (*vtctldata.ApplyRoutingRulesResponse)(nil), // 88: vtctldata.ApplyRoutingRulesResponse
+ (*vtctldata.ApplySchemaResponse)(nil), // 89: vtctldata.ApplySchemaResponse
+ (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 90: vtctldata.ApplyShardRoutingRulesResponse
+ (*vtctldata.ApplyVSchemaResponse)(nil), // 91: vtctldata.ApplyVSchemaResponse
+ (*vtctldata.BackupResponse)(nil), // 92: vtctldata.BackupResponse
+ (*vtctldata.ChangeTabletTypeResponse)(nil), // 93: vtctldata.ChangeTabletTypeResponse
+ (*vtctldata.CreateKeyspaceResponse)(nil), // 94: vtctldata.CreateKeyspaceResponse
+ (*vtctldata.CreateShardResponse)(nil), // 95: vtctldata.CreateShardResponse
+ (*vtctldata.DeleteCellInfoResponse)(nil), // 96: vtctldata.DeleteCellInfoResponse
+ (*vtctldata.DeleteCellsAliasResponse)(nil), // 97: vtctldata.DeleteCellsAliasResponse
+ (*vtctldata.DeleteKeyspaceResponse)(nil), // 98: vtctldata.DeleteKeyspaceResponse
+ (*vtctldata.DeleteShardsResponse)(nil), // 99: vtctldata.DeleteShardsResponse
+ (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 100: vtctldata.DeleteSrvVSchemaResponse
+ (*vtctldata.DeleteTabletsResponse)(nil), // 101: vtctldata.DeleteTabletsResponse
+ (*vtctldata.EmergencyReparentShardResponse)(nil), // 102: vtctldata.EmergencyReparentShardResponse
+ (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 103: vtctldata.ExecuteFetchAsAppResponse
+ (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 104: vtctldata.ExecuteFetchAsDBAResponse
+ (*vtctldata.ExecuteHookResponse)(nil), // 105: vtctldata.ExecuteHookResponse
+ (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 106: vtctldata.FindAllShardsInKeyspaceResponse
+ (*vtctldata.GetBackupsResponse)(nil), // 107: vtctldata.GetBackupsResponse
+ (*vtctldata.GetCellInfoResponse)(nil), // 108: vtctldata.GetCellInfoResponse
+ (*vtctldata.GetCellInfoNamesResponse)(nil), // 109: vtctldata.GetCellInfoNamesResponse
+ (*vtctldata.GetCellsAliasesResponse)(nil), // 110: vtctldata.GetCellsAliasesResponse
+ (*vtctldata.GetFullStatusResponse)(nil), // 111: vtctldata.GetFullStatusResponse
+ (*vtctldata.GetKeyspaceResponse)(nil), // 112: vtctldata.GetKeyspaceResponse
+ (*vtctldata.GetKeyspacesResponse)(nil), // 113: vtctldata.GetKeyspacesResponse
+ (*vtctldata.GetPermissionsResponse)(nil), // 114: vtctldata.GetPermissionsResponse
+ (*vtctldata.GetRoutingRulesResponse)(nil), // 115: vtctldata.GetRoutingRulesResponse
+ (*vtctldata.GetSchemaResponse)(nil), // 116: vtctldata.GetSchemaResponse
+ (*vtctldata.GetShardResponse)(nil), // 117: vtctldata.GetShardResponse
+ (*vtctldata.GetShardRoutingRulesResponse)(nil), // 118: vtctldata.GetShardRoutingRulesResponse
+ (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 119: vtctldata.GetSrvKeyspaceNamesResponse
+ (*vtctldata.GetSrvKeyspacesResponse)(nil), // 120: vtctldata.GetSrvKeyspacesResponse
+ (*vtctldata.UpdateThrottlerConfigResponse)(nil), // 121: vtctldata.UpdateThrottlerConfigResponse
+ (*vtctldata.GetSrvVSchemaResponse)(nil), // 122: vtctldata.GetSrvVSchemaResponse
+ (*vtctldata.GetSrvVSchemasResponse)(nil), // 123: vtctldata.GetSrvVSchemasResponse
+ (*vtctldata.GetTabletResponse)(nil), // 124: vtctldata.GetTabletResponse
+ (*vtctldata.GetTabletsResponse)(nil), // 125: vtctldata.GetTabletsResponse
+ (*vtctldata.GetTopologyPathResponse)(nil), // 126: vtctldata.GetTopologyPathResponse
+ (*vtctldata.GetVersionResponse)(nil), // 127: vtctldata.GetVersionResponse
+ (*vtctldata.GetVSchemaResponse)(nil), // 128: vtctldata.GetVSchemaResponse
+ (*vtctldata.GetWorkflowsResponse)(nil), // 129: vtctldata.GetWorkflowsResponse
+ (*vtctldata.InitShardPrimaryResponse)(nil), // 130: vtctldata.InitShardPrimaryResponse
+ (*vtctldata.PingTabletResponse)(nil), // 131: vtctldata.PingTabletResponse
+ (*vtctldata.PlannedReparentShardResponse)(nil), // 132: vtctldata.PlannedReparentShardResponse
+ (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 133: vtctldata.RebuildKeyspaceGraphResponse
+ (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 134: vtctldata.RebuildVSchemaGraphResponse
+ (*vtctldata.RefreshStateResponse)(nil), // 135: vtctldata.RefreshStateResponse
+ (*vtctldata.RefreshStateByShardResponse)(nil), // 136: vtctldata.RefreshStateByShardResponse
+ (*vtctldata.ReloadSchemaResponse)(nil), // 137: vtctldata.ReloadSchemaResponse
+ (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 138: vtctldata.ReloadSchemaKeyspaceResponse
+ (*vtctldata.ReloadSchemaShardResponse)(nil), // 139: vtctldata.ReloadSchemaShardResponse
+ (*vtctldata.RemoveBackupResponse)(nil), // 140: vtctldata.RemoveBackupResponse
+ (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 141: vtctldata.RemoveKeyspaceCellResponse
+ (*vtctldata.RemoveShardCellResponse)(nil), // 142: vtctldata.RemoveShardCellResponse
+ (*vtctldata.ReparentTabletResponse)(nil), // 143: vtctldata.ReparentTabletResponse
+ (*vtctldata.RestoreFromBackupResponse)(nil), // 144: vtctldata.RestoreFromBackupResponse
+ (*vtctldata.RunHealthCheckResponse)(nil), // 145: vtctldata.RunHealthCheckResponse
+ (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 146: vtctldata.SetKeyspaceDurabilityPolicyResponse
+ (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 147: vtctldata.SetShardIsPrimaryServingResponse
+ (*vtctldata.SetShardTabletControlResponse)(nil), // 148: vtctldata.SetShardTabletControlResponse
+ (*vtctldata.SetWritableResponse)(nil), // 149: vtctldata.SetWritableResponse
+ (*vtctldata.ShardReplicationAddResponse)(nil), // 150: vtctldata.ShardReplicationAddResponse
+ (*vtctldata.ShardReplicationFixResponse)(nil), // 151: vtctldata.ShardReplicationFixResponse
+ (*vtctldata.ShardReplicationPositionsResponse)(nil), // 152: vtctldata.ShardReplicationPositionsResponse
+ (*vtctldata.ShardReplicationRemoveResponse)(nil), // 153: vtctldata.ShardReplicationRemoveResponse
+ (*vtctldata.SleepTabletResponse)(nil), // 154: vtctldata.SleepTabletResponse
+ (*vtctldata.SourceShardAddResponse)(nil), // 155: vtctldata.SourceShardAddResponse
+ (*vtctldata.SourceShardDeleteResponse)(nil), // 156: vtctldata.SourceShardDeleteResponse
+ (*vtctldata.StartReplicationResponse)(nil), // 157: vtctldata.StartReplicationResponse
+ (*vtctldata.StopReplicationResponse)(nil), // 158: vtctldata.StopReplicationResponse
+ (*vtctldata.TabletExternallyReparentedResponse)(nil), // 159: vtctldata.TabletExternallyReparentedResponse
+ (*vtctldata.UpdateCellInfoResponse)(nil), // 160: vtctldata.UpdateCellInfoResponse
+ (*vtctldata.UpdateCellsAliasResponse)(nil), // 161: vtctldata.UpdateCellsAliasResponse
+ (*vtctldata.ValidateResponse)(nil), // 162: vtctldata.ValidateResponse
+ (*vtctldata.ValidateKeyspaceResponse)(nil), // 163: vtctldata.ValidateKeyspaceResponse
+ (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 164: vtctldata.ValidateSchemaKeyspaceResponse
+ (*vtctldata.ValidateShardResponse)(nil), // 165: vtctldata.ValidateShardResponse
+ (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 166: vtctldata.ValidateVersionKeyspaceResponse
+ (*vtctldata.ValidateVersionShardResponse)(nil), // 167: vtctldata.ValidateVersionShardResponse
+ (*vtctldata.ValidateVSchemaResponse)(nil), // 168: vtctldata.ValidateVSchemaResponse
}
var file_vtctlservice_proto_depIdxs = []int32{
0, // 0: vtctlservice.Vtctl.ExecuteVtctlCommand:input_type -> vtctldata.ExecuteVtctlCommandRequest
@@ -730,135 +755,141 @@ var file_vtctlservice_proto_depIdxs = []int32{
34, // 34: vtctlservice.Vtctld.GetShardRoutingRules:input_type -> vtctldata.GetShardRoutingRulesRequest
35, // 35: vtctlservice.Vtctld.GetSrvKeyspaceNames:input_type -> vtctldata.GetSrvKeyspaceNamesRequest
36, // 36: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest
- 37, // 37: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest
- 38, // 38: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest
- 39, // 39: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest
- 40, // 40: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest
- 41, // 41: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest
- 42, // 42: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest
- 43, // 43: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest
- 44, // 44: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest
- 45, // 45: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest
- 46, // 46: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest
- 47, // 47: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest
- 48, // 48: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest
- 49, // 49: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest
- 50, // 50: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest
- 51, // 51: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest
- 52, // 52: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest
- 53, // 53: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest
- 54, // 54: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest
- 55, // 55: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest
- 56, // 56: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest
- 57, // 57: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest
- 58, // 58: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest
- 59, // 59: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest
- 60, // 60: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest
- 61, // 61: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest
- 62, // 62: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest
- 63, // 63: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest
- 64, // 64: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest
- 65, // 65: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest
- 66, // 66: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest
- 67, // 67: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest
- 68, // 68: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest
- 69, // 69: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest
- 70, // 70: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest
- 71, // 71: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest
- 72, // 72: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest
- 73, // 73: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest
- 74, // 74: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest
- 75, // 75: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest
- 76, // 76: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest
- 77, // 77: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest
- 78, // 78: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest
- 79, // 79: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest
- 80, // 80: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest
- 81, // 81: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest
- 82, // 82: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse
- 83, // 83: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse
- 84, // 84: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse
- 85, // 85: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse
- 86, // 86: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse
- 87, // 87: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse
- 88, // 88: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse
- 89, // 89: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse
- 89, // 90: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse
- 90, // 91: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse
- 91, // 92: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse
- 92, // 93: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse
- 93, // 94: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse
- 94, // 95: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse
- 95, // 96: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
- 96, // 97: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
- 97, // 98: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse
- 98, // 99: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse
- 99, // 100: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse
- 100, // 101: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse
- 101, // 102: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse
- 102, // 103: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse
- 103, // 104: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse
- 104, // 105: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse
- 105, // 106: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse
- 106, // 107: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse
- 107, // 108: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse
- 108, // 109: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse
- 109, // 110: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse
- 110, // 111: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse
- 111, // 112: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse
- 112, // 113: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse
- 113, // 114: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse
- 114, // 115: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse
- 115, // 116: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse
- 116, // 117: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse
- 117, // 118: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse
- 118, // 119: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse
- 119, // 120: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse
- 120, // 121: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse
- 121, // 122: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse
- 122, // 123: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse
- 123, // 124: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse
- 124, // 125: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse
- 125, // 126: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse
- 126, // 127: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse
- 127, // 128: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse
- 128, // 129: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse
- 129, // 130: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse
- 130, // 131: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse
- 131, // 132: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse
- 132, // 133: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse
- 133, // 134: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse
- 134, // 135: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse
- 135, // 136: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse
- 136, // 137: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse
- 137, // 138: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse
- 138, // 139: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse
- 139, // 140: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse
- 140, // 141: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse
- 141, // 142: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse
- 142, // 143: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse
- 143, // 144: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse
- 144, // 145: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse
- 145, // 146: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse
- 146, // 147: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse
- 147, // 148: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse
- 148, // 149: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse
- 149, // 150: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse
- 150, // 151: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse
- 151, // 152: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse
- 152, // 153: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse
- 153, // 154: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse
- 154, // 155: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse
- 155, // 156: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse
- 156, // 157: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse
- 157, // 158: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse
- 158, // 159: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
- 159, // 160: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
- 160, // 161: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse
- 161, // 162: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
- 162, // 163: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse
- 82, // [82:164] is the sub-list for method output_type
- 0, // [0:82] is the sub-list for method input_type
+ 37, // 37: vtctlservice.Vtctld.UpdateThrottlerConfig:input_type -> vtctldata.UpdateThrottlerConfigRequest
+ 38, // 38: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest
+ 39, // 39: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest
+ 40, // 40: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest
+ 41, // 41: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest
+ 42, // 42: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest
+ 43, // 43: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest
+ 44, // 44: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest
+ 45, // 45: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest
+ 46, // 46: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest
+ 47, // 47: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest
+ 48, // 48: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest
+ 49, // 49: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest
+ 50, // 50: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest
+ 51, // 51: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest
+ 52, // 52: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest
+ 53, // 53: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest
+ 54, // 54: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest
+ 55, // 55: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest
+ 56, // 56: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest
+ 57, // 57: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest
+ 58, // 58: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest
+ 59, // 59: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest
+ 60, // 60: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest
+ 61, // 61: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest
+ 62, // 62: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest
+ 63, // 63: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest
+ 64, // 64: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest
+ 65, // 65: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest
+ 66, // 66: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest
+ 67, // 67: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest
+ 68, // 68: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest
+ 69, // 69: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest
+ 70, // 70: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest
+ 71, // 71: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest
+ 72, // 72: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest
+ 73, // 73: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest
+ 74, // 74: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest
+ 75, // 75: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest
+ 76, // 76: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest
+ 77, // 77: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest
+ 78, // 78: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest
+ 79, // 79: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest
+ 80, // 80: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest
+ 81, // 81: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest
+ 82, // 82: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest
+ 83, // 83: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest
+ 84, // 84: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest
+ 85, // 85: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse
+ 86, // 86: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse
+ 87, // 87: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse
+ 88, // 88: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse
+ 89, // 89: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse
+ 90, // 90: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse
+ 91, // 91: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse
+ 92, // 92: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse
+ 92, // 93: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse
+ 93, // 94: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse
+ 94, // 95: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse
+ 95, // 96: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse
+ 96, // 97: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse
+ 97, // 98: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse
+ 98, // 99: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
+ 99, // 100: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
+ 100, // 101: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse
+ 101, // 102: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse
+ 102, // 103: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse
+ 103, // 104: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse
+ 104, // 105: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse
+ 105, // 106: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse
+ 106, // 107: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse
+ 107, // 108: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse
+ 108, // 109: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse
+ 109, // 110: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse
+ 110, // 111: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse
+ 111, // 112: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse
+ 112, // 113: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse
+ 113, // 114: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse
+ 114, // 115: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse
+ 115, // 116: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse
+ 116, // 117: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse
+ 117, // 118: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse
+ 118, // 119: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse
+ 119, // 120: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse
+ 120, // 121: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse
+ 121, // 122: vtctlservice.Vtctld.UpdateThrottlerConfig:output_type -> vtctldata.UpdateThrottlerConfigResponse
+ 122, // 123: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse
+ 123, // 124: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse
+ 124, // 125: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse
+ 125, // 126: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse
+ 126, // 127: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse
+ 127, // 128: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse
+ 128, // 129: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse
+ 129, // 130: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse
+ 130, // 131: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse
+ 131, // 132: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse
+ 132, // 133: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse
+ 133, // 134: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse
+ 134, // 135: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse
+ 135, // 136: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse
+ 136, // 137: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse
+ 137, // 138: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse
+ 138, // 139: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse
+ 139, // 140: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse
+ 140, // 141: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse
+ 141, // 142: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse
+ 142, // 143: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse
+ 143, // 144: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse
+ 144, // 145: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse
+ 145, // 146: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse
+ 146, // 147: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse
+ 147, // 148: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse
+ 148, // 149: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse
+ 149, // 150: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse
+ 150, // 151: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse
+ 151, // 152: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse
+ 152, // 153: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse
+ 153, // 154: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse
+ 154, // 155: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse
+ 155, // 156: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse
+ 156, // 157: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse
+ 157, // 158: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse
+ 158, // 159: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse
+ 159, // 160: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse
+ 160, // 161: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse
+ 161, // 162: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse
+ 162, // 163: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse
+ 163, // 164: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
+ 164, // 165: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
+ 165, // 166: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse
+ 166, // 167: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
+ 167, // 168: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse
+ 168, // 169: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse
+ 85, // [85:170] is the sub-list for method output_type
+ 0, // [0:85] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
diff --git a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go
index 8173b8c5ed0..c97a10edd16 100644
--- a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go
+++ b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go
@@ -235,6 +235,8 @@ type VtctldClient interface {
// GetSrvKeyspaces returns the SrvKeyspaces for a keyspace in one or more
// cells.
GetSrvKeyspaces(ctx context.Context, in *vtctldata.GetSrvKeyspacesRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvKeyspacesResponse, error)
+ // UpdateThrottlerConfig updates the tablet throttler configuration
+ UpdateThrottlerConfig(ctx context.Context, in *vtctldata.UpdateThrottlerConfigRequest, opts ...grpc.CallOption) (*vtctldata.UpdateThrottlerConfigResponse, error)
// GetSrvVSchema returns the SrvVSchema for a cell.
GetSrvVSchema(ctx context.Context, in *vtctldata.GetSrvVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvVSchemaResponse, error)
// GetSrvVSchemas returns a mapping from cell name to SrvVSchema for all cells,
@@ -244,6 +246,8 @@ type VtctldClient interface {
GetTablet(ctx context.Context, in *vtctldata.GetTabletRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletResponse, error)
// GetTablets returns tablets, optionally filtered by keyspace and shard.
GetTablets(ctx context.Context, in *vtctldata.GetTabletsRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletsResponse, error)
+ // GetTopologyPath returns the topology cell at a given path.
+ GetTopologyPath(ctx context.Context, in *vtctldata.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error)
// GetVersion returns the version of a tablet from its debug vars.
GetVersion(ctx context.Context, in *vtctldata.GetVersionRequest, opts ...grpc.CallOption) (*vtctldata.GetVersionResponse, error)
// GetVSchema returns the vschema for a keyspace.
@@ -390,6 +394,8 @@ type VtctldClient interface {
ValidateShard(ctx context.Context, in *vtctldata.ValidateShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(ctx context.Context, in *vtctldata.ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(ctx context.Context, in *vtctldata.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error)
// ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences.
ValidateVSchema(ctx context.Context, in *vtctldata.ValidateVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVSchemaResponse, error)
}
@@ -772,6 +778,15 @@ func (c *vtctldClient) GetSrvKeyspaces(ctx context.Context, in *vtctldata.GetSrv
return out, nil
}
+func (c *vtctldClient) UpdateThrottlerConfig(ctx context.Context, in *vtctldata.UpdateThrottlerConfigRequest, opts ...grpc.CallOption) (*vtctldata.UpdateThrottlerConfigResponse, error) {
+ out := new(vtctldata.UpdateThrottlerConfigResponse)
+ err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/UpdateThrottlerConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vtctldClient) GetSrvVSchema(ctx context.Context, in *vtctldata.GetSrvVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvVSchemaResponse, error) {
out := new(vtctldata.GetSrvVSchemaResponse)
err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetSrvVSchema", in, out, opts...)
@@ -808,6 +823,15 @@ func (c *vtctldClient) GetTablets(ctx context.Context, in *vtctldata.GetTabletsR
return out, nil
}
+func (c *vtctldClient) GetTopologyPath(ctx context.Context, in *vtctldata.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error) {
+ out := new(vtctldata.GetTopologyPathResponse)
+ err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetTopologyPath", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vtctldClient) GetVersion(ctx context.Context, in *vtctldata.GetVersionRequest, opts ...grpc.CallOption) (*vtctldata.GetVersionResponse, error) {
out := new(vtctldata.GetVersionResponse)
err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetVersion", in, out, opts...)
@@ -1191,6 +1215,15 @@ func (c *vtctldClient) ValidateVersionKeyspace(ctx context.Context, in *vtctldat
return out, nil
}
+func (c *vtctldClient) ValidateVersionShard(ctx context.Context, in *vtctldata.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error) {
+ out := new(vtctldata.ValidateVersionShardResponse)
+ err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ValidateVersionShard", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vtctldClient) ValidateVSchema(ctx context.Context, in *vtctldata.ValidateVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVSchemaResponse, error) {
out := new(vtctldata.ValidateVSchemaResponse)
err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ValidateVSchema", in, out, opts...)
@@ -1303,6 +1336,8 @@ type VtctldServer interface {
// GetSrvKeyspaces returns the SrvKeyspaces for a keyspace in one or more
// cells.
GetSrvKeyspaces(context.Context, *vtctldata.GetSrvKeyspacesRequest) (*vtctldata.GetSrvKeyspacesResponse, error)
+ // UpdateThrottlerConfig updates the tablet throttler configuration
+ UpdateThrottlerConfig(context.Context, *vtctldata.UpdateThrottlerConfigRequest) (*vtctldata.UpdateThrottlerConfigResponse, error)
// GetSrvVSchema returns the SrvVSchema for a cell.
GetSrvVSchema(context.Context, *vtctldata.GetSrvVSchemaRequest) (*vtctldata.GetSrvVSchemaResponse, error)
// GetSrvVSchemas returns a mapping from cell name to SrvVSchema for all cells,
@@ -1312,6 +1347,8 @@ type VtctldServer interface {
GetTablet(context.Context, *vtctldata.GetTabletRequest) (*vtctldata.GetTabletResponse, error)
// GetTablets returns tablets, optionally filtered by keyspace and shard.
GetTablets(context.Context, *vtctldata.GetTabletsRequest) (*vtctldata.GetTabletsResponse, error)
+ // GetTopologyPath returns the topology cell at a given path.
+ GetTopologyPath(context.Context, *vtctldata.GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error)
// GetVersion returns the version of a tablet from its debug vars.
GetVersion(context.Context, *vtctldata.GetVersionRequest) (*vtctldata.GetVersionResponse, error)
// GetVSchema returns the vschema for a keyspace.
@@ -1458,6 +1495,8 @@ type VtctldServer interface {
ValidateShard(context.Context, *vtctldata.ValidateShardRequest) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(context.Context, *vtctldata.ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(context.Context, *vtctldata.ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error)
// ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences.
ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error)
mustEmbedUnimplementedVtctldServer()
@@ -1575,6 +1614,9 @@ func (UnimplementedVtctldServer) GetSrvKeyspaceNames(context.Context, *vtctldata
func (UnimplementedVtctldServer) GetSrvKeyspaces(context.Context, *vtctldata.GetSrvKeyspacesRequest) (*vtctldata.GetSrvKeyspacesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSrvKeyspaces not implemented")
}
+func (UnimplementedVtctldServer) UpdateThrottlerConfig(context.Context, *vtctldata.UpdateThrottlerConfigRequest) (*vtctldata.UpdateThrottlerConfigResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateThrottlerConfig not implemented")
+}
func (UnimplementedVtctldServer) GetSrvVSchema(context.Context, *vtctldata.GetSrvVSchemaRequest) (*vtctldata.GetSrvVSchemaResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSrvVSchema not implemented")
}
@@ -1587,6 +1629,9 @@ func (UnimplementedVtctldServer) GetTablet(context.Context, *vtctldata.GetTablet
func (UnimplementedVtctldServer) GetTablets(context.Context, *vtctldata.GetTabletsRequest) (*vtctldata.GetTabletsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetTablets not implemented")
}
+func (UnimplementedVtctldServer) GetTopologyPath(context.Context, *vtctldata.GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTopologyPath not implemented")
+}
func (UnimplementedVtctldServer) GetVersion(context.Context, *vtctldata.GetVersionRequest) (*vtctldata.GetVersionResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented")
}
@@ -1707,6 +1752,9 @@ func (UnimplementedVtctldServer) ValidateShard(context.Context, *vtctldata.Valid
func (UnimplementedVtctldServer) ValidateVersionKeyspace(context.Context, *vtctldata.ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionKeyspace not implemented")
}
+func (UnimplementedVtctldServer) ValidateVersionShard(context.Context, *vtctldata.ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionShard not implemented")
+}
func (UnimplementedVtctldServer) ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateVSchema not implemented")
}
@@ -2377,6 +2425,24 @@ func _Vtctld_GetSrvKeyspaces_Handler(srv interface{}, ctx context.Context, dec f
return interceptor(ctx, in, info, handler)
}
+func _Vtctld_UpdateThrottlerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(vtctldata.UpdateThrottlerConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VtctldServer).UpdateThrottlerConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtctlservice.Vtctld/UpdateThrottlerConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VtctldServer).UpdateThrottlerConfig(ctx, req.(*vtctldata.UpdateThrottlerConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Vtctld_GetSrvVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(vtctldata.GetSrvVSchemaRequest)
if err := dec(in); err != nil {
@@ -2449,6 +2515,24 @@ func _Vtctld_GetTablets_Handler(srv interface{}, ctx context.Context, dec func(i
return interceptor(ctx, in, info, handler)
}
+func _Vtctld_GetTopologyPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(vtctldata.GetTopologyPathRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VtctldServer).GetTopologyPath(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtctlservice.Vtctld/GetTopologyPath",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VtctldServer).GetTopologyPath(ctx, req.(*vtctldata.GetTopologyPathRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Vtctld_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(vtctldata.GetVersionRequest)
if err := dec(in); err != nil {
@@ -3172,6 +3256,24 @@ func _Vtctld_ValidateVersionKeyspace_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _Vtctld_ValidateVersionShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(vtctldata.ValidateVersionShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VtctldServer).ValidateVersionShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtctlservice.Vtctld/ValidateVersionShard",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VtctldServer).ValidateVersionShard(ctx, req.(*vtctldata.ValidateVersionShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Vtctld_ValidateVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(vtctldata.ValidateVSchemaRequest)
if err := dec(in); err != nil {
@@ -3333,6 +3435,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetSrvKeyspaces",
Handler: _Vtctld_GetSrvKeyspaces_Handler,
},
+ {
+ MethodName: "UpdateThrottlerConfig",
+ Handler: _Vtctld_UpdateThrottlerConfig_Handler,
+ },
{
MethodName: "GetSrvVSchema",
Handler: _Vtctld_GetSrvVSchema_Handler,
@@ -3349,6 +3455,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetTablets",
Handler: _Vtctld_GetTablets_Handler,
},
+ {
+ MethodName: "GetTopologyPath",
+ Handler: _Vtctld_GetTopologyPath_Handler,
+ },
{
MethodName: "GetVersion",
Handler: _Vtctld_GetVersion_Handler,
@@ -3505,6 +3615,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{
MethodName: "ValidateVersionKeyspace",
Handler: _Vtctld_ValidateVersionKeyspace_Handler,
},
+ {
+ MethodName: "ValidateVersionShard",
+ Handler: _Vtctld_ValidateVersionShard_Handler,
+ },
{
MethodName: "ValidateVSchema",
Handler: _Vtctld_ValidateVSchema_Handler,
diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go
index 11914d8f3fb..8f06d90a76c 100644
--- a/go/vt/proto/vtgate/vtgate.pb.go
+++ b/go/vt/proto/vtgate/vtgate.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vtgate.proto
@@ -222,6 +222,8 @@ type Session struct {
// enable_system_settings defines if we can use reserved connections.
EnableSystemSettings bool `protobuf:"varint,23,opt,name=enable_system_settings,json=enableSystemSettings,proto3" json:"enable_system_settings,omitempty"`
AdvisoryLock map[string]int64 `protobuf:"bytes,24,rep,name=advisory_lock,json=advisoryLock,proto3" json:"advisory_lock,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ // query_timeout is the maximum amount of time a query is permitted to run
+ QueryTimeout int64 `protobuf:"varint,25,opt,name=query_timeout,json=queryTimeout,proto3" json:"query_timeout,omitempty"`
}
func (x *Session) Reset() {
@@ -417,6 +419,13 @@ func (x *Session) GetAdvisoryLock() map[string]int64 {
return nil
}
+func (x *Session) GetQueryTimeout() int64 {
+ if x != nil {
+ return x.QueryTimeout
+ }
+ return 0
+}
+
// ReadAfterWrite contains information regarding gtid set and timeout
// Also if the gtid information needs to be passed to client.
type ReadAfterWrite struct {
@@ -1517,7 +1526,7 @@ var file_vtgate_proto_rawDesc = []byte{
0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x22, 0xfb, 0x0c, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25,
+ 0x74, 0x6f, 0x22, 0xa0, 0x0d, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25,
0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73,
@@ -1592,181 +1601,184 @@ var file_vtgate_proto_rawDesc = []byte{
0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x74,
0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x64, 0x76,
0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c,
- 0x61, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x1a, 0xd8, 0x01, 0x0a,
- 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a,
- 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78,
- 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x76, 0x69, 0x6e,
- 0x64, 0x65, 0x78, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x5c, 0x0a, 0x19, 0x55, 0x73, 0x65, 0x72, 0x44,
- 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69,
- 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56,
- 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x41, 0x64, 0x76,
- 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04,
- 0x22, 0xac, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, 0x74, 0x65,
- 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x47, 0x74, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61,
- 0x66, 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f,
- 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66,
- 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12,
- 0x2e, 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b,
- 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22,
- 0xaa, 0x01, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06,
- 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x8f, 0x01, 0x0a,
- 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72,
- 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74,
- 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
- 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb3,
- 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70,
- 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12,
- 0x2b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75,
- 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04,
- 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04,
- 0x08, 0x07, 0x10, 0x08, 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
- 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a,
- 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12,
- 0x30, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x57,
- 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63,
- 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08,
- 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x03,
- 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x43,
- 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73,
- 0x75, 0x6c, 0x74, 0x22, 0x5d, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12,
- 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74,
- 0x69, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67,
- 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x73, 0x6b,
- 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69,
- 0x7a, 0x65, 0x53, 0x6b, 0x65, 0x77, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62,
- 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74,
- 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x6e,
- 0x5f, 0x72, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d,
- 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x6e, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a,
- 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65,
- 0x6c, 0x6c, 0x73, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70,
- 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76,
- 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e,
- 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76,
- 0x67, 0x74, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
- 0x12, 0x2a, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x14, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0f,
- 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x0e,
- 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c,
- 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07,
+ 0x61, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x12, 0x23, 0x0a, 0x0d,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x19, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x1a, 0xd8, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64,
+ 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76,
+ 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0a, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x5c, 0x0a, 0x19,
+ 0x55, 0x73, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61,
+ 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x53, 0x79,
+ 0x73, 0x74, 0x65, 0x6d, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f,
+ 0x0a, 0x11, 0x41, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a,
+ 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xac, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x66,
+ 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x61, 0x64,
+ 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x67, 0x74, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74,
+ 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x47, 0x74, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x72,
+ 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x11, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x47,
+ 0x74, 0x69, 0x64, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72,
+ 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e,
+ 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a,
+ 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10,
+ 0x08, 0x22, 0x8f, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07,
0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07,
- 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42,
- 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45,
- 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x6e, 0x0a, 0x13,
- 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42,
+ 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
+ 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67,
+ 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f,
+ 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08,
+ 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x45, 0x78,
+ 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72,
+ 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67,
+ 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x57, 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x27, 0x0a,
+ 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52,
+ 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65,
+ 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08,
+ 0x05, 0x10, 0x06, 0x22, 0x43, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65,
+ 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x5d, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x6f,
+ 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63,
+ 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x6f, 0x6c,
+ 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69,
+ 0x7a, 0x65, 0x5f, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d,
+ 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x53, 0x6b, 0x65, 0x77, 0x12, 0x2d, 0x0a, 0x12, 0x68,
+ 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65,
+ 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74,
+ 0x6f, 0x70, 0x5f, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x6e, 0x52, 0x65, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
+ 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74,
+ 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c,
+ 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x56, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67,
+ 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3d, 0x0a, 0x14,
- 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45,
- 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2a, 0x44, 0x0a, 0x0f, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f,
- 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x4d,
- 0x55, 0x4c, 0x54, 0x49, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x57, 0x4f, 0x50, 0x43, 0x10,
- 0x03, 0x2a, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72,
- 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03,
- 0x50, 0x52, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12,
- 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x42,
- 0x36, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69,
- 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75,
+ 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63,
+ 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x06, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75,
+ 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65,
+ 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x22, 0x3d, 0x0a, 0x14, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63,
+ 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x2a, 0x44, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
+ 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+ 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01,
+ 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x54,
+ 0x57, 0x4f, 0x50, 0x43, 0x10, 0x03, 0x2a, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
+ 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10,
+ 0x00, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x52, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f,
+ 0x53, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d,
+ 0x49, 0x54, 0x10, 0x03, 0x42, 0x36, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e,
+ 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/go/vt/proto/vtgate/vtgate_vtproto.pb.go b/go/vt/proto/vtgate/vtgate_vtproto.pb.go
index 8f05e8d01ab..97f92f5e873 100644
--- a/go/vt/proto/vtgate/vtgate_vtproto.pb.go
+++ b/go/vt/proto/vtgate/vtgate_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: vtgate.proto
package vtgate
@@ -127,6 +127,13 @@ func (m *Session) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.QueryTimeout != 0 {
+ i = encodeVarint(dAtA, i, uint64(m.QueryTimeout))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc8
+ }
if len(m.AdvisoryLock) > 0 {
for k := range m.AdvisoryLock {
v := m.AdvisoryLock[k]
@@ -1326,9 +1333,7 @@ func (m *Session_ShardSession) SizeVT() (n int) {
if m.VindexOnly {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1448,9 +1453,10 @@ func (m *Session) SizeVT() (n int) {
n += mapEntrySize + 2 + sov(uint64(mapEntrySize))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
+ if m.QueryTimeout != 0 {
+ n += 2 + sov(uint64(m.QueryTimeout))
}
+ n += len(m.unknownFields)
return n
}
@@ -1470,9 +1476,7 @@ func (m *ReadAfterWrite) SizeVT() (n int) {
if m.SessionTrackGtids {
n += 2
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1494,9 +1498,7 @@ func (m *ExecuteRequest) SizeVT() (n int) {
l = m.Query.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1518,9 +1520,7 @@ func (m *ExecuteResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1544,9 +1544,7 @@ func (m *ExecuteBatchRequest) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1570,9 +1568,7 @@ func (m *ExecuteBatchResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1594,9 +1590,7 @@ func (m *StreamExecuteRequest) SizeVT() (n int) {
l = m.Session.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1610,9 +1604,7 @@ func (m *StreamExecuteResponse) SizeVT() (n int) {
l = m.Result.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1630,9 +1622,7 @@ func (m *ResolveTransactionRequest) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1642,9 +1632,7 @@ func (m *ResolveTransactionResponse) SizeVT() (n int) {
}
var l int
_ = l
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1667,9 +1655,7 @@ func (m *VStreamFlags) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1698,9 +1684,7 @@ func (m *VStreamRequest) SizeVT() (n int) {
l = m.Flags.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1716,9 +1700,7 @@ func (m *VStreamResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1740,9 +1722,7 @@ func (m *PrepareRequest) SizeVT() (n int) {
l = m.Query.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1766,9 +1746,7 @@ func (m *PrepareResponse) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1786,9 +1764,7 @@ func (m *CloseSessionRequest) SizeVT() (n int) {
l = m.Session.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -1802,9 +1778,7 @@ func (m *CloseSessionResponse) SizeVT() (n int) {
l = m.Error.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -2940,6 +2914,25 @@ func (m *Session) UnmarshalVT(dAtA []byte) error {
}
m.AdvisoryLock[mapkey] = mapvalue
iNdEx = postIndex
+ case 25:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field QueryTimeout", wireType)
+ }
+ m.QueryTimeout = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.QueryTimeout |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -5091,6 +5084,7 @@ func (m *CloseSessionResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go
index ce7358d64ec..7c7d049867d 100644
--- a/go/vt/proto/vtgateservice/vtgateservice.pb.go
+++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vtgateservice.proto
diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go
index da29005747c..abde129b474 100644
--- a/go/vt/proto/vtrpc/vtrpc.pb.go
+++ b/go/vt/proto/vtrpc/vtrpc.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vtrpc.proto
@@ -39,7 +39,9 @@ const (
// Code represents canonical error codes. The names, numbers and comments
// must match the ones defined by grpc (0-16):
-// https://godoc.org/google.golang.org/grpc/codes.
+//
+// https://godoc.org/google.golang.org/grpc/codes.
+//
// 17+ are custom codes
type Code int32
@@ -88,18 +90,19 @@ const (
//
// A litmus test that may help a service implementor in deciding
// between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
- // (a) Use UNAVAILABLE if the client can retry just the failing call.
- // (b) Use ABORTED if the client should retry at a higher-level
- // (e.g., restarting a read-modify-write sequence).
- // (c) Use FAILED_PRECONDITION if the client should not retry until
- // the system state has been explicitly fixed. E.g., if an "rmdir"
- // fails because the directory is non-empty, FAILED_PRECONDITION
- // should be returned since the client should not retry unless
- // they have first fixed up the directory by deleting files from it.
- // (d) Use FAILED_PRECONDITION if the client performs conditional
- // REST Get/Update/Delete on a resource and the resource on the
- // server does not match the condition. E.g., conflicting
- // read-modify-write on the same resource.
+ //
+ // (a) Use UNAVAILABLE if the client can retry just the failing call.
+ // (b) Use ABORTED if the client should retry at a higher-level
+ // (e.g., restarting a read-modify-write sequence).
+ // (c) Use FAILED_PRECONDITION if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, FAILED_PRECONDITION
+ // should be returned since the client should not retry unless
+ // they have first fixed up the directory by deleting files from it.
+ // (d) Use FAILED_PRECONDITION if the client performs conditional
+ // REST Get/Update/Delete on a resource and the resource on the
+ // server does not match the condition. E.g., conflicting
+ // read-modify-write on the same resource.
Code_FAILED_PRECONDITION Code = 9
// ABORTED indicates the operation was aborted, typically due to a
// concurrency issue like sequencer check failures, transaction aborts,
diff --git a/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go b/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go
index 9eef53a472d..008fe7aa100 100644
--- a/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go
+++ b/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: vtrpc.proto
package vtrpc
@@ -161,9 +161,7 @@ func (m *CallerID) SizeVT() (n int) {
n += 1 + l + sov(uint64(l))
}
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -180,9 +178,7 @@ func (m *RPCError) SizeVT() (n int) {
if m.Code != 0 {
n += 1 + sov(uint64(m.Code))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -473,6 +469,7 @@ func (m *RPCError) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go
index aaf38b1d046..0ab40f16929 100644
--- a/go/vt/proto/vttest/vttest.pb.go
+++ b/go/vt/proto/vttest/vttest.pb.go
@@ -41,7 +41,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vttest.proto
diff --git a/go/vt/proto/vttest/vttest_vtproto.pb.go b/go/vt/proto/vttest/vttest_vtproto.pb.go
index d8606d83624..a7474446a26 100644
--- a/go/vt/proto/vttest/vttest_vtproto.pb.go
+++ b/go/vt/proto/vttest/vttest_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: vttest.proto
package vttest
@@ -224,9 +224,7 @@ func (m *Shard) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -256,9 +254,7 @@ func (m *Keyspace) SizeVT() (n int) {
if m.RdonlyCount != 0 {
n += 1 + sov(uint64(m.RdonlyCount))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -284,9 +280,7 @@ func (m *VTTestTopology) SizeVT() (n int) {
l = m.RoutingRules.SizeVT()
n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -751,6 +745,7 @@ func (m *VTTestTopology) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vttime/vttime.pb.go b/go/vt/proto/vttime/vttime.pb.go
index d0debf8d2fa..96ac33eecd9 100644
--- a/go/vt/proto/vttime/vttime.pb.go
+++ b/go/vt/proto/vttime/vttime.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: vttime.proto
diff --git a/go/vt/proto/vttime/vttime_vtproto.pb.go b/go/vt/proto/vttime/vttime_vtproto.pb.go
index 2812ff5fe5e..d1e1ce8a4cc 100644
--- a/go/vt/proto/vttime/vttime_vtproto.pb.go
+++ b/go/vt/proto/vttime/vttime_vtproto.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
-// protoc-gen-go-vtproto version: v0.3.0
+// protoc-gen-go-vtproto version: v0.4.0
// source: vttime.proto
package vttime
@@ -127,9 +127,7 @@ func (m *Time) SizeVT() (n int) {
if m.Nanoseconds != 0 {
n += 1 + sov(uint64(m.Nanoseconds))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -145,9 +143,7 @@ func (m *Duration) SizeVT() (n int) {
if m.Nanos != 0 {
n += 1 + sov(uint64(m.Nanos))
}
- if m.unknownFields != nil {
- n += len(m.unknownFields)
- }
+ n += len(m.unknownFields)
return n
}
@@ -335,6 +331,7 @@ func (m *Duration) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+
func skip(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/workflow/workflow.pb.go b/go/vt/proto/workflow/workflow.pb.go
index b102179225a..4482b67f432 100644
--- a/go/vt/proto/workflow/workflow.pb.go
+++ b/go/vt/proto/workflow/workflow.pb.go
@@ -19,7 +19,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.0
+// protoc-gen-go v1.28.1
// protoc v3.21.3
// source: workflow.proto
diff --git a/go/vt/schema/ddl_strategy.go b/go/vt/schema/ddl_strategy.go
index 05e2e15c1f5..d56b8004ab8 100644
--- a/go/vt/schema/ddl_strategy.go
+++ b/go/vt/schema/ddl_strategy.go
@@ -35,17 +35,19 @@ const (
allowZeroInDateFlag = "allow-zero-in-date"
postponeLaunchFlag = "postpone-launch"
postponeCompletionFlag = "postpone-completion"
+ inOrderCompletionFlag = "in-order-completion"
allowConcurrentFlag = "allow-concurrent"
- fastOverRevertibleFlag = "fast-over-revertible"
+ preferInstantDDL = "prefer-instant-ddl"
fastRangeRotationFlag = "fast-range-rotation"
vreplicationTestSuite = "vreplication-test-suite"
+ allowForeignKeysFlag = "unsafe-allow-foreign-keys"
)
// DDLStrategy suggests how an ALTER TABLE should run (e.g. "direct", "online", "gh-ost" or "pt-osc")
type DDLStrategy string
const (
- // DDLStrategyDirect means not an online-ddl migration. Just a normal MySQL ALTER TABLE
+ // DDLStrategyDirect means not an online-ddl migration; unmanaged. Just a normal MySQL `ALTER TABLE`
DDLStrategyDirect DDLStrategy = "direct"
// DDLStrategyVitess requests vreplication to run the migration; new name for DDLStrategyOnline
DDLStrategyVitess DDLStrategy = "vitess"
@@ -55,13 +57,15 @@ const (
DDLStrategyGhost DDLStrategy = "gh-ost"
// DDLStrategyPTOSC requests pt-online-schema-change to run the migration
DDLStrategyPTOSC DDLStrategy = "pt-osc"
+ // DDLStrategyMySQL is a managed migration (queued and executed by the scheduler) but runs through a MySQL `ALTER TABLE`
+ DDLStrategyMySQL DDLStrategy = "mysql"
)
// IsDirect returns true if this strategy is a direct strategy
// A strategy is direct if it's not explciitly one of the online DDL strategies
func (s DDLStrategy) IsDirect() bool {
switch s {
- case DDLStrategyVitess, DDLStrategyOnline, DDLStrategyGhost, DDLStrategyPTOSC:
+ case DDLStrategyVitess, DDLStrategyOnline, DDLStrategyGhost, DDLStrategyPTOSC, DDLStrategyMySQL:
return false
}
return true
@@ -93,7 +97,7 @@ func ParseDDLStrategy(strategyVariable string) (*DDLStrategySetting, error) {
switch strategy := DDLStrategy(strategyName); strategy {
case "": // backward compatiblity and to handle unspecified values
setting.Strategy = DDLStrategyDirect
- case DDLStrategyVitess, DDLStrategyOnline, DDLStrategyGhost, DDLStrategyPTOSC, DDLStrategyDirect:
+ case DDLStrategyVitess, DDLStrategyOnline, DDLStrategyGhost, DDLStrategyPTOSC, DDLStrategyMySQL, DDLStrategyDirect:
setting.Strategy = strategy
default:
return nil, fmt.Errorf("Unknown online DDL strategy: '%v'", strategy)
@@ -123,56 +127,66 @@ func (setting *DDLStrategySetting) hasFlag(name string) bool {
return false
}
-// IsDeclarative checks if strategy options include -declarative
+// IsDeclarative checks if strategy options include --declarative
func (setting *DDLStrategySetting) IsDeclarative() bool {
return setting.hasFlag(declarativeFlag)
}
-// IsSingleton checks if strategy options include -singleton
+// IsSingleton checks if strategy options include --singleton
func (setting *DDLStrategySetting) IsSingleton() bool {
return setting.hasFlag(singletonFlag)
}
-// IsSingletonContext checks if strategy options include -singleton-context
+// IsSingletonContext checks if strategy options include --singleton-context
func (setting *DDLStrategySetting) IsSingletonContext() bool {
return setting.hasFlag(singletonContextFlag)
}
-// IsAllowZeroInDateFlag checks if strategy options include -allow-zero-in-date
+// IsAllowZeroInDateFlag checks if strategy options include --allow-zero-in-date
func (setting *DDLStrategySetting) IsAllowZeroInDateFlag() bool {
return setting.hasFlag(allowZeroInDateFlag)
}
-// IsPostponeLaunch checks if strategy options include -postpone-launch
+// IsPostponeLaunch checks if strategy options include --postpone-launch
func (setting *DDLStrategySetting) IsPostponeLaunch() bool {
return setting.hasFlag(postponeLaunchFlag)
}
-// IsPostponeCompletion checks if strategy options include -postpone-completion
+// IsPostponeCompletion checks if strategy options include --postpone-completion
func (setting *DDLStrategySetting) IsPostponeCompletion() bool {
return setting.hasFlag(postponeCompletionFlag)
}
-// IsAllowConcurrent checks if strategy options include -allow-concurrent
+// IsInOrderCompletion checks if strategy options include --in-order-completion
+func (setting *DDLStrategySetting) IsInOrderCompletion() bool {
+ return setting.hasFlag(inOrderCompletionFlag)
+}
+
+// IsAllowConcurrent checks if strategy options include --allow-concurrent
func (setting *DDLStrategySetting) IsAllowConcurrent() bool {
return setting.hasFlag(allowConcurrentFlag)
}
-// IsFastOverRevertibleFlag checks if strategy options include -fast-over-revertible
-func (setting *DDLStrategySetting) IsFastOverRevertibleFlag() bool {
- return setting.hasFlag(fastOverRevertibleFlag)
+// IsPreferInstantDDL checks if strategy options include --prefer-instant-ddl
+func (setting *DDLStrategySetting) IsPreferInstantDDL() bool {
+ return setting.hasFlag(preferInstantDDL)
}
-// IsFastRangeRotationFlag checks if strategy options include -fast-range-rotation
+// IsFastRangeRotationFlag checks if strategy options include --fast-range-rotation
func (setting *DDLStrategySetting) IsFastRangeRotationFlag() bool {
return setting.hasFlag(fastRangeRotationFlag)
}
-// IsVreplicationTestSuite checks if strategy options include -vreplicatoin-test-suite
+// IsVreplicationTestSuite checks if strategy options include --vreplicatoin-test-suite
func (setting *DDLStrategySetting) IsVreplicationTestSuite() bool {
return setting.hasFlag(vreplicationTestSuite)
}
+// IsAllowForeignKeysFlag checks if strategy options include --unsafe-allow-foreign-keys
+func (setting *DDLStrategySetting) IsAllowForeignKeysFlag() bool {
+ return setting.hasFlag(allowForeignKeysFlag)
+}
+
// RuntimeOptions returns the options used as runtime flags for given strategy, removing any internal hint options
func (setting *DDLStrategySetting) RuntimeOptions() []string {
opts, _ := shlex.Split(setting.Options)
@@ -186,10 +200,12 @@ func (setting *DDLStrategySetting) RuntimeOptions() []string {
case isFlag(opt, allowZeroInDateFlag):
case isFlag(opt, postponeLaunchFlag):
case isFlag(opt, postponeCompletionFlag):
+ case isFlag(opt, inOrderCompletionFlag):
case isFlag(opt, allowConcurrentFlag):
- case isFlag(opt, fastOverRevertibleFlag):
+ case isFlag(opt, preferInstantDDL):
case isFlag(opt, fastRangeRotationFlag):
case isFlag(opt, vreplicationTestSuite):
+ case isFlag(opt, allowForeignKeysFlag):
default:
validOpts = append(validOpts, opt)
}
diff --git a/go/vt/schema/ddl_strategy_test.go b/go/vt/schema/ddl_strategy_test.go
index 8a700655e51..610cb8b9ed3 100644
--- a/go/vt/schema/ddl_strategy_test.go
+++ b/go/vt/schema/ddl_strategy_test.go
@@ -34,6 +34,7 @@ func TestIsDirect(t *testing.T) {
assert.False(t, DDLStrategy("online").IsDirect())
assert.False(t, DDLStrategy("gh-ost").IsDirect())
assert.False(t, DDLStrategy("pt-osc").IsDirect())
+ assert.False(t, DDLStrategy("mysql").IsDirect())
assert.True(t, DDLStrategy("something").IsDirect())
}
@@ -46,9 +47,11 @@ func TestParseDDLStrategy(t *testing.T) {
isSingleton bool
isPostponeLaunch bool
isPostponeCompletion bool
+ isInOrderCompletion bool
isAllowConcurrent bool
fastOverRevertible bool
fastRangeRotation bool
+ allowForeignKeys bool
runtimeOptions string
err error
}{
@@ -72,6 +75,10 @@ func TestParseDDLStrategy(t *testing.T) {
strategyVariable: "pt-osc",
strategy: DDLStrategyPTOSC,
},
+ {
+ strategyVariable: "mysql",
+ strategy: DDLStrategyMySQL,
+ },
{
strategy: DDLStrategyDirect,
},
@@ -117,6 +124,13 @@ func TestParseDDLStrategy(t *testing.T) {
runtimeOptions: "",
isPostponeCompletion: true,
},
+ {
+ strategyVariable: "online --in-order-completion",
+ strategy: DDLStrategyOnline,
+ options: "--in-order-completion",
+ runtimeOptions: "",
+ isInOrderCompletion: true,
+ },
{
strategyVariable: "online -allow-concurrent",
strategy: DDLStrategyOnline,
@@ -132,9 +146,9 @@ func TestParseDDLStrategy(t *testing.T) {
isAllowConcurrent: true,
},
{
- strategyVariable: "vitess --fast-over-revertible",
+ strategyVariable: "vitess --prefer-instant-ddl",
strategy: DDLStrategyVitess,
- options: "--fast-over-revertible",
+ options: "--prefer-instant-ddl",
runtimeOptions: "",
fastOverRevertible: true,
},
@@ -145,22 +159,32 @@ func TestParseDDLStrategy(t *testing.T) {
runtimeOptions: "",
fastRangeRotation: true,
},
+ {
+ strategyVariable: "vitess --unsafe-allow-foreign-keys",
+ strategy: DDLStrategyVitess,
+ options: "--unsafe-allow-foreign-keys",
+ runtimeOptions: "",
+ allowForeignKeys: true,
+ },
}
for _, ts := range tt {
- setting, err := ParseDDLStrategy(ts.strategyVariable)
- assert.NoError(t, err)
- assert.Equal(t, ts.strategy, setting.Strategy)
- assert.Equal(t, ts.options, setting.Options)
- assert.Equal(t, ts.isDeclarative, setting.IsDeclarative())
- assert.Equal(t, ts.isSingleton, setting.IsSingleton())
- assert.Equal(t, ts.isPostponeCompletion, setting.IsPostponeCompletion())
- assert.Equal(t, ts.isPostponeLaunch, setting.IsPostponeLaunch())
- assert.Equal(t, ts.isAllowConcurrent, setting.IsAllowConcurrent())
- assert.Equal(t, ts.fastOverRevertible, setting.IsFastOverRevertibleFlag())
- assert.Equal(t, ts.fastRangeRotation, setting.IsFastRangeRotationFlag())
+ t.Run(ts.strategyVariable, func(t *testing.T) {
+ setting, err := ParseDDLStrategy(ts.strategyVariable)
+ assert.NoError(t, err)
+ assert.Equal(t, ts.strategy, setting.Strategy)
+ assert.Equal(t, ts.options, setting.Options)
+ assert.Equal(t, ts.isDeclarative, setting.IsDeclarative())
+ assert.Equal(t, ts.isSingleton, setting.IsSingleton())
+ assert.Equal(t, ts.isPostponeCompletion, setting.IsPostponeCompletion())
+ assert.Equal(t, ts.isPostponeLaunch, setting.IsPostponeLaunch())
+ assert.Equal(t, ts.isAllowConcurrent, setting.IsAllowConcurrent())
+ assert.Equal(t, ts.fastOverRevertible, setting.IsPreferInstantDDL())
+ assert.Equal(t, ts.fastRangeRotation, setting.IsFastRangeRotationFlag())
+ assert.Equal(t, ts.allowForeignKeys, setting.IsAllowForeignKeysFlag())
- runtimeOptions := strings.Join(setting.RuntimeOptions(), " ")
- assert.Equal(t, ts.runtimeOptions, runtimeOptions)
+ runtimeOptions := strings.Join(setting.RuntimeOptions(), " ")
+ assert.Equal(t, ts.runtimeOptions, runtimeOptions)
+ })
}
{
_, err := ParseDDLStrategy("other")
diff --git a/go/vt/schema/name_test.go b/go/vt/schema/name_test.go
index 7bf418066a0..ab72f80644e 100644
--- a/go/vt/schema/name_test.go
+++ b/go/vt/schema/name_test.go
@@ -22,6 +22,41 @@ import (
"github.com/stretchr/testify/assert"
)
+func TestNameIsGCTableName(t *testing.T) {
+ irrelevantNames := []string{
+ "t",
+ "_table_new",
+ "__table_new",
+ "_table_gho",
+ "_table_ghc",
+ "_table_del",
+ "table_old",
+ "vt_onlineddl_test_02",
+ "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_gho",
+ "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_ghc",
+ "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_del",
+ "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114013_new",
+ "_table_old",
+ "__table_old",
+ }
+ for _, tableName := range irrelevantNames {
+ t.Run(tableName, func(t *testing.T) {
+ assert.False(t, IsGCTableName(tableName))
+ })
+ }
+ relevantNames := []string{
+ "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410",
+ "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410",
+ "_vt_EVAC_6ace8bcef73211ea87e9f875a4d24e90_20200915120410",
+ "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_20200915120410",
+ }
+ for _, tableName := range relevantNames {
+ t.Run(tableName, func(t *testing.T) {
+ assert.True(t, IsGCTableName(tableName))
+ })
+ }
+}
+
func TestIsInternalOperationTableName(t *testing.T) {
names := []string{
"_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_gho",
diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go
index 07740039004..3565680e108 100644
--- a/go/vt/schema/online_ddl.go
+++ b/go/vt/schema/online_ddl.go
@@ -54,13 +54,15 @@ const (
)
// when validateWalk returns true, then the child nodes are also visited
-func validateWalk(node sqlparser.SQLNode) (kontinue bool, err error) {
+func validateWalk(node sqlparser.SQLNode, allowForeignKeys bool) (kontinue bool, err error) {
switch node.(type) {
case *sqlparser.CreateTable, *sqlparser.AlterTable,
*sqlparser.TableSpec, *sqlparser.AddConstraintDefinition, *sqlparser.ConstraintDefinition:
return true, nil
case *sqlparser.ForeignKeyDefinition:
- return false, ErrForeignKeyFound
+ if !allowForeignKeys {
+ return false, ErrForeignKeyFound
+ }
case *sqlparser.RenameTableName:
return false, ErrRenameTableFound
}
@@ -118,7 +120,7 @@ func ParseOnlineDDLStatement(sql string) (ddlStmt sqlparser.DDLStatement, action
return ddlStmt, action, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported query type: %s", sql)
}
-func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement) error {
+func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting) error {
// SQL statement sanity checks:
if !ddlStmt.IsFullyParsed() {
if _, err := sqlparser.ParseStrictDDL(sql); err != nil {
@@ -128,7 +130,10 @@ func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement) error
return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "cannot parse statement: %v", sql)
}
- if err := sqlparser.Walk(validateWalk, ddlStmt); err != nil {
+ walkFunc := func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ return validateWalk(node, ddlStrategySetting.IsAllowForeignKeysFlag())
+ }
+ if err := sqlparser.Walk(walkFunc, ddlStmt); err != nil {
switch err {
case ErrForeignKeyFound:
return vterrors.Errorf(vtrpcpb.Code_ABORTED, "foreign key constraints are not supported in online DDL, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/")
@@ -142,7 +147,7 @@ func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement) error
// NewOnlineDDLs takes a single DDL statement, normalizes it (potentially break down into multiple statements), and generates one or more OnlineDDL instances, one for each normalized statement
func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string) (onlineDDLs [](*OnlineDDL), err error) {
appendOnlineDDL := func(tableName string, ddlStmt sqlparser.DDLStatement) error {
- if err := onlineDDLStatementSanity(sql, ddlStmt); err != nil {
+ if err := onlineDDLStatementSanity(sql, ddlStmt, ddlStrategySetting); err != nil {
return err
}
onlineDDL, err := NewOnlineDDL(keyspace, tableName, sqlparser.String(ddlStmt), ddlStrategySetting, migrationContext, providedUUID)
@@ -269,6 +274,11 @@ func OnlineDDLFromCommentedStatement(stmt sqlparser.Statement) (onlineDDL *Onlin
default:
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported statement for Online DDL: %v", sqlparser.String(stmt))
}
+ // We clone the comments because they will end up being cached by the query planner. Then, the Directive() function actually modifies the comments.
+ // If comments are shared in cache, and Directive() modifies it, then we have a concurrency issue when someone else wants to read the comments.
+ // By cloning the comments we remove the concurrency problem.
+ comments = sqlparser.CloneRefOfParsedComments(comments)
+ comments.ResetDirectives()
if comments.Length() == 0 {
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no comments found in statement: %v", sqlparser.String(stmt))
diff --git a/go/vt/schema/online_ddl_test.go b/go/vt/schema/online_ddl_test.go
index 0ddd8588bc7..159faa095ef 100644
--- a/go/vt/schema/online_ddl_test.go
+++ b/go/vt/schema/online_ddl_test.go
@@ -18,6 +18,7 @@ package schema
import (
"encoding/hex"
+ "fmt"
"strings"
"testing"
@@ -323,6 +324,54 @@ func TestNewOnlineDDLs(t *testing.T) {
}
}
+func TestNewOnlineDDLsForeignKeys(t *testing.T) {
+ type expect struct {
+ sqls []string
+ notDDL bool
+ parseError bool
+ isError bool
+ expectErrorText string
+ isView bool
+ }
+ queries := []string{
+ "alter table corder add FOREIGN KEY my_fk(customer_id) references customer(customer_id)",
+ "create table t1 (id int primary key, i int, foreign key (i) references parent(id))",
+ }
+
+ migrationContext := "354b-11eb-82cd-f875a4d24e90"
+ for _, query := range queries {
+ t.Run(query, func(t *testing.T) {
+ for _, allowForeignKeys := range []bool{false, true} {
+ testName := fmt.Sprintf("%t", allowForeignKeys)
+ t.Run(testName, func(t *testing.T) {
+ stmt, err := sqlparser.Parse(query)
+ require.NoError(t, err)
+ ddlStmt, ok := stmt.(sqlparser.DDLStatement)
+ require.True(t, ok)
+
+ flags := ""
+ if allowForeignKeys {
+ flags = "--unsafe-allow-foreign-keys"
+ }
+ onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, flags), migrationContext, "")
+ if allowForeignKeys {
+ assert.NoError(t, err)
+ } else {
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "foreign key constraints are not supported")
+ }
+
+ for _, onlineDDL := range onlineDDLs {
+ sql, err := onlineDDL.sqlWithoutComments()
+ assert.NoError(t, err)
+ assert.NotEmpty(t, sql)
+ }
+ })
+ }
+ })
+ }
+}
+
func TestOnlineDDLFromCommentedStatement(t *testing.T) {
queries := []string{
`create table t (id int primary key)`,
diff --git a/go/vt/schema/tablegc.go b/go/vt/schema/tablegc.go
index ff9c5e92c92..872fb42dbe5 100644
--- a/go/vt/schema/tablegc.go
+++ b/go/vt/schema/tablegc.go
@@ -44,9 +44,13 @@ const (
TableDroppedGCState TableGCState = ""
)
+const (
+ GCTableNameExpression string = `^_vt_(HOLD|PURGE|EVAC|DROP)_([0-f]{32})_([0-9]{14})$`
+)
+
var (
gcUUIDRegexp = regexp.MustCompile(`^[0-f]{32}$`)
- gcTableNameRegexp = regexp.MustCompile(`^_vt_(HOLD|PURGE|EVAC|DROP)_([0-f]{32})_([0-9]{14})$`)
+ gcTableNameRegexp = regexp.MustCompile(GCTableNameExpression)
gcStates = map[string]TableGCState{
string(HoldTableGCState): HoldTableGCState,
diff --git a/go/vt/schemadiff/column.go b/go/vt/schemadiff/column.go
index 27169a698f7..4b8022ac289 100644
--- a/go/vt/schemadiff/column.go
+++ b/go/vt/schemadiff/column.go
@@ -33,7 +33,7 @@ func (c *columnDetails) identicalOtherThanName(other *sqlparser.ColumnDefinition
if other == nil {
return false
}
- return sqlparser.EqualsColumnType(c.col.Type, other.Type)
+ return sqlparser.Equals.SQLNode(c.col.Type, other.Type)
}
func (c *columnDetails) prevColName() string {
@@ -82,7 +82,7 @@ func NewColumnDefinitionEntity(c *sqlparser.ColumnDefinition) *ColumnDefinitionE
// It returns an AlterTable statement if changes are found, or nil if not.
// the other table may be of different name; its name is ignored.
func (c *ColumnDefinitionEntity) ColumnDiff(other *ColumnDefinitionEntity, _ *DiffHints) *ModifyColumnDiff {
- if sqlparser.EqualsRefOfColumnDefinition(c.columnDefinition, other.columnDefinition) {
+ if sqlparser.Equals.RefOfColumnDefinition(c.columnDefinition, other.columnDefinition) {
return nil
}
diff --git a/go/vt/schemadiff/diff_test.go b/go/vt/schemadiff/diff_test.go
index b26ffcddb48..668895a7891 100644
--- a/go/vt/schemadiff/diff_test.go
+++ b/go/vt/schemadiff/diff_test.go
@@ -55,8 +55,8 @@ func TestDiffTables(t *testing.T) {
{
name: "create",
to: "create table t(id int primary key)",
- diff: "create table t (\n\tid int primary key\n)",
- cdiff: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY\n)",
+ diff: "create table t (\n\tid int,\n\tprimary key (id)\n)",
+ cdiff: "CREATE TABLE `t` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)",
action: "create",
toName: "t",
},
@@ -411,10 +411,10 @@ func TestDiffSchemas(t *testing.T) {
name: "create table",
to: "create table t(id int primary key)",
diffs: []string{
- "create table t (\n\tid int primary key\n)",
+ "create table t (\n\tid int,\n\tprimary key (id)\n)",
},
cdiffs: []string{
- "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY\n)",
+ "CREATE TABLE `t` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)",
},
},
{
@@ -422,10 +422,10 @@ func TestDiffSchemas(t *testing.T) {
from: ";;; ; ; ;;;",
to: "create table t(id int primary key)",
diffs: []string{
- "create table t (\n\tid int primary key\n)",
+ "create table t (\n\tid int,\n\tprimary key (id)\n)",
},
cdiffs: []string{
- "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY\n)",
+ "CREATE TABLE `t` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)",
},
},
{
@@ -444,13 +444,13 @@ func TestDiffSchemas(t *testing.T) {
to: "create table t4(id int primary key); create table t2(id bigint primary key); create table t3(id int primary key)",
diffs: []string{
"drop table t1",
- "alter table t2 modify column id bigint primary key",
- "create table t4 (\n\tid int primary key\n)",
+ "alter table t2 modify column id bigint",
+ "create table t4 (\n\tid int,\n\tprimary key (id)\n)",
},
cdiffs: []string{
"DROP TABLE `t1`",
- "ALTER TABLE `t2` MODIFY COLUMN `id` bigint PRIMARY KEY",
- "CREATE TABLE `t4` (\n\t`id` int PRIMARY KEY\n)",
+ "ALTER TABLE `t2` MODIFY COLUMN `id` bigint",
+ "CREATE TABLE `t4` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)",
},
},
{
@@ -459,11 +459,11 @@ func TestDiffSchemas(t *testing.T) {
to: "create table t1(id int primary key); create table t3(id int unsigned primary key);",
diffs: []string{
"drop table t2",
- "create table t3 (\n\tid int unsigned primary key\n)",
+ "create table t3 (\n\tid int unsigned,\n\tprimary key (id)\n)",
},
cdiffs: []string{
"DROP TABLE `t2`",
- "CREATE TABLE `t3` (\n\t`id` int unsigned PRIMARY KEY\n)",
+ "CREATE TABLE `t3` (\n\t`id` int unsigned,\n\tPRIMARY KEY (`id`)\n)",
},
},
{
@@ -486,17 +486,17 @@ func TestDiffSchemas(t *testing.T) {
"drop table t1a",
"drop table t2a",
"drop table t3a",
- "create table t1b (\n\tid bigint primary key\n)",
- "create table t2b (\n\tid int unsigned primary key\n)",
- "create table t3b (\n\tid int primary key\n)",
+ "create table t1b (\n\tid bigint,\n\tprimary key (id)\n)",
+ "create table t2b (\n\tid int unsigned,\n\tprimary key (id)\n)",
+ "create table t3b (\n\tid int,\n\tprimary key (id)\n)",
},
cdiffs: []string{
"DROP TABLE `t1a`",
"DROP TABLE `t2a`",
"DROP TABLE `t3a`",
- "CREATE TABLE `t1b` (\n\t`id` bigint PRIMARY KEY\n)",
- "CREATE TABLE `t2b` (\n\t`id` int unsigned PRIMARY KEY\n)",
- "CREATE TABLE `t3b` (\n\t`id` int PRIMARY KEY\n)",
+ "CREATE TABLE `t1b` (\n\t`id` bigint,\n\tPRIMARY KEY (`id`)\n)",
+ "CREATE TABLE `t2b` (\n\t`id` int unsigned,\n\tPRIMARY KEY (`id`)\n)",
+ "CREATE TABLE `t3b` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)",
},
},
{
@@ -505,13 +505,13 @@ func TestDiffSchemas(t *testing.T) {
to: "create table t1b(id bigint primary key); create table t2b(id int unsigned primary key); create table t3b(id int primary key); ",
diffs: []string{
"drop table t3a",
- "create table t1b (\n\tid bigint primary key\n)",
+ "create table t1b (\n\tid bigint,\n\tprimary key (id)\n)",
"rename table t1a to t3b",
"rename table t2a to t2b",
},
cdiffs: []string{
"DROP TABLE `t3a`",
- "CREATE TABLE `t1b` (\n\t`id` bigint PRIMARY KEY\n)",
+ "CREATE TABLE `t1b` (\n\t`id` bigint,\n\tPRIMARY KEY (`id`)\n)",
"RENAME TABLE `t1a` TO `t3b`",
"RENAME TABLE `t2a` TO `t2b`",
},
@@ -601,17 +601,17 @@ func TestDiffSchemas(t *testing.T) {
diffs: []string{
"drop table t1",
"drop view v1",
- "alter table t2 modify column id bigint primary key",
+ "alter table t2 modify column id bigint",
"alter view v2 as select id from t2",
- "create table t4 (\n\tid int primary key\n)",
+ "create table t4 (\n\tid int,\n\tprimary key (id)\n)",
"create view v0 as select * from v2, t2",
},
cdiffs: []string{
"DROP TABLE `t1`",
"DROP VIEW `v1`",
- "ALTER TABLE `t2` MODIFY COLUMN `id` bigint PRIMARY KEY",
+ "ALTER TABLE `t2` MODIFY COLUMN `id` bigint",
"ALTER VIEW `v2` AS SELECT `id` FROM `t2`",
- "CREATE TABLE `t4` (\n\t`id` int PRIMARY KEY\n)",
+ "CREATE TABLE `t4` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)",
"CREATE VIEW `v0` AS SELECT * FROM `v2`, `t2`",
},
},
diff --git a/go/vt/schemadiff/errors.go b/go/vt/schemadiff/errors.go
index 0ea4dfecda1..42dd304e75a 100644
--- a/go/vt/schemadiff/errors.go
+++ b/go/vt/schemadiff/errors.go
@@ -180,6 +180,15 @@ type InvalidColumnInKeyError struct {
Key string
}
+type DuplicateKeyNameError struct {
+ Table string
+ Key string
+}
+
+func (e *DuplicateKeyNameError) Error() string {
+ return fmt.Sprintf("duplicate key %s in table %s", sqlescape.EscapeID(e.Key), sqlescape.EscapeID(e.Table))
+}
+
func (e *InvalidColumnInKeyError) Error() string {
return fmt.Sprintf("invalid column %s referenced by key %s in table %s",
sqlescape.EscapeID(e.Column), sqlescape.EscapeID(e.Key), sqlescape.EscapeID(e.Table))
@@ -228,6 +237,15 @@ func (e *InvalidColumnInCheckConstraintError) Error() string {
sqlescape.EscapeID(e.Column), sqlescape.EscapeID(e.Constraint), sqlescape.EscapeID(e.Table))
}
+type ForeignKeyDependencyUnresolvedError struct {
+ Table string
+}
+
+func (e *ForeignKeyDependencyUnresolvedError) Error() string {
+ return fmt.Sprintf("table %s has unresolved/loop foreign key dependencies",
+ sqlescape.EscapeID(e.Table))
+}
+
type InvalidColumnInForeignKeyConstraintError struct {
Table string
Constraint string
@@ -235,10 +253,80 @@ type InvalidColumnInForeignKeyConstraintError struct {
}
func (e *InvalidColumnInForeignKeyConstraintError) Error() string {
- return fmt.Sprintf("invalid column %s referenced by foreign key constraint %s in table %s",
+ return fmt.Sprintf("invalid column %s covered by foreign key constraint %s in table %s",
sqlescape.EscapeID(e.Column), sqlescape.EscapeID(e.Constraint), sqlescape.EscapeID(e.Table))
}
+type InvalidReferencedColumnInForeignKeyConstraintError struct {
+ Table string
+ Constraint string
+ ReferencedTable string
+ ReferencedColumn string
+}
+
+func (e *InvalidReferencedColumnInForeignKeyConstraintError) Error() string {
+ return fmt.Sprintf("invalid column %s.%s referenced by foreign key constraint %s in table %s",
+ sqlescape.EscapeID(e.ReferencedTable), sqlescape.EscapeID(e.ReferencedColumn), sqlescape.EscapeID(e.Constraint), sqlescape.EscapeID(e.Table))
+}
+
+type ForeignKeyColumnCountMismatchError struct {
+ Table string
+ Constraint string
+ ColumnCount int
+ ReferencedTable string
+ ReferencedColumnCount int
+}
+
+func (e *ForeignKeyColumnCountMismatchError) Error() string {
+ return fmt.Sprintf("mismatching column count %d referenced by foreign key constraint %s in table %s. Expected %d",
+ e.ReferencedColumnCount, sqlescape.EscapeID(e.Constraint), sqlescape.EscapeID(e.Table), e.ColumnCount)
+}
+
+type ForeignKeyColumnTypeMismatchError struct {
+ Table string
+ Constraint string
+ Column string
+ ReferencedTable string
+ ReferencedColumn string
+}
+
+func (e *ForeignKeyColumnTypeMismatchError) Error() string {
+ return fmt.Sprintf("mismatching column type %s.%s and %s.%s referenced by foreign key constraint %s in table %s",
+ sqlescape.EscapeID(e.ReferencedTable),
+ sqlescape.EscapeID(e.ReferencedColumn),
+ sqlescape.EscapeID(e.Table),
+ sqlescape.EscapeID(e.Column),
+ sqlescape.EscapeID(e.Constraint),
+ sqlescape.EscapeID(e.Table),
+ )
+}
+
+type MissingForeignKeyReferencedIndexError struct {
+ Table string
+ Constraint string
+ ReferencedTable string
+}
+
+func (e *MissingForeignKeyReferencedIndexError) Error() string {
+ return fmt.Sprintf("missing index in referenced table %s for foreign key constraint %s in table %s",
+ sqlescape.EscapeID(e.ReferencedTable),
+ sqlescape.EscapeID(e.Constraint),
+ sqlescape.EscapeID(e.Table),
+ )
+}
+
+type IndexNeededByForeignKeyError struct {
+ Table string
+ Key string
+}
+
+func (e *IndexNeededByForeignKeyError) Error() string {
+ return fmt.Sprintf("key %s needed by a foreign key constraint in table %s",
+ sqlescape.EscapeID(e.Key),
+ sqlescape.EscapeID(e.Table),
+ )
+}
+
type ViewDependencyUnresolvedError struct {
View string
}
diff --git a/go/vt/schemadiff/mysql.go b/go/vt/schemadiff/mysql.go
index 768714af7a7..624897e2e43 100644
--- a/go/vt/schemadiff/mysql.go
+++ b/go/vt/schemadiff/mysql.go
@@ -29,6 +29,14 @@ var integralTypes = map[string]bool{
"bigint": true,
}
+var floatTypes = map[string]bool{
+ "float": true,
+ "float4": true,
+ "float8": true,
+ "double": true,
+ "real": true,
+}
+
var charsetTypes = map[string]bool{
"char": true,
"varchar": true,
@@ -39,3 +47,7 @@ var charsetTypes = map[string]bool{
"enum": true,
"set": true,
}
+
+func IsIntegralType(columnType string) bool {
+ return integralTypes[columnType]
+}
diff --git a/go/vt/schemadiff/schema.go b/go/vt/schemadiff/schema.go
index 3cb6f4436b6..0e9ae4c4df1 100644
--- a/go/vt/schemadiff/schema.go
+++ b/go/vt/schemadiff/schema.go
@@ -35,6 +35,9 @@ type Schema struct {
named map[string]Entity
sorted []Entity
+
+ foreignKeyParents []*CreateTableEntity // subset of tables
+ foreignKeyChildren []*CreateTableEntity // subset of tables
}
// newEmptySchema is used internally to initialize a Schema object
@@ -44,6 +47,9 @@ func newEmptySchema() *Schema {
views: []*CreateViewEntity{},
named: map[string]Entity{},
sorted: []Entity{},
+
+ foreignKeyParents: []*CreateTableEntity{},
+ foreignKeyChildren: []*CreateTableEntity{},
}
return schema
}
@@ -122,6 +128,18 @@ func NewSchemaFromSQL(sql string) (*Schema, error) {
return NewSchemaFromStatements(statements)
}
+// getForeignKeyParentTableNames analyzes a CREATE TABLE definition and extracts all referened foreign key tables names.
+// A table name may appear twice in the result output, it it is referenced by more than one foreign key
+func getForeignKeyParentTableNames(createTable *sqlparser.CreateTable) (names []string, err error) {
+ for _, cs := range createTable.TableSpec.Constraints {
+ if check, ok := cs.Details.(*sqlparser.ForeignKeyDefinition); ok {
+ parentTableName := check.ReferenceDefinition.ReferencedTable.Name.String()
+ names = append(names, parentTableName)
+ }
+ }
+ return names, err
+}
+
// getViewDependentTableNames analyzes a CREATE VIEW definition and extracts all tables/views read by this view
func getViewDependentTableNames(createView *sqlparser.CreateView) (names []string, err error) {
err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
@@ -175,10 +193,6 @@ func (s *Schema) normalize() error {
// If a view v1 depends on v2, then v2 must come before v1, even though v1
// precedes v2 alphabetically
dependencyLevels := make(map[string]int, len(s.tables)+len(s.views))
- for _, t := range s.tables {
- s.sorted = append(s.sorted, t)
- dependencyLevels[t.Name()] = 0
- }
allNamesFoundInLowerLevel := func(names []string, level int) bool {
for _, name := range names {
@@ -198,12 +212,67 @@ func (s *Schema) normalize() error {
return true
}
+ // We now iterate all tables. We iterate "dependency levels":
+ // - first we want all tables that don't have foreign keys or which only reference themselves
+ // - then we only want tables that reference 1st level tables. these are 2nd level tables
+ // - etc.
+ // we stop when we have been unable to find a table in an iteration.
+ fkParents := map[string]bool{}
+ iterationLevel := 0
+ for {
+ handledAnyTablesInIteration := false
+ for _, t := range s.tables {
+ name := t.Name()
+ if _, ok := dependencyLevels[name]; ok {
+ // already handled; skip
+ continue
+ }
+ // Not handled. Is this view dependent on already handled objects?
+ referencedTableNames, err := getForeignKeyParentTableNames(t.CreateTable)
+ if err != nil {
+ return err
+ }
+ if len(referencedTableNames) > 0 {
+ s.foreignKeyChildren = append(s.foreignKeyChildren, t)
+ }
+ nonSelfReferenceNames := []string{}
+ for _, referencedTableName := range referencedTableNames {
+ if referencedTableName != name {
+ nonSelfReferenceNames = append(nonSelfReferenceNames, referencedTableName)
+ }
+ fkParents[referencedTableName] = true
+ }
+ if allNamesFoundInLowerLevel(nonSelfReferenceNames, iterationLevel) {
+ s.sorted = append(s.sorted, t)
+ dependencyLevels[t.Name()] = iterationLevel
+ handledAnyTablesInIteration = true
+ }
+ }
+ if !handledAnyTablesInIteration {
+ break
+ }
+ iterationLevel++
+ }
+ for _, t := range s.tables {
+ if fkParents[t.Name()] {
+ s.foreignKeyParents = append(s.foreignKeyParents, t)
+ }
+ }
// We now iterate all views. We iterate "dependency levels":
// - first we want all views that only depend on tables. These are 1st level views.
// - then we only want views that depend on 1st level views or on tables. These are 2nd level views.
// - etc.
// we stop when we have been unable to find a view in an iteration.
- for iterationLevel := 1; ; iterationLevel++ {
+
+ // It's possible that there's never been any tables in this schema. Which means
+ // iterationLevel remains zero.
+ // To deal with views, we must have iterationLevel at least 1. This is because any view reads
+ // from _something_: at the very least it reads from DUAL (inplicitly or explicitly). Which
+ // puts the view at a higher level.
+ if iterationLevel < 1 {
+ iterationLevel = 1
+ }
+ for {
handledAnyViewsInIteration := false
for _, v := range s.views {
name := v.Name()
@@ -225,11 +294,21 @@ func (s *Schema) normalize() error {
if !handledAnyViewsInIteration {
break
}
+ iterationLevel++
}
if len(s.sorted) != len(s.tables)+len(s.views) {
- // We have leftover views. This can happen if the schema definition is invalid:
+ // We have leftover tables or views. This can happen if the schema definition is invalid:
+ // - a table's foreign key references a nonexistent table
+ // - two or more tables have circular FK dependency
// - a view depends on a nonexistent table
- // - two views have a circular dependency
+ // - two or more views have a circular dependency
+ for _, t := range s.tables {
+ if _, ok := dependencyLevels[t.Name()]; !ok {
+ // We _know_ that in this iteration, at least one view is found unassigned a dependency level.
+ // We return the first one.
+ return &ForeignKeyDependencyUnresolvedError{Table: t.Name()}
+ }
+ }
for _, v := range s.views {
if _, ok := dependencyLevels[v.Name()]; !ok {
// We _know_ that in this iteration, at least one view is found unassigned a dependency level.
@@ -238,6 +317,71 @@ func (s *Schema) normalize() error {
}
}
}
+
+ // Validate table definitions
+ for _, t := range s.tables {
+ if err := t.validate(); err != nil {
+ return err
+ }
+ }
+ colTypeEqualForForeignKey := func(a, b *sqlparser.ColumnType) bool {
+ return a.Type == b.Type &&
+ a.Unsigned == b.Unsigned &&
+ a.Zerofill == b.Zerofill &&
+ sqlparser.Equals.ColumnCharset(a.Charset, b.Charset) &&
+ sqlparser.Equals.SliceOfString(a.EnumValues, b.EnumValues)
+ }
+
+ // Now validate foreign key columns:
+ // - referenced table columns must exist
+ // - foreign key columns must match in count and type to referenced table columns
+ // - referenced table has an appropriate index over referenced columns
+ for _, t := range s.tables {
+ if len(t.TableSpec.Constraints) == 0 {
+ continue
+ }
+
+ tableColumns := map[string]*sqlparser.ColumnDefinition{}
+ for _, col := range t.CreateTable.TableSpec.Columns {
+ colName := col.Name.Lowered()
+ tableColumns[colName] = col
+ }
+
+ for _, cs := range t.TableSpec.Constraints {
+ check, ok := cs.Details.(*sqlparser.ForeignKeyDefinition)
+ if !ok {
+ continue
+ }
+ referencedTableName := check.ReferenceDefinition.ReferencedTable.Name.String()
+ referencedTable := s.Table(referencedTableName) // we know this exists because we validated foreign key dependencies earlier on
+
+ referencedColumns := map[string]*sqlparser.ColumnDefinition{}
+ for _, col := range referencedTable.CreateTable.TableSpec.Columns {
+ colName := col.Name.Lowered()
+ referencedColumns[colName] = col
+ }
+ // Thanks to table validation, we already know the foreign key covered columns count is equal to the
+ // referenced table column count. Now ensure their types are identical
+ for i, col := range check.Source {
+ coveredColumn, ok := tableColumns[col.Lowered()]
+ if !ok {
+ return &InvalidColumnInForeignKeyConstraintError{Table: t.Name(), Constraint: cs.Name.String(), Column: col.String()}
+ }
+ referencedColumnName := check.ReferenceDefinition.ReferencedColumns[i].Lowered()
+ referencedColumn, ok := referencedColumns[referencedColumnName]
+ if !ok {
+ return &InvalidReferencedColumnInForeignKeyConstraintError{Table: t.Name(), Constraint: cs.Name.String(), ReferencedTable: referencedTableName, ReferencedColumn: referencedColumnName}
+ }
+ if !colTypeEqualForForeignKey(coveredColumn.Type, referencedColumn.Type) {
+ return &ForeignKeyColumnTypeMismatchError{Table: t.Name(), Constraint: cs.Name.String(), Column: coveredColumn.Name.String(), ReferencedTable: referencedTableName, ReferencedColumn: referencedColumnName}
+ }
+ }
+
+ if !referencedTable.columnsCoveredByInOrderIndex(check.ReferenceDefinition.ReferencedColumns) {
+ return &MissingForeignKeyReferencedIndexError{Table: t.Name(), Constraint: cs.Name.String(), ReferencedTable: referencedTableName}
+ }
+ }
+ }
return nil
}
diff --git a/go/vt/schemadiff/schema_test.go b/go/vt/schemadiff/schema_test.go
index 308333bf641..1a24b862b1c 100644
--- a/go/vt/schemadiff/schema_test.go
+++ b/go/vt/schemadiff/schema_test.go
@@ -21,6 +21,9 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/sqlparser"
)
var createQueries = []string{
@@ -123,6 +126,24 @@ func TestNewSchemaFromQueriesUnresolvedAlias(t *testing.T) {
assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error())
}
+func TestNewSchemaFromQueriesViewFromDual(t *testing.T) {
+ // Schema will not contain any tables, just a view selecting from DUAL
+ queries := []string{
+ "create view v20 as select 1 from dual",
+ }
+ _, err := NewSchemaFromQueries(queries)
+ assert.NoError(t, err)
+}
+
+func TestNewSchemaFromQueriesViewFromDualImplicit(t *testing.T) {
+ // Schema will not contain any tables, just a view implicitly selecting from DUAL
+ queries := []string{
+ "create view v20 as select 1",
+ }
+ _, err := NewSchemaFromQueries(queries)
+ assert.NoError(t, err)
+}
+
func TestNewSchemaFromQueriesLoop(t *testing.T) {
// v7 and v8 depend on each other
queries := append(createQueries,
@@ -153,3 +174,227 @@ func TestCopy(t *testing.T) {
assert.Equal(t, schema.ToSQL(), schemaClone.ToSQL())
assert.False(t, schema == schemaClone)
}
+
+func TestGetViewDependentTableNames(t *testing.T) {
+ tt := []struct {
+ name string
+ view string
+ tables []string
+ }{
+ {
+ view: "create view v6 as select * from v4",
+ tables: []string{"v4"},
+ },
+ {
+ view: "create view v2 as select * from v3, t2",
+ tables: []string{"v3", "t2"},
+ },
+ {
+ view: "create view v3 as select * from t3 as t3",
+ tables: []string{"t3"},
+ },
+ {
+ view: "create view v3 as select * from t3 as something_else",
+ tables: []string{"t3"},
+ },
+ {
+ view: "create view v5 as select * from t1, (select * from v3) as some_alias",
+ tables: []string{"t1", "v3"},
+ },
+ {
+ view: "create view v0 as select 1 from DUAL",
+ tables: []string{"dual"},
+ },
+ {
+ view: "create view v9 as select 1",
+ tables: []string{"dual"},
+ },
+ }
+ for _, ts := range tt {
+ t.Run(ts.view, func(t *testing.T) {
+ stmt, err := sqlparser.ParseStrictDDL(ts.view)
+ require.NoError(t, err)
+ createView, ok := stmt.(*sqlparser.CreateView)
+ require.True(t, ok)
+
+ tables, err := getViewDependentTableNames(createView)
+ assert.NoError(t, err)
+ assert.Equal(t, ts.tables, tables)
+ })
+ }
+}
+
+func TestGetForeignKeyParentTableNames(t *testing.T) {
+ tt := []struct {
+ name string
+ table string
+ tables []string
+ }{
+ {
+ table: "create table t1 (id int primary key, i int, foreign key (i) references parent(id))",
+ tables: []string{"parent"},
+ },
+ {
+ table: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id))",
+ tables: []string{"parent"},
+ },
+ {
+ table: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
+ tables: []string{"parent"},
+ },
+ {
+ table: "create table t1 (id int primary key, i int, i2 int, constraint f foreign key (i) references parent(id) on delete cascade, constraint f2 foreign key (i2) references parent2(id) on delete restrict)",
+ tables: []string{"parent", "parent2"},
+ },
+ {
+ table: "create table t1 (id int primary key, i int, i2 int, constraint f foreign key (i) references parent(id) on delete cascade, constraint f2 foreign key (i2) references parent(id) on delete restrict)",
+ tables: []string{"parent", "parent"},
+ },
+ }
+ for _, ts := range tt {
+ t.Run(ts.table, func(t *testing.T) {
+ stmt, err := sqlparser.ParseStrictDDL(ts.table)
+ require.NoError(t, err)
+ createTable, ok := stmt.(*sqlparser.CreateTable)
+ require.True(t, ok)
+
+ tables, err := getForeignKeyParentTableNames(createTable)
+ assert.NoError(t, err)
+ assert.Equal(t, ts.tables, tables)
+ })
+ }
+}
+
+func TestTableForeignKeyOrdering(t *testing.T) {
+ fkQueries := []string{
+ "create table t11 (id int primary key, i int, key ix (i), constraint f12 foreign key (i) references t12(id) on delete restrict, constraint f20 foreign key (i) references t20(id) on delete restrict)",
+ "create table t15(id int, primary key(id))",
+ "create view v09 as select * from v13, t17",
+ "create table t20 (id int primary key, i int, key ix (i), constraint f15 foreign key (i) references t15(id) on delete restrict)",
+ "create view v13 as select * from t20",
+ "create table t12 (id int primary key, i int, key ix (i), constraint f15 foreign key (i) references t15(id) on delete restrict)",
+ "create table t17 (id int primary key, i int, key ix (i), constraint f11 foreign key (i) references t11(id) on delete restrict, constraint f15 foreign key (i) references t15(id) on delete restrict)",
+ "create table t16 (id int primary key, i int, key ix (i), constraint f11 foreign key (i) references t11(id) on delete restrict, constraint f15 foreign key (i) references t15(id) on delete restrict)",
+ "create table t14 (id int primary key, i int, key ix (i), constraint f14 foreign key (i) references t14(id) on delete restrict)",
+ }
+ expectSortedTableNames := []string{
+ "t14",
+ "t15",
+ "t12",
+ "t20",
+ "t11",
+ "t16",
+ "t17",
+ }
+ expectSortedViewNames := []string{
+ "v13",
+ "v09",
+ }
+ schema, err := NewSchemaFromQueries(fkQueries)
+ require.NoError(t, err)
+ assert.NotNil(t, schema)
+
+ assert.Equal(t, append(expectSortedTableNames, expectSortedViewNames...), schema.EntityNames())
+ assert.Equal(t, expectSortedTableNames, schema.TableNames())
+ assert.Equal(t, expectSortedViewNames, schema.ViewNames())
+}
+
+func TestInvalidSchema(t *testing.T) {
+ tt := []struct {
+ schema string
+ expectErr error
+ }{
+ {
+ schema: "create table t11 (id int primary key, i int, key ix(i), constraint f11 foreign key (i) references t11(id) on delete restrict)",
+ },
+ {
+ schema: "create table t10(id int primary key); create table t11 (id int primary key, i int, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)",
+ },
+ {
+ schema: "create table t11 (id int primary key, i int, constraint f11 foreign key (i7) references t11(id) on delete restrict)",
+ expectErr: &InvalidColumnInForeignKeyConstraintError{Table: "t11", Constraint: "f11", Column: "i7"},
+ },
+ {
+ schema: "create table t11 (id int primary key, i int, constraint f11 foreign key (i) references t11(id, i) on delete restrict)",
+ expectErr: &ForeignKeyColumnCountMismatchError{Table: "t11", Constraint: "f11", ColumnCount: 1, ReferencedTable: "t11", ReferencedColumnCount: 2},
+ },
+ {
+ schema: "create table t11 (id int primary key, i1 int, i2 int, constraint f11 foreign key (i1, i2) references t11(i1) on delete restrict)",
+ expectErr: &ForeignKeyColumnCountMismatchError{Table: "t11", Constraint: "f11", ColumnCount: 2, ReferencedTable: "t11", ReferencedColumnCount: 1},
+ },
+ {
+ schema: "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)",
+ expectErr: &ForeignKeyDependencyUnresolvedError{Table: "t11"},
+ },
+ {
+ schema: "create table t11 (id int primary key, i int, key ix(i), constraint f11 foreign key (i) references t11(id2) on delete restrict)",
+ expectErr: &InvalidReferencedColumnInForeignKeyConstraintError{Table: "t11", Constraint: "f11", ReferencedTable: "t11", ReferencedColumn: "id2"},
+ },
+ {
+ schema: "create table t10(id int primary key); create table t11 (id int primary key, i int, key ix(i), constraint f10 foreign key (i) references t10(x) on delete restrict)",
+ expectErr: &InvalidReferencedColumnInForeignKeyConstraintError{Table: "t11", Constraint: "f10", ReferencedTable: "t10", ReferencedColumn: "x"},
+ },
+ {
+ schema: "create table t10(id int primary key, i int); create table t11 (id int primary key, i int, key ix(i), constraint f10 foreign key (i) references t10(i) on delete restrict)",
+ expectErr: &MissingForeignKeyReferencedIndexError{Table: "t11", Constraint: "f10", ReferencedTable: "t10"},
+ },
+ {
+ schema: "create table t10(id int primary key); create table t11 (id int primary key, i int unsigned, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)",
+ expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"},
+ },
+ {
+ schema: "create table t10(id int primary key); create table t11 (id int primary key, i bigint, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)",
+ expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"},
+ },
+ {
+ schema: "create table t10(id bigint primary key); create table t11 (id int primary key, i int, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)",
+ expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"},
+ },
+ {
+ schema: "create table t10(id bigint primary key); create table t11 (id int primary key, i varchar(100), key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)",
+ expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"},
+ },
+ {
+ // InnoDB allows different string length
+ schema: "create table t10(id varchar(50) primary key); create table t11 (id int primary key, i varchar(100), key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)",
+ },
+ {
+ schema: "create table t10(id varchar(50) charset utf8mb3 primary key); create table t11 (id int primary key, i varchar(100) charset utf8mb4, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)",
+ expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"},
+ },
+ }
+ for _, ts := range tt {
+ t.Run(ts.schema, func(t *testing.T) {
+
+ _, err := NewSchemaFromSQL(ts.schema)
+ if ts.expectErr == nil {
+ assert.NoError(t, err)
+ } else {
+ assert.Error(t, err)
+ assert.EqualError(t, err, ts.expectErr.Error())
+ }
+ })
+ }
+}
+
+func TestInvalidTableForeignKeyReference(t *testing.T) {
+ {
+ fkQueries := []string{
+ "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)",
+ "create table t15(id int, primary key(id))",
+ }
+ _, err := NewSchemaFromQueries(fkQueries)
+ assert.Error(t, err)
+ assert.EqualError(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t11"}).Error())
+ }
+ {
+ fkQueries := []string{
+ "create table t13 (id int primary key, i int, constraint f11 foreign key (i) references t11(id) on delete restrict)",
+ "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)",
+ "create table t12 (id int primary key, i int, constraint f13 foreign key (i) references t13(id) on delete restrict)",
+ }
+ _, err := NewSchemaFromQueries(fkQueries)
+ assert.Error(t, err)
+ assert.EqualError(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t11"}).Error())
+ }
+}
diff --git a/go/vt/schemadiff/table.go b/go/vt/schemadiff/table.go
index 11cbb584f02..f58faa06310 100644
--- a/go/vt/schemadiff/table.go
+++ b/go/vt/schemadiff/table.go
@@ -296,7 +296,9 @@ func NewCreateTableEntity(c *sqlparser.CreateTable) (*CreateTableEntity, error)
// - table option case (upper/lower/special)
// The function returns this receiver as courtesy
func (c *CreateTableEntity) normalize() *CreateTableEntity {
- c.normalizeKeys()
+ c.normalizePrimaryKeyColumns()
+ c.normalizeForeignKeyIndexes() // implicitly add missing indexes for foreign keys
+ c.normalizeKeys() // assign names to keys
c.normalizeUnnamedConstraints()
c.normalizeTableOptions()
c.normalizeColumnOptions()
@@ -309,11 +311,16 @@ func (c *CreateTableEntity) normalizeTableOptions() {
for _, opt := range c.CreateTable.TableSpec.Options {
opt.Name = strings.ToLower(opt.Name)
switch opt.Name {
- case "charset", "collate":
+ case "charset":
opt.String = strings.ToLower(opt.String)
if charset, ok := collationEnv.CharsetAlias(opt.String); ok {
opt.String = charset
}
+ case "collate":
+ opt.String = strings.ToLower(opt.String)
+ if collation, ok := collationEnv.CollationAlias(opt.String); ok {
+ opt.String = collation
+ }
case "engine":
opt.String = strings.ToUpper(opt.String)
if engineName, ok := engineCasing[opt.String]; ok {
@@ -414,6 +421,12 @@ func (c *CreateTableEntity) normalizeColumnOptions() {
col.Type.Charset.Name = charset
}
+ // Map any collation aliases to the real collation. This applies mainly right
+ // now to utf8 being an alias for utf8mb3 collations.
+ if collation, ok := collationEnv.CollationAlias(col.Type.Options.Collate); ok {
+ col.Type.Options.Collate = collation
+ }
+
// Remove any lengths for integral types since it is deprecated there and
// doesn't mean anything anymore.
if _, ok := integralTypes[col.Type.Type]; ok {
@@ -424,6 +437,30 @@ func (c *CreateTableEntity) normalizeColumnOptions() {
}
}
+ if _, ok := floatTypes[col.Type.Type]; ok {
+ // First, normalize the actual type
+ switch col.Type.Type {
+ case "float4":
+ col.Type.Type = "float"
+ case "float8", "real":
+ col.Type.Type = "double"
+ }
+
+ if col.Type.Length != nil && col.Type.Scale == nil && col.Type.Length.Type == sqlparser.IntVal {
+ if l, err := strconv.ParseInt(col.Type.Length.Val, 10, 64); err == nil {
+ // See https://dev.mysql.com/doc/refman/8.0/en/floating-point-types.html, but the docs are
+ // subtly wrong. We use a float for a precision of 24, not a double as the documentation
+ // mentioned. Validated against the actual behavior of MySQL.
+ if l <= 24 {
+ col.Type.Type = "float"
+ } else {
+ col.Type.Type = "double"
+ }
+ }
+ col.Type.Length = nil
+ }
+ }
+
if _, ok := charsetTypes[col.Type.Type]; ok {
// If the charset is explicitly configured and it mismatches, we don't normalize
// anything for charsets or collations and move on.
@@ -471,11 +508,12 @@ func (c *CreateTableEntity) normalizeIndexOptions() {
idx.Info.Type = strings.ToLower(idx.Info.Type)
for _, opt := range idx.Options {
opt.Name = strings.ToLower(opt.Name)
+ opt.String = strings.ToLower(opt.String)
}
}
}
-func isBool(colType sqlparser.ColumnType) bool {
+func isBool(colType *sqlparser.ColumnType) bool {
return colType.Type == sqlparser.KeywordString(sqlparser.TINYINT) && colType.Length != nil && sqlparser.CanonicalString(colType.Length) == "1"
}
@@ -496,7 +534,36 @@ func (c *CreateTableEntity) normalizePartitionOptions() {
}
}
+func newPrimaryKeyIndexDefinitionSingleColumn(name sqlparser.IdentifierCI) *sqlparser.IndexDefinition {
+ index := &sqlparser.IndexDefinition{
+ Info: &sqlparser.IndexInfo{
+ Name: sqlparser.NewIdentifierCI("PRIMARY"),
+ Type: "PRIMARY KEY",
+ Primary: true,
+ Unique: true,
+ },
+ Columns: []*sqlparser.IndexColumn{{Column: name}},
+ }
+ return index
+}
+
+func (c *CreateTableEntity) normalizePrimaryKeyColumns() {
+ // normalize PRIMARY KEY:
+ // `create table t (id int primary key)`
+ // should turn into:
+ // `create table t (id int, primary key (id))`
+ // Also, PRIMARY KEY must come first before all other keys
+ for _, col := range c.CreateTable.TableSpec.Columns {
+ if col.Type.Options.KeyOpt == sqlparser.ColKeyPrimary {
+ c.CreateTable.TableSpec.Indexes = append([]*sqlparser.IndexDefinition{newPrimaryKeyIndexDefinitionSingleColumn(col.Name)}, c.CreateTable.TableSpec.Indexes...)
+ col.Type.Options.KeyOpt = sqlparser.ColKeyNone
+ }
+ }
+}
+
func (c *CreateTableEntity) normalizeKeys() {
+ c.normalizePrimaryKeyColumns()
+
// let's ensure all keys have names
keyNameExists := map[string]bool{}
// first, we iterate and take note for all keys that do already have names
@@ -557,7 +624,7 @@ func (c *CreateTableEntity) normalizeKeys() {
}
func (c *CreateTableEntity) normalizeUnnamedConstraints() {
- // let's ensure all keys have names
+ // let's ensure all constraints have names
constraintNameExists := map[string]bool{}
// first, we iterate and take note for all keys that do already have names
for _, constraint := range c.CreateTable.TableSpec.Constraints {
@@ -585,6 +652,34 @@ func (c *CreateTableEntity) normalizeUnnamedConstraints() {
}
}
+func (c *CreateTableEntity) normalizeForeignKeyIndexes() {
+ for _, constraint := range c.CreateTable.TableSpec.Constraints {
+ fk, ok := constraint.Details.(*sqlparser.ForeignKeyDefinition)
+ if !ok {
+ continue
+ }
+ if !c.columnsCoveredByInOrderIndex(fk.Source) {
+ // We add a foreign key, but the local FK columns are not indexed.
+ // MySQL's behavior is to implicitly add an index that covers the foreign key's local columns.
+ // The name of the index is either:
+ // - the same name of the constraint, if such name is provided
+ // - and error if an index by this name exists
+ // - or, a standard auto-generated index name, if the constraint name is not provided
+ indexDefinition := &sqlparser.IndexDefinition{
+ Info: &sqlparser.IndexInfo{
+ Type: "key",
+ Name: constraint.Name, // if name is empty, then the name is later auto populated
+ },
+ }
+ for _, col := range fk.Source {
+ indexColumn := &sqlparser.IndexColumn{Column: col}
+ indexDefinition.Columns = append(indexDefinition.Columns, indexColumn)
+ }
+ c.TableSpec.Indexes = append(c.TableSpec.Indexes, indexDefinition)
+ }
+ }
+}
+
// Name implements Entity interface
func (c *CreateTableEntity) Name() string {
return c.CreateTable.GetTable().Name.String()
@@ -680,15 +775,34 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints
}
}
tableSpecHasChanged := len(alterTable.AlterOptions) > 0 || alterTable.PartitionOption != nil || alterTable.PartitionSpec != nil
+
+ newAlterTableEntityDiff := func(alterTable *sqlparser.AlterTable) *AlterTableEntityDiff {
+ d := &AlterTableEntityDiff{alterTable: alterTable, from: c, to: other}
+
+ var algorithmValue sqlparser.AlgorithmValue
+
+ switch hints.AlterTableAlgorithmStrategy {
+ case AlterTableAlgorithmStrategyCopy:
+ algorithmValue = sqlparser.AlgorithmValue("COPY")
+ case AlterTableAlgorithmStrategyInplace:
+ algorithmValue = sqlparser.AlgorithmValue("INPLACE")
+ case AlterTableAlgorithmStrategyInstant:
+ algorithmValue = sqlparser.AlgorithmValue("INSTANT")
+ }
+ if algorithmValue != "" {
+ alterTable.AlterOptions = append(alterTable.AlterOptions, algorithmValue)
+ }
+ return d
+ }
if tableSpecHasChanged {
- parentAlterTableEntityDiff = &AlterTableEntityDiff{alterTable: alterTable, from: c, to: other}
+ parentAlterTableEntityDiff = newAlterTableEntityDiff(alterTable)
}
for _, superfluousFulltextKey := range superfluousFulltextKeys {
alterTable := &sqlparser.AlterTable{
Table: c.CreateTable.Table,
AlterOptions: []sqlparser.AlterOption{superfluousFulltextKey},
}
- diff := &AlterTableEntityDiff{alterTable: alterTable, from: c, to: other}
+ diff := newAlterTableEntityDiff(alterTable)
// if we got superfluous fulltext keys, that means the table spec has changed, ie
// parentAlterTableEntityDiff is not nil
parentAlterTableEntityDiff.addSubsequentDiff(diff)
@@ -698,7 +812,7 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints
Table: c.CreateTable.Table,
PartitionSpec: partitionSpec,
}
- diff := &AlterTableEntityDiff{alterTable: alterTable, from: c, to: other}
+ diff := newAlterTableEntityDiff(alterTable)
if parentAlterTableEntityDiff == nil {
parentAlterTableEntityDiff = diff
} else {
@@ -798,12 +912,18 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable,
// skip
case "AVG_ROW_LENGTH":
// skip. MyISAM only, not interesting
+ case "CHARSET":
+ switch hints.TableCharsetCollateStrategy {
+ case TableCharsetCollateStrict:
+ tableOption = &sqlparser.TableOption{String: ""}
+ // in all other strategies we ignore the charset
+ }
case "CHECKSUM":
tableOption = &sqlparser.TableOption{Value: sqlparser.NewIntLiteral("0")}
case "COLLATE":
// skip. the default collation is applied per CHARSET
case "COMMENT":
- tableOption = &sqlparser.TableOption{String: ""}
+ tableOption = &sqlparser.TableOption{Value: sqlparser.NewStrLiteral("")}
case "COMPRESSION":
tableOption = &sqlparser.TableOption{Value: sqlparser.NewStrLiteral("")}
case "CONNECTION":
@@ -861,10 +981,22 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable,
if t1Option, ok := t1OptionsMap[t2Option.Name]; ok {
options1 := sqlparser.TableOptions{t1Option}
options2 := sqlparser.TableOptions{t2Option}
- if !sqlparser.EqualsTableOptions(options1, options2) {
+ if !sqlparser.Equals.TableOptions(options1, options2) {
// options are different.
// However, we don't automatically apply these changes. It depends on the option!
switch strings.ToUpper(t1Option.Name) {
+ case "CHARSET", "COLLATE":
+ switch hints.TableCharsetCollateStrategy {
+ case TableCharsetCollateStrict:
+ alterTableOptions = append(alterTableOptions, t2Option)
+ case TableCharsetCollateIgnoreEmpty:
+ if t1Option.String != "" && t2Option.String != "" {
+ alterTableOptions = append(alterTableOptions, t2Option)
+ }
+ // if one is empty, we ignore
+ case TableCharsetCollateIgnoreAlways:
+ // ignore always
+ }
case "AUTO_INCREMENT":
switch hints.AutoIncrementStrategy {
case AutoIncrementApplyAlways:
@@ -896,6 +1028,12 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable,
for _, t2Option := range t2Options {
if _, ok := t1OptionsMap[t2Option.Name]; !ok {
switch strings.ToUpper(t2Option.Name) {
+ case "CHARSET", "COLLATE":
+ switch hints.TableCharsetCollateStrategy {
+ case TableCharsetCollateStrict:
+ alterTableOptions = append(alterTableOptions, t2Option)
+ // in all other strategies we ignore the charset
+ }
case "AUTO_INCREMENT":
switch hints.AutoIncrementStrategy {
case AutoIncrementApplyAlways, AutoIncrementApplyHigher:
@@ -942,7 +1080,7 @@ func (c *CreateTableEntity) isRangePartitionsRotation(
}
var droppedPartitions1 []*sqlparser.PartitionDefinition
// It's OK for prefix of t1 partitions to be nonexistent in t2 (as they may have been rotated away in t2)
- for len(definitions1) > 0 && !sqlparser.EqualsRefOfPartitionDefinition(definitions1[0], definitions2[0]) {
+ for len(definitions1) > 0 && !sqlparser.Equals.RefOfPartitionDefinition(definitions1[0], definitions2[0]) {
droppedPartitions1 = append(droppedPartitions1, definitions1[0])
definitions1 = definitions1[1:]
}
@@ -954,14 +1092,14 @@ func (c *CreateTableEntity) isRangePartitionsRotation(
if len(definitions1) > len(definitions2) {
return false, nil, nil
}
- // To save computation, and because we've already shown that sqlparser.EqualsRefOfPartitionDefinition(definitions1[0], definitions2[0]),
+ // To save computation, and because we've already shown that sqlparser.EqualsRefOfPartitionDefinition(definitions1[0], definitions2[0]), nil,
// we can skip one element
definitions1 = definitions1[1:]
definitions2 = definitions2[1:]
// Now let's ensure that whatever is remaining in definitions1 is an exact match for a prefix of definitions2
// It's ok if we end up with leftover elements in definition2
for len(definitions1) > 0 {
- if !sqlparser.EqualsRefOfPartitionDefinition(definitions1[0], definitions2[0]) {
+ if !sqlparser.Equals.RefOfPartitionDefinition(definitions1[0], definitions2[0]) {
return false, nil, nil
}
definitions1 = definitions1[1:]
@@ -1004,7 +1142,7 @@ func (c *CreateTableEntity) diffPartitions(alterTable *sqlparser.AlterTable,
IsAll: true,
}
alterTable.PartitionSpec = partitionSpec
- case sqlparser.EqualsRefOfPartitionOption(t1Partitions, t2Partitions):
+ case sqlparser.Equals.RefOfPartitionOption(t1Partitions, t2Partitions):
// identical partitioning
return nil, nil
default:
@@ -1089,13 +1227,13 @@ func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable,
if t1Constraint, ok := t1ConstraintsMap[normalizedT2ConstraintName]; ok {
// constraint exists in both tables
// check diff between before/after columns:
- if !sqlparser.EqualsConstraintInfo(t2Constraint.Details, t1Constraint.Details) {
+ if !sqlparser.Equals.ConstraintInfo(t2Constraint.Details, t1Constraint.Details) {
// constraints with same name have different definition.
// First we check if this is only the enforced setting that changed which can
// be directly altered.
check1Details, ok1 := t1Constraint.Details.(*sqlparser.CheckConstraintDefinition)
check2Details, ok2 := t2Constraint.Details.(*sqlparser.CheckConstraintDefinition)
- if ok1 && ok2 && sqlparser.EqualsExpr(check1Details.Expr, check2Details.Expr) {
+ if ok1 && ok2 && sqlparser.Equals.Expr(check1Details.Expr, check2Details.Expr) {
// We have the same expression, so we have a different Enforced here
alterConstraint := &sqlparser.AlterCheck{
Name: t2Constraint.Name,
@@ -1166,7 +1304,7 @@ func (c *CreateTableEntity) diffKeys(alterTable *sqlparser.AlterTable,
if t1Key, ok := t1KeysMap[t2KeyName]; ok {
// key exists in both tables
// check diff between before/after columns:
- if !sqlparser.EqualsRefOfIndexDefinition(t2Key, t1Key) {
+ if !sqlparser.Equals.RefOfIndexDefinition(t2Key, t1Key) {
indexVisibilityChange, newVisibility := indexOnlyVisibilityChange(t1Key, t2Key)
if indexVisibilityChange {
alterTable.AlterOptions = append(alterTable.AlterOptions, &sqlparser.AlterIndex{
@@ -1231,7 +1369,7 @@ func indexOnlyVisibilityChange(t1Key, t2Key *sqlparser.IndexDefinition) (bool, b
t2KeyKeptOptions = append(t2KeyKeptOptions, opt)
}
t2KeyCopy.Options = t2KeyKeptOptions
- if sqlparser.EqualsRefOfIndexDefinition(t2KeyCopy, t1KeyCopy) {
+ if sqlparser.Equals.RefOfIndexDefinition(t2KeyCopy, t1KeyCopy) {
return true, t2KeyInvisible
}
return false, false
@@ -1472,6 +1610,17 @@ func heuristicallyDetectColumnRenames(
return dropColumns, addColumns, renameColumns
}
+// primaryKeyColumns returns the columns covered by an existing PRIMARY KEY, or nil if there isn't
+// a PRIMARY KEY
+func (c *CreateTableEntity) primaryKeyColumns() []*sqlparser.IndexColumn {
+ for _, existingIndex := range c.CreateTable.TableSpec.Indexes {
+ if existingIndex.Info.Primary {
+ return existingIndex.Columns
+ }
+ }
+ return nil
+}
+
// Create implements Entity interface
func (c *CreateTableEntity) Create() EntityDiff {
return &CreateTableEntityDiff{to: c, createTable: c.CreateTable}
@@ -1658,6 +1807,21 @@ func (c *CreateTableEntity) apply(diff *AlterTableEntityDiff) error {
if !found {
return &ApplyKeyNotFoundError{Table: c.Name(), Key: opt.Name.String()}
}
+
+ // Now, if this is a normal key being dropped, let's validate it does not leave any foreign key constraint uncovered
+ switch opt.Type {
+ case sqlparser.PrimaryKeyType, sqlparser.NormalKeyType:
+ for _, cs := range c.CreateTable.TableSpec.Constraints {
+ fk, ok := cs.Details.(*sqlparser.ForeignKeyDefinition)
+ if !ok {
+ continue
+ }
+ if !c.columnsCoveredByInOrderIndex(fk.Source) {
+ return &IndexNeededByForeignKeyError{Table: c.Name(), Key: opt.Name.String()}
+ }
+ }
+ }
+
case *sqlparser.AddIndexDefinition:
// validate no existing key by same name
keyName := opt.IndexDefinition.Info.Name.String()
@@ -1720,6 +1884,12 @@ func (c *CreateTableEntity) apply(diff *AlterTableEntityDiff) error {
return &ApplyDuplicateColumnError{Table: c.Name(), Column: addedCol.Name.String()}
}
}
+ // if this column has the PRIMARY KEY option, verify there isn't already a PRIMARY KEY
+ if addedCol.Type.Options.KeyOpt == sqlparser.ColKeyPrimary {
+ if cols := c.primaryKeyColumns(); cols != nil {
+ return &DuplicateKeyNameError{Table: c.Name(), Key: "PRIMARY"}
+ }
+ }
c.TableSpec.Columns = append(c.TableSpec.Columns, addedCol)
// see if we need to position it anywhere other than end of table
if err := reorderColumn(len(c.TableSpec.Columns)-1, opt.First, opt.After); err != nil {
@@ -1743,6 +1913,24 @@ func (c *CreateTableEntity) apply(diff *AlterTableEntityDiff) error {
if !found {
return &ApplyColumnNotFoundError{Table: c.Name(), Column: opt.NewColDefinition.Name.String()}
}
+ // if this column has the PRIMARY KEY option:
+ // - validate there isn't already a PRIMARY KEY for other columns
+ // - if there isn't any PRIMARY KEY, create one
+ // - if there exists a PRIMARY KEY for exactly this column, noop
+ if opt.NewColDefinition.Type.Options.KeyOpt == sqlparser.ColKeyPrimary {
+ cols := c.primaryKeyColumns()
+ if cols == nil {
+ // add primary key
+ c.CreateTable.TableSpec.Indexes = append([]*sqlparser.IndexDefinition{newPrimaryKeyIndexDefinitionSingleColumn(opt.NewColDefinition.Name)}, c.CreateTable.TableSpec.Indexes...)
+ } else {
+ if len(cols) == 1 && strings.EqualFold(cols[0].Column.String(), opt.NewColDefinition.Name.String()) {
+ // existing PK is exactly this column. Nothing to do
+ } else {
+ return &DuplicateKeyNameError{Table: c.Name(), Key: "PRIMARY"}
+ }
+ }
+ }
+ opt.NewColDefinition.Type.Options.KeyOpt = sqlparser.ColKeyNone
case *sqlparser.RenameColumn:
// we expect the column to exist
found := false
@@ -1821,6 +2009,8 @@ func (c *CreateTableEntity) apply(diff *AlterTableEntityDiff) error {
c.TableSpec.Options = append(c.TableSpec.Options, option)
}()
}
+ case sqlparser.AlgorithmValue:
+ // silently ignore. This has an operational effect on the MySQL engine, but has no semantical effect.
default:
return &UnsupportedApplyOperationError{Statement: sqlparser.CanonicalString(opt)}
}
@@ -1866,6 +2056,7 @@ func (c *CreateTableEntity) Apply(diff EntityDiff) (Entity, error) {
// - edit or remove keys if referenced columns are dropped
// - drop check constraints for a single specific column if that column
// is the only referenced column in that check constraint.
+// - add implicit keys for foreign key constraint, if needed
func (c *CreateTableEntity) postApplyNormalize() error {
// reduce or remove keys based on existing column list
// (a column may have been removed)postApplyNormalize
@@ -1927,6 +2118,10 @@ func (c *CreateTableEntity) postApplyNormalize() error {
}
c.CreateTable.TableSpec.Constraints = keptConstraints
+ c.normalizePrimaryKeyColumns()
+ c.normalizeForeignKeyIndexes()
+ c.normalizeKeys()
+
return nil
}
@@ -1957,6 +2152,56 @@ func getKeyColumnNames(key *sqlparser.IndexDefinition) (colNames map[string]bool
return colNames
}
+// indexCoversColumnsInOrder checks if the given index covers the given columns in order and in prefix.
+// the index must either covers the exact list of columns or continue to cover additional columns beyond.
+// Used for validating indexes covering foreign keys.
+func indexCoversColumnsInOrder(index *sqlparser.IndexDefinition, columns sqlparser.Columns) bool {
+ if len(columns) == 0 {
+ return false
+ }
+ if len(index.Columns) < len(columns) {
+ // obviously the index doesn't cover the required columns
+ return false
+ }
+ for i, col := range columns {
+ // the index must cover same columns, in order, wih possibly more columns covered than requested.
+ indexCol := index.Columns[i]
+ if !strings.EqualFold(col.String(), indexCol.Column.String()) {
+ return false
+ }
+ }
+ return true
+}
+
+// indexesCoveringForeignKeyColumns returns a list of indexes that cover a given list of coumns, in-oder and in prefix.
+// Used for validating indexes covering foreign keys.
+func (c *CreateTableEntity) indexesCoveringForeignKeyColumns(columns sqlparser.Columns) (indexes []*sqlparser.IndexDefinition) {
+ for _, index := range c.CreateTable.TableSpec.Indexes {
+ if indexCoversColumnsInOrder(index, columns) {
+ indexes = append(indexes, index)
+ }
+ }
+ return indexes
+}
+
+// columnsCoveredByInOrderIndex returns 'true' when there is at least one index that covers the given
+// list of columns in-order and in-prefix.
+func (c *CreateTableEntity) columnsCoveredByInOrderIndex(columns sqlparser.Columns) bool {
+ return len(c.indexesCoveringForeignKeyColumns(columns)) > 0
+}
+
+func (c *CreateTableEntity) validateDuplicateKeyNameError() error {
+ keyNames := map[string]bool{}
+ for _, key := range c.CreateTable.TableSpec.Indexes {
+ name := key.Info.Name
+ if _, ok := keyNames[name.Lowered()]; ok {
+ return &DuplicateKeyNameError{Table: c.Name(), Key: name.String()}
+ }
+ keyNames[name.Lowered()] = true
+ }
+ return nil
+}
+
// validate checks that the table structure is valid:
// - all columns referenced by keys exist
func (c *CreateTableEntity) validate() error {
@@ -1968,6 +2213,22 @@ func (c *CreateTableEntity) validate() error {
}
columnExists[colName] = true
}
+ // validate all columns used by foreign key constraints do in fact exist,
+ // and that there exists an index over those columns
+ for _, cs := range c.CreateTable.TableSpec.Constraints {
+ fk, ok := cs.Details.(*sqlparser.ForeignKeyDefinition)
+ if !ok {
+ continue
+ }
+ if len(fk.Source) != len(fk.ReferenceDefinition.ReferencedColumns) {
+ return &ForeignKeyColumnCountMismatchError{Table: c.Name(), Constraint: cs.Name.String(), ColumnCount: len(fk.Source), ReferencedTable: fk.ReferenceDefinition.ReferencedTable.Name.String(), ReferencedColumnCount: len(fk.ReferenceDefinition.ReferencedColumns)}
+ }
+ for _, col := range fk.Source {
+ if !columnExists[col.Lowered()] {
+ return &InvalidColumnInForeignKeyConstraintError{Table: c.Name(), Constraint: cs.Name.String(), Column: col.String()}
+ }
+ }
+ }
// validate all columns referenced by indexes do in fact exist
for _, key := range c.CreateTable.TableSpec.Indexes {
for colName := range getKeyColumnNames(key) {
@@ -2018,18 +2279,6 @@ func (c *CreateTableEntity) validate() error {
}
}
}
- // validate all columns referenced by foreign key constraints do in fact exist
- for _, cs := range c.CreateTable.TableSpec.Constraints {
- check, ok := cs.Details.(*sqlparser.ForeignKeyDefinition)
- if !ok {
- continue
- }
- for _, col := range check.Source {
- if !columnExists[col.Lowered()] {
- return &InvalidColumnInForeignKeyConstraintError{Table: c.Name(), Constraint: cs.Name.String(), Column: col.String()}
- }
- }
- }
// validate all columns referenced by constraint checks do in fact exist
for _, cs := range c.CreateTable.TableSpec.Constraints {
check, ok := cs.Details.(*sqlparser.CheckConstraintDefinition)
@@ -2053,6 +2302,10 @@ func (c *CreateTableEntity) validate() error {
}
}
}
+ // validate no two keys have same name
+ if err := c.validateDuplicateKeyNameError(); err != nil {
+ return err
+ }
if partition := c.CreateTable.TableSpec.PartitionOption; partition != nil {
// validate no two partitions have same name
@@ -2110,6 +2363,6 @@ func (c *CreateTableEntity) identicalOtherThanName(other *CreateTableEntity) boo
if other == nil {
return false
}
- return sqlparser.EqualsRefOfTableSpec(c.TableSpec, other.TableSpec) &&
- sqlparser.EqualsRefOfParsedComments(c.Comments, other.Comments)
+ return sqlparser.Equals.RefOfTableSpec(c.TableSpec, other.TableSpec) &&
+ sqlparser.Equals.RefOfParsedComments(c.Comments, other.Comments)
}
diff --git a/go/vt/schemadiff/table_test.go b/go/vt/schemadiff/table_test.go
index b1715b30f31..18fce4ec759 100644
--- a/go/vt/schemadiff/table_test.go
+++ b/go/vt/schemadiff/table_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package schemadiff
import (
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -43,6 +44,8 @@ func TestCreateTableDiff(t *testing.T) {
fulltext int
colrename int
constraint int
+ charset int
+ algorithm int
}{
{
name: "identical",
@@ -189,43 +192,43 @@ func TestCreateTableDiff(t *testing.T) {
name: "reorder column, far jump",
from: "create table t1 (id int primary key, a int, b int, c int, d int)",
to: "create table t2 (a int, b int, c int, d int, id int primary key)",
- diff: "alter table t1 modify column id int primary key after d",
- cdiff: "ALTER TABLE `t1` MODIFY COLUMN `id` int PRIMARY KEY AFTER `d`",
+ diff: "alter table t1 modify column id int after d",
+ cdiff: "ALTER TABLE `t1` MODIFY COLUMN `id` int AFTER `d`",
},
{
name: "reorder column, far jump with case sentivity",
from: "create table t1 (id int primary key, a int, b int, c int, d int)",
to: "create table t2 (a int, B int, c int, d int, id int primary key)",
- diff: "alter table t1 modify column B int, modify column id int primary key after d",
- cdiff: "ALTER TABLE `t1` MODIFY COLUMN `B` int, MODIFY COLUMN `id` int PRIMARY KEY AFTER `d`",
+ diff: "alter table t1 modify column B int, modify column id int after d",
+ cdiff: "ALTER TABLE `t1` MODIFY COLUMN `B` int, MODIFY COLUMN `id` int AFTER `d`",
},
{
name: "reorder column, far jump, another reorder",
from: "create table t1 (id int primary key, a int, b int, c int, d int)",
to: "create table t2 (a int, c int, b int, d int, id int primary key)",
- diff: "alter table t1 modify column c int after a, modify column id int primary key after d",
- cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`, MODIFY COLUMN `id` int PRIMARY KEY AFTER `d`",
+ diff: "alter table t1 modify column c int after a, modify column id int after d",
+ cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`, MODIFY COLUMN `id` int AFTER `d`",
},
{
name: "reorder column, far jump, another reorder 2",
from: "create table t1 (id int primary key, a int, b int, c int, d int)",
to: "create table t2 (c int, a int, b int, d int, id int primary key)",
- diff: "alter table t1 modify column c int first, modify column id int primary key after d",
- cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int FIRST, MODIFY COLUMN `id` int PRIMARY KEY AFTER `d`",
+ diff: "alter table t1 modify column c int first, modify column id int after d",
+ cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int FIRST, MODIFY COLUMN `id` int AFTER `d`",
},
{
name: "reorder column, far jump, another reorder 3",
from: "create table t1 (id int primary key, a int, b int, c int, d int, e int, f int)",
to: "create table t2 (a int, c int, b int, d int, id int primary key, e int, f int)",
- diff: "alter table t1 modify column c int after a, modify column id int primary key after d",
- cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`, MODIFY COLUMN `id` int PRIMARY KEY AFTER `d`",
+ diff: "alter table t1 modify column c int after a, modify column id int after d",
+ cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`, MODIFY COLUMN `id` int AFTER `d`",
},
{
name: "reorder column, far jump, another reorder, removed columns",
from: "create table t1 (id int primary key, a int, b int, c int, d int, e int, f int, g int)",
to: "create table t2 (a int, c int, f int, e int, id int primary key, g int)",
- diff: "alter table t1 drop column b, drop column d, modify column f int after c, modify column id int primary key after e",
- cdiff: "ALTER TABLE `t1` DROP COLUMN `b`, DROP COLUMN `d`, MODIFY COLUMN `f` int AFTER `c`, MODIFY COLUMN `id` int PRIMARY KEY AFTER `e`",
+ diff: "alter table t1 drop column b, drop column d, modify column f int after c, modify column id int after e",
+ cdiff: "ALTER TABLE `t1` DROP COLUMN `b`, DROP COLUMN `d`, MODIFY COLUMN `f` int AFTER `c`, MODIFY COLUMN `id` int AFTER `e`",
},
{
name: "two reorders",
@@ -337,8 +340,8 @@ func TestCreateTableDiff(t *testing.T) {
name: "modify column primary key",
from: "create table t1 (`id` int)",
to: "create table t2 (id int primary key)",
- diff: "alter table t1 modify column id int primary key",
- cdiff: "ALTER TABLE `t1` MODIFY COLUMN `id` int PRIMARY KEY",
+ diff: "alter table t1 add primary key (id)",
+ cdiff: "ALTER TABLE `t1` ADD PRIMARY KEY (`id`)",
},
{
name: "added primary key",
@@ -382,6 +385,11 @@ func TestCreateTableDiff(t *testing.T) {
diff: "alter table t1 drop primary key, add primary key (id, i)",
cdiff: "ALTER TABLE `t1` DROP PRIMARY KEY, ADD PRIMARY KEY (`id`, `i`)",
},
+ {
+ name: "alternative primary key definition, no diff",
+ from: "create table t1 (`id` int primary key, i int)",
+ to: "create table t2 (`id` int, i int, primary key (id))",
+ },
{
name: "reordered key, no diff",
from: "create table t1 (`id` int primary key, i int, key i_idx(i), key i2_idx(i, `id`))",
@@ -438,6 +446,20 @@ func TestCreateTableDiff(t *testing.T) {
diff: "alter table t1 add fulltext key name_ft (`name`)",
cdiff: "ALTER TABLE `t1` ADD FULLTEXT KEY `name_ft` (`name`)",
},
+ {
+ name: "add one fulltext key with explicit parser",
+ from: "create table t1 (id int primary key, name tinytext not null)",
+ to: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)",
+ diff: "alter table t1 add fulltext key name_ft (`name`) with parser ngram",
+ cdiff: "ALTER TABLE `t1` ADD FULLTEXT KEY `name_ft` (`name`) WITH PARSER ngram",
+ },
+ {
+ name: "add one fulltext key and one normal key",
+ from: "create table t1 (id int primary key, name tinytext not null)",
+ to: "create table t1 (id int primary key, name tinytext not null, key name_idx(name(32)), fulltext key name_ft(name))",
+ diff: "alter table t1 add key name_idx (`name`(32)), add fulltext key name_ft (`name`)",
+ cdiff: "ALTER TABLE `t1` ADD KEY `name_idx` (`name`(32)), ADD FULLTEXT KEY `name_ft` (`name`)",
+ },
{
name: "add two fulltext keys, distinct statements",
from: "create table t1 (id int primary key, name1 tinytext not null, name2 tinytext not null)",
@@ -453,6 +475,26 @@ func TestCreateTableDiff(t *testing.T) {
diff: "alter table t1 add fulltext key name1_ft (name1), add fulltext key name2_ft (name2)",
cdiff: "ALTER TABLE `t1` ADD FULLTEXT KEY `name1_ft` (`name1`), ADD FULLTEXT KEY `name2_ft` (`name2`)",
},
+ {
+ name: "no fulltext diff",
+ from: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)",
+ to: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)",
+ },
+ {
+ name: "no fulltext diff, 2",
+ from: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)",
+ to: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) WITH PARSER `ngram`)",
+ },
+ {
+ name: "no fulltext diff, 3",
+ from: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)",
+ to: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) /*!50100 WITH PARSER `ngram` */)",
+ },
+ {
+ name: "no fulltext diff",
+ from: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)",
+ to: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser NGRAM)",
+ },
// CHECK constraints
{
name: "identical check constraints",
@@ -554,35 +596,56 @@ func TestCreateTableDiff(t *testing.T) {
// foreign keys
{
name: "drop foreign key",
- from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id))",
- to: "create table t2 (id int primary key, i int)",
+ from: "create table t1 (id int primary key, i int, key i_idex (i), constraint f foreign key (i) references parent(id))",
+ to: "create table t2 (id int primary key, i int, key i_idex (i))",
diff: "alter table t1 drop foreign key f",
cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`",
},
{
name: "add foreign key",
- from: "create table t1 (id int primary key, i int)",
- to: "create table t2 (id int primary key, i int, constraint f foreign key (i) references parent(id))",
+ from: "create table t1 (id int primary key, i int, key ix(i))",
+ to: "create table t2 (id int primary key, i int, key ix(i), constraint f foreign key (i) references parent(id))",
diff: "alter table t1 add constraint f foreign key (i) references parent (id)",
cdiff: "ALTER TABLE `t1` ADD CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)",
},
+ {
+ name: "add foreign key and index",
+ from: "create table t1 (id int primary key, i int)",
+ to: "create table t2 (id int primary key, i int, key ix(i), constraint f foreign key (i) references parent(id))",
+ diff: "alter table t1 add key ix (i), add constraint f foreign key (i) references parent (id)",
+ cdiff: "ALTER TABLE `t1` ADD KEY `ix` (`i`), ADD CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)",
+ },
{
name: "identical foreign key",
from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
to: "create table t2 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
- diff: "",
+ },
+ {
+ name: "implicit foreign key indexes",
+ from: "create table t1 (id int primary key, i int, key f(i), constraint f foreign key (i) references parent(id) on delete cascade)",
+ to: "create table t2 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
+ },
+ {
+ name: "implicit foreign key indexes 2",
+ from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
+ to: "create table t2 (id int primary key, i int, key f(i), constraint f foreign key (i) references parent(id) on delete cascade)",
+ },
+ {
+ name: "implicit unnamed foreign key indexes",
+ from: "create table t1 (id int primary key, i int, foreign key (i) references parent(id) on delete cascade)",
+ to: "create table t1 (id int primary key, i int, key i(i), constraint t1_ibfk_1 foreign key (i) references parent(id) on delete cascade)",
},
{
name: "modify foreign key",
- from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
- to: "create table t2 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete set null)",
+ from: "create table t1 (id int primary key, i int, key ix(i), constraint f foreign key (i) references parent(id) on delete cascade)",
+ to: "create table t2 (id int primary key, i int, key ix(i), constraint f foreign key (i) references parent(id) on delete set null)",
diff: "alter table t1 drop foreign key f, add constraint f foreign key (i) references parent (id) on delete set null",
cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`, ADD CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE SET NULL",
},
{
name: "drop and add foreign key",
- from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
- to: "create table t2 (id int primary key, i int, constraint f2 foreign key (i) references parent(id) on delete set null)",
+ from: "create table t1 (id int primary key, i int, key ix(i), constraint f foreign key (i) references parent(id) on delete cascade)",
+ to: "create table t2 (id int primary key, i int, key ix(i), constraint f2 foreign key (i) references parent(id) on delete set null)",
diff: "alter table t1 drop foreign key f, add constraint f2 foreign key (i) references parent (id) on delete set null",
cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`, ADD CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE SET NULL",
},
@@ -592,6 +655,13 @@ func TestCreateTableDiff(t *testing.T) {
to: "create table t2 (id int primary key, i int, constraint f2 foreign key (i2) references parent2(id) on delete restrict, constraint f foreign key (i) references parent(id) on delete restrict)",
diff: "",
},
+ {
+ name: "drop foreign key, but not implicit index",
+ from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)",
+ to: "create table t2 (id int primary key, i int, key f(i))",
+ diff: "alter table t1 drop foreign key f",
+ cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`",
+ },
// partitions
{
name: "identical partitioning",
@@ -933,7 +1003,57 @@ func TestCreateTableDiff(t *testing.T) {
cdiff: "ALTER TABLE `t1` AUTO_INCREMENT 100",
},
{
- name: `change table charset`,
+ name: "apply table charset",
+ from: "create table t (id int, primary key(id))",
+ to: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4",
+ diff: "alter table t charset utf8mb4",
+ cdiff: "ALTER TABLE `t` CHARSET utf8mb4",
+ },
+ {
+ name: "ignore empty table charset",
+ from: "create table t (id int, primary key(id))",
+ to: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4",
+ charset: TableCharsetCollateIgnoreEmpty,
+ },
+ {
+ name: "ignore empty table charset and collate",
+ from: "create table t (id int, primary key(id))",
+ to: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_ai_ci",
+ charset: TableCharsetCollateIgnoreEmpty,
+ },
+ {
+ name: "ignore empty table collate",
+ from: "create table t (id int, primary key(id))",
+ to: "create table t (id int, primary key(id)) COLLATE utf8mb4_0900_ai_ci",
+ charset: TableCharsetCollateIgnoreEmpty,
+ },
+ {
+ name: "ignore empty table charset and collate in target",
+ from: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_ai_ci",
+ to: "create table t (id int, primary key(id))",
+ charset: TableCharsetCollateIgnoreEmpty,
+ },
+ {
+ name: "ignore dropped collate",
+ from: "create table t (id int, primary key(id)) COLLATE utf8mb4_0900_ai_ci",
+ to: "create table t (id int, primary key(id))",
+ charset: TableCharsetCollateIgnoreEmpty,
+ },
+ {
+ name: "ignore table charset",
+ from: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8",
+ to: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4",
+ charset: TableCharsetCollateIgnoreAlways,
+ },
+ {
+ name: "change table charset",
+ from: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8",
+ to: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4",
+ diff: "alter table t charset utf8mb4",
+ cdiff: "ALTER TABLE `t` CHARSET utf8mb4",
+ },
+ {
+ name: `change table charset and columns`,
from: "create table t (id int primary key, t1 varchar(128) default null, t2 varchar(128) not null, t3 tinytext charset latin1, t4 tinytext charset latin1) default charset=utf8",
to: "create table t (id int primary key, t1 varchar(128) not null, t2 varchar(128) not null, t3 tinytext, t4 tinytext charset latin1) default charset=utf8mb4",
diff: "alter table t modify column t1 varchar(128) not null, modify column t2 varchar(128) not null, modify column t3 tinytext, charset utf8mb4",
@@ -943,8 +1063,8 @@ func TestCreateTableDiff(t *testing.T) {
name: "normalized unsigned attribute",
from: "create table t1 (id int primary key)",
to: "create table t1 (id int unsigned primary key)",
- diff: "alter table t1 modify column id int unsigned primary key",
- cdiff: "ALTER TABLE `t1` MODIFY COLUMN `id` int unsigned PRIMARY KEY",
+ diff: "alter table t1 modify column id int unsigned",
+ cdiff: "ALTER TABLE `t1` MODIFY COLUMN `id` int unsigned",
},
{
name: "normalized ENGINE InnoDB value",
@@ -985,8 +1105,40 @@ func TestCreateTableDiff(t *testing.T) {
name: "normalized COLLATE value",
from: "create table t1 (id int primary key) engine=innodb",
to: "create table t1 (id int primary key) engine=innodb, collate=UTF8_BIN",
- diff: "alter table t1 collate utf8_bin",
- cdiff: "ALTER TABLE `t1` COLLATE utf8_bin",
+ diff: "alter table t1 collate utf8mb3_bin",
+ cdiff: "ALTER TABLE `t1` COLLATE utf8mb3_bin",
+ },
+ {
+ name: "remove table comment",
+ from: "create table t1 (id int primary key) comment='foo'",
+ to: "create table t1 (id int primary key)",
+ diff: "alter table t1 comment ''",
+ cdiff: "ALTER TABLE `t1` COMMENT ''",
+ },
+ // algorithm
+ {
+ name: "algorithm: COPY",
+ from: "create table t1 (`id` int primary key)",
+ to: "create table t2 (id int primary key, `i` int not null default 0)",
+ diff: "alter table t1 add column i int not null default 0, algorithm = COPY",
+ cdiff: "ALTER TABLE `t1` ADD COLUMN `i` int NOT NULL DEFAULT 0, ALGORITHM = COPY",
+ algorithm: AlterTableAlgorithmStrategyCopy,
+ },
+ {
+ name: "algorithm: INPLACE",
+ from: "create table t1 (`id` int primary key)",
+ to: "create table t2 (id int primary key, `i` int not null default 0)",
+ diff: "alter table t1 add column i int not null default 0, algorithm = INPLACE",
+ cdiff: "ALTER TABLE `t1` ADD COLUMN `i` int NOT NULL DEFAULT 0, ALGORITHM = INPLACE",
+ algorithm: AlterTableAlgorithmStrategyInplace,
+ },
+ {
+ name: "algorithm: INSTANT",
+ from: "create table t1 (`id` int primary key)",
+ to: "create table t2 (id int primary key, `i` int not null default 0)",
+ diff: "alter table t1 add column i int not null default 0, algorithm = INSTANT",
+ cdiff: "ALTER TABLE `t1` ADD COLUMN `i` int NOT NULL DEFAULT 0, ALGORITHM = INSTANT",
+ algorithm: AlterTableAlgorithmStrategyInstant,
},
}
standardHints := DiffHints{}
@@ -1013,6 +1165,8 @@ func TestCreateTableDiff(t *testing.T) {
hints.ConstraintNamesStrategy = ts.constraint
hints.ColumnRenameStrategy = ts.colrename
hints.FullTextKeyStrategy = ts.fulltext
+ hints.TableCharsetCollateStrategy = ts.charset
+ hints.AlterTableAlgorithmStrategy = ts.algorithm
alter, err := c.Diff(other, &hints)
require.Equal(t, len(ts.diffs), len(ts.cdiffs))
@@ -1031,6 +1185,8 @@ func TestCreateTableDiff(t *testing.T) {
assert.True(t, alter.IsEmpty(), "expected empty diff, found changes")
if !alter.IsEmpty() {
t.Logf("statements[0]: %v", alter.StatementString())
+ t.Logf("c: %v", sqlparser.CanonicalString(c.CreateTable))
+ t.Logf("other: %v", sqlparser.CanonicalString(other.CreateTable))
}
default:
assert.NoError(t, err)
@@ -1119,6 +1275,66 @@ func TestValidate(t *testing.T) {
alter: "alter table t add key i_idx(i)",
to: "create table t (id int primary key, i int, key i_idx(i))",
},
+ {
+ name: "invalid table definition: primary key, same columns",
+ from: "create table t (id int primary key, i int, primary key (id))",
+ alter: "alter table t engine=innodb",
+ expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"},
+ },
+ {
+ name: "invalid table definition: primary key, different column",
+ from: "create table t (id int primary key, i int, primary key (i))",
+ alter: "alter table t engine=innodb",
+ expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"},
+ },
+ {
+ name: "add primary key",
+ from: "create table t (id int, i int)",
+ alter: "alter table t add primary key(id)",
+ to: "create table t (id int, i int, primary key (id))",
+ },
+ {
+ name: "add primary key with existing key",
+ from: "create table t (id int, i int, key i_idx (i))",
+ alter: "alter table t add primary key(id)",
+ to: "create table t (id int, i int, primary key (id), key i_idx (i))",
+ },
+ {
+ name: "modify into primary key",
+ from: "create table t (id int, i int)",
+ alter: "alter table t modify id int primary key",
+ to: "create table t (id int, i int, primary key (id))",
+ },
+ {
+ name: "modify a primary key column",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t modify id bigint primary key",
+ to: "create table t (id bigint, i int, primary key (id))",
+ },
+ {
+ name: "modify a primary key column 2",
+ from: "create table t (id int, i int, primary key (id))",
+ alter: "alter table t modify id bigint primary key",
+ to: "create table t (id bigint, i int, primary key (id))",
+ },
+ {
+ name: "fail modify another column to primary key",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t modify i int primary key",
+ expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"},
+ },
+ {
+ name: "fail add another primary key column",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t add column i2 int primary key",
+ expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"},
+ },
+ {
+ name: "fail add another primary key",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t add primary key (i)",
+ expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"},
+ },
{
name: "add key, column case",
from: "create table t (id int primary key, i int)",
@@ -1209,6 +1425,18 @@ func TestValidate(t *testing.T) {
alter: "alter table t add key i12_idx(i1, i2), add key i32_idx(i3, i2), add key i21_idx(i2, i1)",
expectErr: &InvalidColumnInKeyError{Table: "t", Column: "i3", Key: "i32_idx"},
},
+ {
+ name: "multiple primary keys",
+ from: "create table t (id int primary key, i1 int, i2 int, primary key (i1))",
+ alter: "alter table t engine=innodb",
+ expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"},
+ },
+ {
+ name: "multiple primary keys for same column",
+ from: "create table t (id int primary key, i1 int, i2 int, primary key (id))",
+ alter: "alter table t engine=innodb",
+ expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"},
+ },
// partitions
{
name: "drop column used by partitions",
@@ -1437,11 +1665,71 @@ func TestValidate(t *testing.T) {
alter: "alter table t add constraint f foreign key (z) references parent(id)",
expectErr: &InvalidColumnInForeignKeyConstraintError{Table: "t", Constraint: "f", Column: "z"},
},
+ {
+ name: "mismatching column count in foreign key",
+ from: "create table t (id int primary key, i int, constraint f foreign key (i) references parent(id, z))",
+ alter: "alter table t engine=innodb",
+ expectErr: &ForeignKeyColumnCountMismatchError{Table: "t", Constraint: "f", ColumnCount: 1, ReferencedTable: "parent", ReferencedColumnCount: 2},
+ },
{
name: "change with constraints with uppercase columns",
- from: "CREATE TABLE `Machine` (id int primary key, `a` int, `B` int, PRIMARY KEY (`id`), CONSTRAINT `chk` CHECK (`B` >= `a`))",
+ from: "CREATE TABLE `Machine` (id int primary key, `a` int, `B` int, CONSTRAINT `chk` CHECK (`B` >= `a`))",
alter: "ALTER TABLE `Machine` MODIFY COLUMN `id` bigint primary key",
- to: "CREATE TABLE `Machine` (id bigint primary key, `a` int, `B` int, PRIMARY KEY (`id`), CONSTRAINT `chk` CHECK (`B` >= `a`))",
+ to: "CREATE TABLE `Machine` (id bigint primary key, `a` int, `B` int, CONSTRAINT `chk` CHECK (`B` >= `a`))",
+ },
+ {
+ name: "add unnamed foreign key, implicitly add index",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t add foreign key (i) references parent(id)",
+ to: "create table t (id int primary key, i int, key i (i), constraint t_ibfk_1 foreign key (i) references parent(id))",
+ },
+ {
+ name: "add foreign key, implicitly add index",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t add constraint f foreign key (i) references parent(id)",
+ to: "create table t (id int primary key, i int, key f (i), constraint f foreign key (i) references parent(id))",
+ },
+ {
+ name: "add foreign key and index, no implicit index",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t add key i_idx (i), add constraint f foreign key (i) references parent(id)",
+ to: "create table t (id int primary key, i int, key i_idx (i), constraint f foreign key (i) references parent(id))",
+ },
+ {
+ name: "add foreign key and extended index, no implicit index",
+ from: "create table t (id int primary key, i int)",
+ alter: "alter table t add key i_id_idx (i, id), add constraint f foreign key (i) references parent(id)",
+ to: "create table t (id int primary key, i int, key i_id_idx (i, id), constraint f foreign key (i) references parent(id))",
+ },
+ {
+ name: "add foreign key, implicitly add index, fail duplicate key name",
+ from: "create table t (id int primary key, i int, key f(id, i))",
+ alter: "alter table t add constraint f foreign key (i) references parent(id)",
+ expectErr: &ApplyDuplicateKeyError{Table: "t", Key: "f"},
+ },
+ {
+ name: "fail drop key leaving unindexed foreign key constraint",
+ from: "create table t (id int primary key, i int, key i (i), constraint f foreign key (i) references parent(id))",
+ alter: "alter table t drop key `i`",
+ expectErr: &IndexNeededByForeignKeyError{Table: "t", Key: "i"},
+ },
+ {
+ name: "drop key with alternative key for foreign key constraint, 1",
+ from: "create table t (id int primary key, i int, key i (i), key i2 (i, id), constraint f foreign key (i) references parent(id))",
+ alter: "alter table t drop key `i`",
+ to: "create table t (id int primary key, i int, key i2 (i, id), constraint f foreign key (i) references parent(id))",
+ },
+ {
+ name: "drop key with alternative key for foreign key constraint, 2",
+ from: "create table t (id int primary key, i int, key i (i), key i2 (i, id), constraint f foreign key (i) references parent(id))",
+ alter: "alter table t drop key `i2`",
+ to: "create table t (id int primary key, i int, key i (i), constraint f foreign key (i) references parent(id))",
+ },
+ {
+ name: "drop key with alternative key for foreign key constraint, 3",
+ from: "create table t (id int primary key, i int, key i (i), key i2 (i), constraint f foreign key (i) references parent(id))",
+ alter: "alter table t drop key `i`",
+ to: "create table t (id int primary key, i int, key i2 (i), constraint f foreign key (i) references parent(id))",
},
}
hints := DiffHints{}
@@ -1462,8 +1750,12 @@ func TestValidate(t *testing.T) {
a := &AlterTableEntityDiff{from: from, alterTable: alterTable}
applied, err := from.Apply(a)
if ts.expectErr != nil {
+ appliedCanonicalStatementString := ""
+ if applied != nil {
+ appliedCanonicalStatementString = applied.Create().CanonicalStatementString()
+ }
assert.Error(t, err)
- assert.EqualError(t, err, ts.expectErr.Error())
+ assert.EqualErrorf(t, err, ts.expectErr.Error(), "applied: %v", appliedCanonicalStatementString)
} else {
assert.NoError(t, err)
assert.NotNil(t, applied)
@@ -1495,148 +1787,178 @@ func TestNormalize(t *testing.T) {
}{
{
name: "basic table",
+ from: "create table t (id int, i int, primary key (id))",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
+ },
+ {
+ name: "basic table, primary key",
from: "create table t (id int primary key, i int)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "removes default null",
- from: "create table t (id int primary key, i int default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int\n)",
+ from: "create table t (id int, i int default null, primary key (id))",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "keeps not exist",
from: "create table if not exists t (id int primary key, i int)",
- to: "CREATE TABLE IF NOT EXISTS `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int\n)",
+ to: "CREATE TABLE IF NOT EXISTS `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "timestamp null",
from: "create table t (id int primary key, t timestamp null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`t` timestamp NULL\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`t` timestamp NULL,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "timestamp default null",
from: "create table t (id int primary key, t timestamp default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`t` timestamp NULL\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`t` timestamp NULL,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "uses lowercase type",
from: "create table t (id INT primary key, i INT default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "removes default signed",
from: "create table t (id int signed primary key, i int signed)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "does not remove tinyint(1) size",
from: "create table t (id int primary key, i tinyint(1) default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` tinyint(1)\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` tinyint(1),\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "removes other tinyint size",
from: "create table t (id int primary key, i tinyint(2) default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` tinyint\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` tinyint,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "removes int size",
from: "create table t (id int primary key, i int(1) default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "removes bigint size",
from: "create table t (id int primary key, i bigint(1) default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` bigint\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` bigint,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "keeps zerofill",
from: "create table t (id int primary key, i int zerofill default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int zerofill\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int zerofill,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "removes int sizes case insensitive",
from: "create table t (id int primary key, i INT(11) default null)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i` int\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`)\n)",
+ },
+ {
+ name: "removes float size with correct type",
+ from: "create table t (id int primary key, f float(24) default null)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`f` float,\n\tPRIMARY KEY (`id`)\n)",
+ },
+ {
+ name: "removes float size with correct type",
+ from: "create table t (id int primary key, f float(25) default null)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`f` double,\n\tPRIMARY KEY (`id`)\n)",
+ },
+ {
+ name: "normalizes real type to double",
+ from: "create table t (id int primary key, f real default null)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`f` double,\n\tPRIMARY KEY (`id`)\n)",
+ },
+ {
+ name: "normalizes float4 type to float",
+ from: "create table t (id int primary key, f float4 default null)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`f` float,\n\tPRIMARY KEY (`id`)\n)",
+ },
+ {
+ name: "normalizes float8 type to double",
+ from: "create table t (id int primary key, f float8 default null)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`f` double,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "removes matching charset",
from: "create table t (id int signed primary key, v varchar(255) charset utf8mb4) charset utf8mb4",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb4",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4",
},
{
name: "removes matching case insensitive charset",
from: "create table t (id int signed primary key, v varchar(255) charset UTF8MB4) charset utf8mb4",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb4",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4",
},
{
name: "removes matching collation if default",
from: "create table t (id int signed primary key, v varchar(255) collate utf8mb4_0900_ai_ci) collate utf8mb4_0900_ai_ci",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) COLLATE utf8mb4_0900_ai_ci",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) COLLATE utf8mb4_0900_ai_ci",
},
{
name: "removes matching collation case insensitive if default",
from: "create table t (id int signed primary key, v varchar(255) collate UTF8MB4_0900_AI_CI) collate utf8mb4_0900_ai_ci",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) COLLATE utf8mb4_0900_ai_ci",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) COLLATE utf8mb4_0900_ai_ci",
},
{
name: "removes matching charset & collation if default",
from: "create table t (id int signed primary key, v varchar(255) charset utf8mb4 collate utf8mb4_0900_ai_ci) charset utf8mb4 collate utf8mb4_0900_ai_ci",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_ai_ci",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_ai_ci",
},
{
name: "sets collation for non default collation at table level",
from: "create table t (id int signed primary key, v varchar(255) charset utf8mb4) charset utf8mb4 collate utf8mb4_0900_bin",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255) COLLATE utf8mb4_0900_ai_ci\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255) COLLATE utf8mb4_0900_ai_ci,\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
},
{
name: "does not add collation for a non default collation at table level",
from: "create table t (id int signed primary key, v varchar(255)) charset utf8mb4 collate utf8mb4_0900_bin",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
},
{
name: "cleans up collation at the column level if it matches the tabel level and both are given",
from: "create table t (id int signed primary key, v varchar(255) collate utf8mb4_0900_bin) charset utf8mb4 collate utf8mb4_0900_bin",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
},
{
name: "cleans up charset and collation at the column level if it matches the tabel level and both are given",
from: "create table t (id int signed primary key, v varchar(255) charset utf8mb4 collate utf8mb4_0900_bin) charset utf8mb4 collate utf8mb4_0900_bin",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
},
{
name: "keeps existing collation even if default for non default collation at table level",
from: "create table t (id int signed primary key, v varchar(255) charset utf8mb4 collate utf8mb4_0900_ai_ci) charset utf8mb4 collate utf8mb4_0900_bin",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255) COLLATE utf8mb4_0900_ai_ci\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255) COLLATE utf8mb4_0900_ai_ci,\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
},
{
name: "keeps existing collation even if another non default collation",
from: "create table t (id int signed primary key, v varchar(255) charset utf8mb4 collate utf8mb4_german2_ci) charset utf8mb4 collate utf8mb4_0900_bin",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255) COLLATE utf8mb4_german2_ci\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255) COLLATE utf8mb4_german2_ci,\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb4,\n COLLATE utf8mb4_0900_bin",
},
{
name: "maps utf8 to utf8mb3",
from: "create table t (id int signed primary key, v varchar(255) charset utf8 collate utf8_general_ci) charset utf8 collate utf8_general_ci",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb3,\n COLLATE utf8_general_ci",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb3,\n COLLATE utf8mb3_general_ci",
},
{
name: "lowercase table options for charset and collation",
from: "create table t (id int signed primary key, v varchar(255) charset utf8 collate utf8_general_ci) charset UTF8 collate UTF8_GENERAL_CI",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb3,\n COLLATE utf8_general_ci",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb3,\n COLLATE utf8mb3_general_ci",
},
{
name: "drops existing collation if it matches table default at column level for non default charset",
from: "create table t (id int signed primary key, v varchar(255) charset utf8mb3 collate utf8_unicode_ci) charset utf8mb3 collate utf8_unicode_ci",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`v` varchar(255)\n) CHARSET utf8mb3,\n COLLATE utf8_unicode_ci",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb3,\n COLLATE utf8mb3_unicode_ci",
},
{
name: "correct case table options for engine",
from: "create table t (id int signed primary key) engine innodb",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY\n) ENGINE InnoDB",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n) ENGINE InnoDB",
},
{
name: "correct case for engine in partitions",
from: "create table a (id int not null primary key) engine InnoDB, charset utf8mb4, collate utf8mb4_0900_ai_ci partition by range (`id`) (partition `p10` values less than(10) engine innodb)",
- to: "CREATE TABLE `a` (\n\t`id` int NOT NULL PRIMARY KEY\n) ENGINE InnoDB,\n CHARSET utf8mb4,\n COLLATE utf8mb4_0900_ai_ci\nPARTITION BY RANGE (`id`)\n(PARTITION `p10` VALUES LESS THAN (10) ENGINE InnoDB)",
+ to: "CREATE TABLE `a` (\n\t`id` int NOT NULL,\n\tPRIMARY KEY (`id`)\n) ENGINE InnoDB,\n CHARSET utf8mb4,\n COLLATE utf8mb4_0900_ai_ci\nPARTITION BY RANGE (`id`)\n(PARTITION `p10` VALUES LESS THAN (10) ENGINE InnoDB)",
},
{
name: "generates a name for a key with proper casing",
@@ -1655,43 +1977,58 @@ func TestNormalize(t *testing.T) {
},
{
name: "generates a name for foreign key constraints",
+ from: "create table t1 (id int primary key, i int, key i_idx (i), foreign key (i) references parent(id))",
+ to: "CREATE TABLE `t1` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i_idx` (`i`),\n\tCONSTRAINT `t1_ibfk_1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)\n)",
+ },
+ {
+ name: "creates an index for foreign key constraints",
+ from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id))",
+ to: "CREATE TABLE `t1` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `f` (`i`),\n\tCONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)\n)",
+ },
+ {
+ name: "creates an index for unnamed foreign key constraints",
from: "create table t1 (id int primary key, i int, foreign key (i) references parent(id))",
- to: "CREATE TABLE `t1` (\n\t`id` int PRIMARY KEY,\n\t`i` int,\n\tCONSTRAINT `t1_ibfk_1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)\n)",
+ to: "CREATE TABLE `t1` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i` (`i`),\n\tCONSTRAINT `t1_ibfk_1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)\n)",
+ },
+ {
+ name: "does not add index since one already defined for foreign key constraint",
+ from: "create table t1 (id int primary key, i int, key i_idx (i), foreign key (i) references parent(id))",
+ to: "CREATE TABLE `t1` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i_idx` (`i`),\n\tCONSTRAINT `t1_ibfk_1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)\n)",
},
{
name: "uses KEY for indexes",
from: "create table t (id int primary key, i1 int, index i1_idx(i1))",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i1` int,\n\tKEY `i1_idx` (`i1`)\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i1` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i1_idx` (`i1`)\n)",
},
{
name: "drops default index type",
from: "create table t (id int primary key, i1 int, key i1_idx(i1) using btree)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i1` int,\n\tKEY `i1_idx` (`i1`)\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i1` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i1_idx` (`i1`)\n)",
},
{
name: "does not drop non-default index type",
from: "create table t (id int primary key, i1 int, key i1_idx(i1) using hash)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i1` int,\n\tKEY `i1_idx` (`i1`) USING HASH\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i1` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i1_idx` (`i1`) USING hash\n)",
},
{
name: "drops default index visibility",
from: "create table t (id int primary key, i1 int, key i1_idx(i1) visible)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i1` int,\n\tKEY `i1_idx` (`i1`)\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i1` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i1_idx` (`i1`)\n)",
},
{
name: "drops non-default index visibility",
from: "create table t (id int primary key, i1 int, key i1_idx(i1) invisible)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i1` int,\n\tKEY `i1_idx` (`i1`) INVISIBLE\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i1` int,\n\tPRIMARY KEY (`id`),\n\tKEY `i1_idx` (`i1`) INVISIBLE\n)",
},
{
name: "drops default column visibility",
from: "create table t (id int primary key, i1 int visible)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i1` int\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i1` int,\n\tPRIMARY KEY (`id`)\n)",
},
{
name: "drops non-default column visibility",
from: "create table t (id int primary key, i1 int invisible)",
- to: "CREATE TABLE `t` (\n\t`id` int PRIMARY KEY,\n\t`i1` int INVISIBLE\n)",
+ to: "CREATE TABLE `t` (\n\t`id` int,\n\t`i1` int INVISIBLE,\n\tPRIMARY KEY (`id`)\n)",
},
}
for _, ts := range tt {
@@ -1707,3 +2044,110 @@ func TestNormalize(t *testing.T) {
})
}
}
+
+func TestIndexesCoveringForeignKeyColumns(t *testing.T) {
+ sql := `
+ create table t (
+ id int,
+ a int,
+ b int,
+ c int,
+ d int,
+ e int,
+ z int,
+ primary key (id),
+ key ax (a),
+ key abx (a, b),
+ key bx (b),
+ key bax (b, a),
+ key abcdx (a, b, c, d),
+ key dex (d, e)
+ )
+ `
+ tt := []struct {
+ columns []string
+ indexes []string
+ }{
+ {},
+ {
+ columns: []string{"a"},
+ indexes: []string{"ax", "abx", "abcdx"},
+ },
+ {
+ columns: []string{"b"},
+ indexes: []string{"bx", "bax"},
+ },
+ {
+ columns: []string{"c"},
+ },
+ {
+ columns: []string{"d"},
+ indexes: []string{"dex"},
+ },
+ {
+ columns: []string{"e"},
+ },
+ {
+ columns: []string{"z"},
+ },
+ {
+ columns: []string{"a", "b"},
+ indexes: []string{"abx", "abcdx"},
+ },
+ {
+ columns: []string{"A", "B"},
+ indexes: []string{"abx", "abcdx"},
+ },
+ {
+ columns: []string{"a", "b", "c"},
+ indexes: []string{"abcdx"},
+ },
+ {
+ columns: []string{"a", "b", "c", "d"},
+ indexes: []string{"abcdx"},
+ },
+ {
+ columns: []string{"a", "b", "c", "d", "e"},
+ },
+ {
+ columns: []string{"b", "a"},
+ indexes: []string{"bax"},
+ },
+ {
+ columns: []string{"d", "e"},
+ indexes: []string{"dex"},
+ },
+ {
+ columns: []string{"a", "e"},
+ },
+ }
+
+ stmt, err := sqlparser.ParseStrictDDL(sql)
+ require.NoError(t, err)
+ createTable, ok := stmt.(*sqlparser.CreateTable)
+ require.True(t, ok)
+ c, err := NewCreateTableEntity(createTable)
+ require.NoError(t, err)
+ tableColumns := map[string]sqlparser.IdentifierCI{}
+ for _, col := range c.CreateTable.TableSpec.Columns {
+ tableColumns[col.Name.Lowered()] = col.Name
+ }
+ for _, ts := range tt {
+ name := strings.Join(ts.columns, ",")
+ t.Run(name, func(t *testing.T) {
+ columns := sqlparser.Columns{}
+ for _, colName := range ts.columns {
+ col, ok := tableColumns[strings.ToLower(colName)]
+ require.True(t, ok)
+ columns = append(columns, col)
+ }
+
+ indexes := c.indexesCoveringForeignKeyColumns(columns)
+ var indexesNames []string
+ for _, index := range indexes {
+ indexesNames = append(indexesNames, index.Info.Name.String())
+ }
+ assert.Equal(t, ts.indexes, indexesNames)
+ })
+ }
+}
diff --git a/go/vt/schemadiff/types.go b/go/vt/schemadiff/types.go
index dce0d10c842..1f1186b41bd 100644
--- a/go/vt/schemadiff/types.go
+++ b/go/vt/schemadiff/types.go
@@ -88,13 +88,28 @@ const (
FullTextKeyUnifyStatements
)
+const (
+ TableCharsetCollateStrict int = iota
+ TableCharsetCollateIgnoreEmpty
+ TableCharsetCollateIgnoreAlways
+)
+
+const (
+ AlterTableAlgorithmStrategyNone int = iota
+ AlterTableAlgorithmStrategyInstant
+ AlterTableAlgorithmStrategyInplace
+ AlterTableAlgorithmStrategyCopy
+)
+
// DiffHints is an assortment of rules for diffing entities
type DiffHints struct {
- StrictIndexOrdering bool
- AutoIncrementStrategy int
- RangeRotationStrategy int
- ConstraintNamesStrategy int
- ColumnRenameStrategy int
- TableRenameStrategy int
- FullTextKeyStrategy int
+ StrictIndexOrdering bool
+ AutoIncrementStrategy int
+ RangeRotationStrategy int
+ ConstraintNamesStrategy int
+ ColumnRenameStrategy int
+ TableRenameStrategy int
+ FullTextKeyStrategy int
+ TableCharsetCollateStrategy int
+ AlterTableAlgorithmStrategy int
}
diff --git a/go/vt/schemadiff/view.go b/go/vt/schemadiff/view.go
index 46cb66dcff2..5be5386c106 100644
--- a/go/vt/schemadiff/view.go
+++ b/go/vt/schemadiff/view.go
@@ -304,8 +304,8 @@ func (c *CreateViewEntity) identicalOtherThanName(other *CreateViewEntity) bool
c.Security == other.Security &&
c.CheckOption == other.CheckOption &&
c.IsReplace == other.IsReplace &&
- sqlparser.EqualsRefOfDefiner(c.Definer, other.Definer) &&
- sqlparser.EqualsColumns(c.Columns, other.Columns) &&
- sqlparser.EqualsSelectStatement(c.Select, other.Select) &&
- sqlparser.EqualsRefOfParsedComments(c.Comments, other.Comments)
+ sqlparser.Equals.RefOfDefiner(c.Definer, other.Definer) &&
+ sqlparser.Equals.Columns(c.Columns, other.Columns) &&
+ sqlparser.Equals.SelectStatement(c.Select, other.Select) &&
+ sqlparser.Equals.RefOfParsedComments(c.Comments, other.Comments)
}
diff --git a/go/vt/servenv/buildinfo.go b/go/vt/servenv/buildinfo.go
index c03c85009ed..15e34217dae 100644
--- a/go/vt/servenv/buildinfo.go
+++ b/go/vt/servenv/buildinfo.go
@@ -40,7 +40,7 @@ var (
)
func registerVersionFlag(fs *pflag.FlagSet) {
- fs.BoolVar(&version, "version", version, "print binary version")
+ fs.BoolVarP(&version, "version", "v", version, "print binary version")
}
// AppVersion is the struct to store build info.
@@ -91,10 +91,7 @@ func (v *versionInfo) String() string {
}
func (v *versionInfo) MySQLVersion() string {
- if mySQLServerVersion != "" {
- return mySQLServerVersion
- }
- return "5.7.9-vitess-" + v.version
+ return mySQLServerVersion
}
func init() {
diff --git a/go/vt/servenv/buildinfo_test.go b/go/vt/servenv/buildinfo_test.go
index 15b2bd4ec80..e6793c915d0 100644
--- a/go/vt/servenv/buildinfo_test.go
+++ b/go/vt/servenv/buildinfo_test.go
@@ -33,17 +33,17 @@ func TestVersionString(t *testing.T) {
buildTimePretty: "time is now",
buildGitRev: "d54b87ca0be09b678bb4490060e8f23f890ddb92",
buildGitBranch: "gitBranch",
- goVersion: "1.18.5",
+ goVersion: "1.19.3",
goOS: "amiga",
goArch: "amd64",
version: "v1.2.3-SNAPSHOT",
}
- assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.18.5 amiga/amd64", v.String())
+ assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.19.3 amiga/amd64", v.String())
v.jenkinsBuildNumber = 422
- assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Jenkins build 422) (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.18.5 amiga/amd64", v.String())
+ assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Jenkins build 422) (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.19.3 amiga/amd64", v.String())
- assert.Equal(t, "5.7.9-vitess-v1.2.3-SNAPSHOT", v.MySQLVersion())
+ assert.Equal(t, "8.0.30-Vitess", v.MySQLVersion())
}
diff --git a/go/vt/servenv/exporter.go b/go/vt/servenv/exporter.go
index 397be415581..d8eb4ef428d 100644
--- a/go/vt/servenv/exporter.go
+++ b/go/vt/servenv/exporter.go
@@ -102,6 +102,7 @@ type Exporter struct {
name, label string
handleFuncs map[string]*handleFunc
sp *statusPage
+ mu sync.Mutex
}
// NewExporter creates a new Exporter with name as namespace.
@@ -154,6 +155,8 @@ func (e *Exporter) URLPrefix() string {
// url remapped from /path to /name/path. If name is empty, the request
// is passed through to http.HandleFunc.
func (e *Exporter) HandleFunc(url string, f func(w http.ResponseWriter, r *http.Request)) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
if e.name == "" {
http.HandleFunc(url, f)
return
diff --git a/go/vt/servenv/grpc_server.go b/go/vt/servenv/grpc_server.go
index 97749bf7efc..bd79aed8108 100644
--- a/go/vt/servenv/grpc_server.go
+++ b/go/vt/servenv/grpc_server.go
@@ -183,8 +183,6 @@ func createGRPCServer() {
return
}
- grpccommon.EnableTracingOpt()
-
var opts []grpc.ServerOption
if gRPCCert != "" && gRPCKey != "" {
config, err := vttls.ServerConfig(gRPCCert, gRPCKey, gRPCCA, gRPCCRL, gRPCServerCA, tls.VersionTLS12)
diff --git a/go/vt/servenv/grpc_server_auth_static.go b/go/vt/servenv/grpc_server_auth_static.go
index ffcd8a72c56..b7c7142508a 100644
--- a/go/vt/servenv/grpc_server_auth_static.go
+++ b/go/vt/servenv/grpc_server_auth_static.go
@@ -36,6 +36,14 @@ var (
_ Authenticator = (*StaticAuthPlugin)(nil)
)
+// The datatype for static auth Context keys
+type staticAuthKey int
+
+const (
+ // Internal Context key for the authenticated username
+ staticAuthUsername staticAuthKey = 0
+)
+
func registerGRPCServerAuthStaticFlags(fs *pflag.FlagSet) {
fs.StringVar(&credsFile, "grpc_auth_static_password_file", credsFile, "JSON File to read the users/passwords from.")
}
@@ -66,7 +74,7 @@ func (sa *StaticAuthPlugin) Authenticate(ctx context.Context, fullMethod string)
password := md["password"][0]
for _, authEntry := range sa.entries {
if username == authEntry.Username && password == authEntry.Password {
- return ctx, nil
+ return newStaticAuthContext(ctx, username), nil
}
}
return nil, status.Errorf(codes.PermissionDenied, "auth failure: caller %q provided invalid credentials", username)
@@ -74,6 +82,19 @@ func (sa *StaticAuthPlugin) Authenticate(ctx context.Context, fullMethod string)
return nil, status.Errorf(codes.Unauthenticated, "username and password must be provided")
}
+// StaticAuthUsernameFromContext returns the username authenticated by the static auth plugin and stored in the Context, if any
+func StaticAuthUsernameFromContext(ctx context.Context) string {
+ username, ok := ctx.Value(staticAuthUsername).(string)
+ if ok {
+ return username
+ }
+ return ""
+}
+
+func newStaticAuthContext(ctx context.Context, username string) context.Context {
+ return context.WithValue(ctx, staticAuthUsername, username)
+}
+
func staticAuthPluginInitializer() (Authenticator, error) {
entries := make([]StaticAuthConfigEntry, 0)
if credsFile == "" {
diff --git a/go/vt/servenv/mysql.go b/go/vt/servenv/mysql.go
index e74d4d07acf..94019a1c42c 100644
--- a/go/vt/servenv/mysql.go
+++ b/go/vt/servenv/mysql.go
@@ -23,7 +23,7 @@ import (
// mySQLServerVersion is what Vitess will present as it's version during the connection handshake,
// and as the value to the @@version system variable. If nothing is provided, Vitess will report itself as
// a specific MySQL version with the vitess version appended to it
-var mySQLServerVersion string
+var mySQLServerVersion = "8.0.30-Vitess"
// RegisterMySQLServerFlags installs the flags needed to specify or expose a
// particular MySQL server version from Vitess.
@@ -51,7 +51,6 @@ func init() {
"vtbackup",
"vtcombo",
"vtctl",
- "vtctld",
"vtctldclient",
"vtexplain",
"vtgate",
diff --git a/go/vt/servenv/servenv.go b/go/vt/servenv/servenv.go
index 5c851b295ee..03b2973698b 100644
--- a/go/vt/servenv/servenv.go
+++ b/go/vt/servenv/servenv.go
@@ -29,9 +29,12 @@ limitations under the License.
package servenv
import (
+ // register the HTTP handlers for profiling
+ _ "net/http/pprof"
"net/url"
"os"
"os/signal"
+ "runtime/debug"
"strings"
"sync"
"syscall"
@@ -48,8 +51,6 @@ import (
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/vterrors"
- // register the HTTP handlers for profiling
- _ "net/http/pprof"
// register the proper init and shutdown hooks for logging
_ "vitess.io/vitess/go/vt/logutil"
@@ -78,8 +79,10 @@ var (
var (
lameduckPeriod = 50 * time.Millisecond
onTermTimeout = 10 * time.Second
- onCloseTimeout = time.Nanosecond
+ onCloseTimeout = 10 * time.Second
catchSigpipe bool
+ maxStackSize = 64 * 1024 * 1024
+ initStartTime time.Time // time when tablet init started: for debug purposes to time how long a tablet init takes
)
// RegisterFlags installs the flags used by Init, Run, and RunDefault.
@@ -92,16 +95,24 @@ func RegisterFlags() {
fs.DurationVar(&onTermTimeout, "onterm_timeout", onTermTimeout, "wait no more than this for OnTermSync handlers before stopping")
fs.DurationVar(&onCloseTimeout, "onclose_timeout", onCloseTimeout, "wait no more than this for OnClose handlers before stopping")
fs.BoolVar(&catchSigpipe, "catch-sigpipe", catchSigpipe, "catch and ignore SIGPIPE on stdout and stderr if specified")
+ fs.IntVar(&maxStackSize, "max-stack-size", maxStackSize, "configure the maximum stack size in bytes")
// pid_file.go
fs.StringVar(&pidFile, "pid_file", pidFile, "If set, the process will write its pid to the named file, and delete it on graceful shutdown.")
})
}
+func GetInitStartTime() time.Time {
+ mu.Lock()
+ defer mu.Unlock()
+ return initStartTime
+}
+
// Init is the first phase of the server startup.
func Init() {
mu.Lock()
defer mu.Unlock()
+ initStartTime = time.Now()
// Ignore SIGPIPE if specified
// The Go runtime catches SIGPIPE for us on all fds except stdout/stderr
@@ -141,6 +152,11 @@ func Init() {
fdl := stats.NewGauge("MaxFds", "File descriptor limit")
fdl.Set(int64(fdLimit.Cur))
+ // Limit the stack size. We don't need huge stacks and smaller limits mean
+ // any infinite recursion fires earlier and on low memory systems avoids
+ // out of memory issues in favor of a stack overflow error.
+ debug.SetMaxStack(maxStackSize)
+
onInitHooks.Fire()
}
@@ -319,6 +335,8 @@ func ParseFlags(cmd string) {
_flag.Usage()
log.Exitf("%s doesn't take any positional arguments, got '%s'", cmd, strings.Join(args, " "))
}
+
+ logutil.PurgeLogs()
}
// GetFlagSetFor returns the flag set for a given command.
@@ -348,6 +366,8 @@ func ParseFlagsWithArgs(cmd string) []string {
log.Exitf("%s expected at least one positional argument", cmd)
}
+ logutil.PurgeLogs()
+
return args
}
@@ -379,12 +399,26 @@ func init() {
"vtgate",
"vtgateclienttest",
"vtgr",
+ "vtorc",
"vttablet",
"vttestserver",
} {
OnParseFor(cmd, grpccommon.RegisterFlags)
}
+ // These are the binaries that export stats
+ for _, cmd := range []string{
+ "vtbackup",
+ "vtcombo",
+ "vtctld",
+ "vtgate",
+ "vtgr",
+ "vttablet",
+ "vtorc",
+ } {
+ OnParseFor(cmd, stats.RegisterFlags)
+ }
+
// Flags in package log are installed for all binaries.
OnParse(log.RegisterFlags)
// Flags in package logutil are installed for all binaries.
diff --git a/go/vt/servenv/servenv_test.go b/go/vt/servenv/servenv_test.go
index 3d835fcea1a..b7bd874989a 100644
--- a/go/vt/servenv/servenv_test.go
+++ b/go/vt/servenv/servenv_test.go
@@ -65,9 +65,7 @@ func TestFireOnCloseHooksTimeout(t *testing.T) {
time.Sleep(1 * time.Second)
})
- // we deliberatly test the flag to make sure it's not accidently set to a
- // high value.
- if finished, want := fireOnCloseHooks(onCloseTimeout), false; finished != want {
+ if finished, want := fireOnCloseHooks(1*time.Nanosecond), false; finished != want {
t.Errorf("finished = %v, want %v", finished, want)
}
}
diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go
index 8585fdd55ee..0f2e11ff73c 100644
--- a/go/vt/servenv/version.go
+++ b/go/vt/servenv/version.go
@@ -1,5 +1,5 @@
/*
-Copyright 2022 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,9 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES
-// DO NOT EDIT
-
package servenv
-const versionName = "15.0.0-SNAPSHOT"
+// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES BY ./tools/do_releases.sh
+// DO NOT EDIT
+
+const versionName = "16.0.5-SNAPSHOT"
diff --git a/go/vt/sidecardb/doc.go b/go/vt/sidecardb/doc.go
new file mode 100644
index 00000000000..72a10244b04
--- /dev/null
+++ b/go/vt/sidecardb/doc.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sidecardb
+
+/*
+
+The sidecardb module is used to create and upgrade the sidecar database schema on tablet init. The sidecar database
+is named `_vt`.
+
+The schema subdirectory has subdirectories, categorized by module, with one file per table in _vt. Each has the latest
+schema for each table in _vt (in the form of a create table statement).
+
+sidecardb uses the schemadiff module in Vitess to reach the desired schema for each table.
+
+Note:
+
+The `if not exists` in the schema files should not be needed since we only create tables in the sidecar database if they don't exist.
+However, during development, we came across some Vitess flows like backup restore on replicas where the database
+already had the tables but mysql replication also found these `create`s
+in the primary's binlog causing the replica tablet to halt since it could not execute the duplicate create.
+
+We did fix these flows and hence ideally this **should never happen**, but as an abundance of caution
+I have left it in now for operational reasons, so that we paper over any bugs for now.
+We can remove it in v17 or v18 once the schema init is stable and we have done more testing.
+
+*/
diff --git a/go/test/endtoend/backup/transform/backup_transform_test.go b/go/vt/sidecardb/schema/misc/heartbeat.sql
similarity index 65%
rename from go/test/endtoend/backup/transform/backup_transform_test.go
rename to go/vt/sidecardb/schema/misc/heartbeat.sql
index 071f7d536e3..cacd80529b5 100644
--- a/go/test/endtoend/backup/transform/backup_transform_test.go
+++ b/go/vt/sidecardb/schema/misc/heartbeat.sql
@@ -1,5 +1,5 @@
/*
-Copyright 2019 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,17 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package transform
-
-import "testing"
-
-func TestMain(m *testing.M) {
- TestMainSetup(m, false)
-}
-
-func TestBackupTransform(t *testing.T) {
- TestBackupTransformImpl(t)
-}
-func TestBackupTransformError(t *testing.T) {
- TestBackupTransformErrorImpl(t)
-}
+CREATE TABLE IF NOT EXISTS _vt.heartbeat
+(
+ keyspaceShard VARBINARY(256) NOT NULL,
+ tabletUid INT UNSIGNED NOT NULL,
+ ts BIGINT UNSIGNED NOT NULL,
+ PRIMARY KEY (`keyspaceShard`)
+) engine = InnoDB
diff --git a/go/test/endtoend/backup/transform/mysqlctld/backup_transform_mysqlctld_test.go b/go/vt/sidecardb/schema/misc/reparent_journal.sql
similarity index 59%
rename from go/test/endtoend/backup/transform/mysqlctld/backup_transform_mysqlctld_test.go
rename to go/vt/sidecardb/schema/misc/reparent_journal.sql
index 0a3b11227da..74534f57098 100644
--- a/go/test/endtoend/backup/transform/mysqlctld/backup_transform_mysqlctld_test.go
+++ b/go/vt/sidecardb/schema/misc/reparent_journal.sql
@@ -1,5 +1,5 @@
/*
-Copyright 2019 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,21 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package mysqlctld
+CREATE TABLE IF NOT EXISTS _vt.reparent_journal
+(
+ `time_created_ns` bigint(20) unsigned NOT NULL,
+ `action_name` varbinary(250) NOT NULL,
+ `primary_alias` varbinary(32) NOT NULL,
+ `replication_position` varbinary(64000) DEFAULT NULL,
-import (
- "testing"
-
- "vitess.io/vitess/go/test/endtoend/backup/transform"
-)
-
-func TestMain(m *testing.M) {
- transform.TestMainSetup(m, true)
-}
-
-func TestBackupTransform(t *testing.T) {
- transform.TestBackupTransformImpl(t)
-}
-func TestBackupTransformError(t *testing.T) {
- transform.TestBackupTransformErrorImpl(t)
-}
+ PRIMARY KEY (`time_created_ns`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/misc/views.sql b/go/vt/sidecardb/schema/misc/views.sql
new file mode 100644
index 00000000000..b70d9bb41df
--- /dev/null
+++ b/go/vt/sidecardb/schema/misc/views.sql
@@ -0,0 +1,24 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.views
+(
+ TABLE_SCHEMA varchar(64) NOT NULL,
+ TABLE_NAME varchar(64) NOT NULL,
+ CREATE_STATEMENT longtext NOT NULL,
+ UPDATED_AT TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ PRIMARY KEY (TABLE_SCHEMA, TABLE_NAME)
+) engine = InnoDB
diff --git a/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql b/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql
new file mode 100644
index 00000000000..54aa1f9cbb6
--- /dev/null
+++ b/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql
@@ -0,0 +1,82 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.schema_migrations
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `migration_uuid` varchar(64) NOT NULL,
+ `keyspace` varchar(256) NOT NULL,
+ `shard` varchar(255) NOT NULL,
+ `mysql_schema` varchar(128) NOT NULL,
+ `mysql_table` varchar(128) NOT NULL,
+ `migration_statement` text NOT NULL,
+ `strategy` varchar(128) NOT NULL,
+ `options` varchar(8192) NOT NULL,
+ `added_timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `requested_timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `ready_timestamp` timestamp NULL DEFAULT NULL,
+ `started_timestamp` timestamp NULL DEFAULT NULL,
+ `liveness_timestamp` timestamp NULL DEFAULT NULL,
+ `completed_timestamp` timestamp(6) NULL DEFAULT NULL,
+ `cleanup_timestamp` timestamp NULL DEFAULT NULL,
+ `migration_status` varchar(128) NOT NULL,
+ `log_path` varchar(1024) NOT NULL,
+ `artifacts` text NOT NULL,
+ `retries` int unsigned NOT NULL DEFAULT '0',
+ `tablet` varchar(128) NOT NULL DEFAULT '',
+ `tablet_failure` tinyint unsigned NOT NULL DEFAULT '0',
+ `progress` float NOT NULL DEFAULT '0',
+ `migration_context` varchar(1024) NOT NULL DEFAULT '',
+ `ddl_action` varchar(16) NOT NULL DEFAULT '',
+ `message` text NOT NULL,
+ `eta_seconds` bigint NOT NULL DEFAULT '-1',
+ `rows_copied` bigint unsigned NOT NULL DEFAULT '0',
+ `table_rows` bigint NOT NULL DEFAULT '0',
+ `added_unique_keys` int unsigned NOT NULL DEFAULT '0',
+ `removed_unique_keys` int unsigned NOT NULL DEFAULT '0',
+ `log_file` varchar(1024) NOT NULL DEFAULT '',
+ `retain_artifacts_seconds` bigint NOT NULL DEFAULT '0',
+ `postpone_completion` tinyint unsigned NOT NULL DEFAULT '0',
+ `removed_unique_key_names` text NOT NULL,
+ `dropped_no_default_column_names` text NOT NULL,
+ `expanded_column_names` text NOT NULL,
+ `revertible_notes` text NOT NULL,
+ `allow_concurrent` tinyint unsigned NOT NULL DEFAULT '0',
+ `reverted_uuid` varchar(64) NOT NULL DEFAULT '',
+ `is_view` tinyint unsigned NOT NULL DEFAULT '0',
+ `ready_to_complete` tinyint unsigned NOT NULL DEFAULT '0',
+ `stowaway_table` tinytext NOT NULL,
+ `vitess_liveness_indicator` bigint NOT NULL DEFAULT '0',
+ `user_throttle_ratio` float NOT NULL DEFAULT '0',
+ `special_plan` text NOT NULL,
+ `last_throttled_timestamp` timestamp NULL DEFAULT NULL,
+ `component_throttled` tinytext NOT NULL,
+ `cancelled_timestamp` timestamp NULL DEFAULT NULL,
+ `postpone_launch` tinyint unsigned NOT NULL DEFAULT '0',
+ `stage` text NOT NULL,
+ `cutover_attempts` int unsigned NOT NULL DEFAULT '0',
+ `is_immediate_operation` tinyint unsigned NOT NULL DEFAULT '0',
+ `reviewed_timestamp` timestamp NULL DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uuid_idx` (`migration_uuid`),
+ KEY `keyspace_shard_idx` (`keyspace`(64), `shard`(64)),
+ KEY `status_idx` (`migration_status`, `liveness_timestamp`),
+ KEY `cleanup_status_idx` (`cleanup_timestamp`, `migration_status`),
+ KEY `tablet_failure_idx` (`tablet_failure`, `migration_status`, `retries`),
+ KEY `table_complete_idx` (`migration_status`, `keyspace`(64), `mysql_table`(64), `completed_timestamp`),
+ KEY `migration_context_idx` (`migration_context`(64)),
+ KEY `reverted_uuid_idx` (`reverted_uuid`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/schematracker/schemacopy.sql b/go/vt/sidecardb/schema/schematracker/schemacopy.sql
new file mode 100644
index 00000000000..95cd7c34f3f
--- /dev/null
+++ b/go/vt/sidecardb/schema/schematracker/schemacopy.sql
@@ -0,0 +1,28 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.schemacopy
+(
+ `table_schema` varchar(64) NOT NULL,
+ `table_name` varchar(64) NOT NULL,
+ `column_name` varchar(64) NOT NULL,
+ `ordinal_position` bigint unsigned NOT NULL,
+ `character_set_name` varchar(32) DEFAULT NULL,
+ `collation_name` varchar(32) DEFAULT NULL,
+ `data_type` varchar(64) NOT NULL,
+ `column_key` varchar(3) NOT NULL,
+ PRIMARY KEY (`table_schema`, `table_name`, `ordinal_position`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/twopc/dt_participant.sql b/go/vt/sidecardb/schema/twopc/dt_participant.sql
new file mode 100644
index 00000000000..66ff4bda987
--- /dev/null
+++ b/go/vt/sidecardb/schema/twopc/dt_participant.sql
@@ -0,0 +1,24 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.dt_participant
+(
+ dtid varbinary(512) NOT NULL,
+ id bigint NOT NULL,
+ keyspace varchar(256) NOT NULL,
+ shard varchar(256) NOT NULL,
+ primary key(dtid, id)
+) ENGINE = InnoDB
diff --git a/go/vt/servenv/purgelogs.go b/go/vt/sidecardb/schema/twopc/dt_state.sql
similarity index 72%
rename from go/vt/servenv/purgelogs.go
rename to go/vt/sidecardb/schema/twopc/dt_state.sql
index e5edc9e7be2..e877a31a75f 100644
--- a/go/vt/servenv/purgelogs.go
+++ b/go/vt/sidecardb/schema/twopc/dt_state.sql
@@ -1,5 +1,5 @@
/*
-Copyright 2019 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,15 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package servenv
-
-import (
- "vitess.io/vitess/go/vt/logutil"
-)
-
-func init() {
- OnInit(func() {
- go logutil.PurgeLogs()
- })
-
-}
+CREATE TABLE IF NOT EXISTS _vt.dt_state
+(
+ dtid varbinary(512) NOT NULL,
+ state bigint NOT NULL,
+ time_created bigint NOT NULL,
+ primary key(dtid)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/twopc/redo_state.sql b/go/vt/sidecardb/schema/twopc/redo_state.sql
new file mode 100644
index 00000000000..a1122b0ac8f
--- /dev/null
+++ b/go/vt/sidecardb/schema/twopc/redo_state.sql
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.redo_state(
+ dtid varbinary(512) NOT NULL,
+ state bigint NOT NULL,
+ time_created bigint NOT NULL,
+ primary key(dtid)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/twopc/redo_statement.sql b/go/vt/sidecardb/schema/twopc/redo_statement.sql
new file mode 100644
index 00000000000..148cc0bb3c0
--- /dev/null
+++ b/go/vt/sidecardb/schema/twopc/redo_statement.sql
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.redo_statement(
+ dtid varbinary(512) NOT NULL,
+ id bigint NOT NULL,
+ statement mediumblob NOT NULL,
+ primary key(dtid, id)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vdiff/vdiff.sql b/go/vt/sidecardb/schema/vdiff/vdiff.sql
new file mode 100644
index 00000000000..24f5cf6e7ab
--- /dev/null
+++ b/go/vt/sidecardb/schema/vdiff/vdiff.sql
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.vdiff
+(
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `vdiff_uuid` varchar(64) NOT NULL,
+ `workflow` varbinary(1024) DEFAULT NULL,
+ `keyspace` varbinary(256) DEFAULT NULL,
+ `shard` varchar(255) NOT NULL,
+ `db_name` varbinary(1024) DEFAULT NULL,
+ `state` varbinary(64) DEFAULT NULL,
+ `options` json DEFAULT NULL,
+ `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `started_at` timestamp NULL DEFAULT NULL,
+ `liveness_timestamp` timestamp NULL DEFAULT NULL,
+ `completed_at` timestamp NULL DEFAULT NULL,
+ `last_error` varbinary(512) DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `uuid_idx` (`vdiff_uuid`),
+ KEY `state` (`state`),
+ KEY `ks_wf_idx` (`keyspace`(64), `workflow`(64))
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vdiff/vdiff_log.sql b/go/vt/sidecardb/schema/vdiff/vdiff_log.sql
new file mode 100644
index 00000000000..2935baf9b24
--- /dev/null
+++ b/go/vt/sidecardb/schema/vdiff/vdiff_log.sql
@@ -0,0 +1,24 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.vdiff_log
+(
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `vdiff_id` int(11) NOT NULL,
+ `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `message` text NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vdiff/vdiff_table.sql b/go/vt/sidecardb/schema/vdiff/vdiff_table.sql
new file mode 100644
index 00000000000..81f0ba17599
--- /dev/null
+++ b/go/vt/sidecardb/schema/vdiff/vdiff_table.sql
@@ -0,0 +1,30 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.vdiff_table
+(
+ `vdiff_id` varchar(64) NOT NULL,
+ `table_name` varbinary(128) NOT NULL,
+ `state` varbinary(64) DEFAULT NULL,
+ `lastpk` varbinary(2000) DEFAULT NULL,
+ `table_rows` bigint(20) NOT NULL DEFAULT '0',
+ `rows_compared` bigint(20) NOT NULL DEFAULT '0',
+ `mismatch` tinyint(1) NOT NULL DEFAULT '0',
+ `report` json DEFAULT NULL,
+ `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ PRIMARY KEY (`vdiff_id`, `table_name`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vreplication/copy_state.sql b/go/vt/sidecardb/schema/vreplication/copy_state.sql
new file mode 100644
index 00000000000..f7005135aba
--- /dev/null
+++ b/go/vt/sidecardb/schema/vreplication/copy_state.sql
@@ -0,0 +1,25 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.copy_state
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `vrepl_id` int NOT NULL,
+ `table_name` varbinary(128) NOT NULL,
+ `lastpk` varbinary(2000) DEFAULT NULL,
+ PRIMARY KEY (`id`),
+ KEY `vrepl_id` (`vrepl_id`,`table_name`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vreplication/post_copy_action.sql b/go/vt/sidecardb/schema/vreplication/post_copy_action.sql
new file mode 100644
index 00000000000..8ca979fc15d
--- /dev/null
+++ b/go/vt/sidecardb/schema/vreplication/post_copy_action.sql
@@ -0,0 +1,24 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.post_copy_action(
+ id BIGINT NOT NULL auto_increment,
+ vrepl_id INT NOT NULL,
+ table_name VARBINARY(128) NOT NULL,
+ action JSON NOT NULL,
+ UNIQUE KEY (vrepl_id, table_name),
+ PRIMARY KEY(id)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vreplication/resharding_journal.sql b/go/vt/sidecardb/schema/vreplication/resharding_journal.sql
new file mode 100644
index 00000000000..b5b960c92aa
--- /dev/null
+++ b/go/vt/sidecardb/schema/vreplication/resharding_journal.sql
@@ -0,0 +1,23 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.resharding_journal
+(
+ `id` bigint NOT NULL,
+ `db_name` varbinary(255) DEFAULT NULL,
+ `val` blob,
+ PRIMARY KEY (`id`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vreplication/schema_version.sql b/go/vt/sidecardb/schema/vreplication/schema_version.sql
new file mode 100644
index 00000000000..86f782ddac1
--- /dev/null
+++ b/go/vt/sidecardb/schema/vreplication/schema_version.sql
@@ -0,0 +1,25 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.schema_version
+(
+ id INT NOT NULL AUTO_INCREMENT,
+ pos VARBINARY(10000) NOT NULL,
+ time_updated BIGINT(20) NOT NULL,
+ ddl BLOB DEFAULT NULL,
+ schemax LONGBLOB NOT NULL,
+ PRIMARY KEY (id)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vreplication/vreplication.sql b/go/vt/sidecardb/schema/vreplication/vreplication.sql
new file mode 100644
index 00000000000..3b30d1250c9
--- /dev/null
+++ b/go/vt/sidecardb/schema/vreplication/vreplication.sql
@@ -0,0 +1,43 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.vreplication
+(
+ `id` int NOT NULL AUTO_INCREMENT,
+ `workflow` varbinary(1000) DEFAULT NULL,
+ `source` mediumblob NOT NULL,
+ `pos` varbinary(10000) NOT NULL,
+ `stop_pos` varbinary(10000) DEFAULT NULL,
+ `max_tps` bigint NOT NULL,
+ `max_replication_lag` bigint NOT NULL,
+ `cell` varbinary(1000) DEFAULT NULL,
+ `tablet_types` varbinary(100) DEFAULT NULL,
+ `time_updated` bigint NOT NULL,
+ `transaction_timestamp` bigint NOT NULL,
+ `state` varbinary(100) NOT NULL,
+ `message` varbinary(1000) DEFAULT NULL,
+ `db_name` varbinary(255) NOT NULL,
+ `rows_copied` bigint NOT NULL DEFAULT '0',
+ `tags` varbinary(1024) NOT NULL DEFAULT '',
+ `time_heartbeat` bigint NOT NULL DEFAULT '0',
+ `workflow_type` int NOT NULL DEFAULT '0',
+ `time_throttled` bigint NOT NULL DEFAULT '0',
+ `component_throttled` varchar(255) NOT NULL DEFAULT '',
+ `workflow_sub_type` int NOT NULL DEFAULT '0',
+ `defer_secondary_keys` tinyint(1) NOT NULL DEFAULT '0',
+ PRIMARY KEY (`id`),
+ KEY `workflow_idx` (`workflow`(64))
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/schema/vreplication/vreplication_log.sql b/go/vt/sidecardb/schema/vreplication/vreplication_log.sql
new file mode 100644
index 00000000000..6700ede3c47
--- /dev/null
+++ b/go/vt/sidecardb/schema/vreplication/vreplication_log.sql
@@ -0,0 +1,28 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+CREATE TABLE IF NOT EXISTS _vt.vreplication_log
+(
+ `id` bigint NOT NULL AUTO_INCREMENT,
+ `vrepl_id` int NOT NULL,
+ `type` varbinary(256) NOT NULL,
+ `state` varbinary(100) NOT NULL,
+ `created_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+ `updated_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `message` text NOT NULL,
+ `count` bigint NOT NULL DEFAULT '1',
+ PRIMARY KEY (`id`)
+) ENGINE = InnoDB
diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go
new file mode 100644
index 00000000000..3d955995a6a
--- /dev/null
+++ b/go/vt/sidecardb/sidecardb.go
@@ -0,0 +1,513 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sidecardb
+
+import (
+ "context"
+ "embed"
+ "fmt"
+ "io/fs"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+
+ "vitess.io/vitess/go/history"
+ "vitess.io/vitess/go/mysql"
+
+ "vitess.io/vitess/go/mysql/fakesqldb"
+
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+
+ "vitess.io/vitess/go/stats"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/schemadiff"
+)
+
+const (
+ SidecarDBName = "_vt"
+ CreateSidecarDatabaseQuery = "create database if not exists _vt"
+ UseSidecarDatabaseQuery = "use _vt"
+ ShowSidecarDatabasesQuery = "SHOW DATABASES LIKE '\\_vt'"
+ SelectCurrentDatabaseQuery = "select database()"
+ ShowCreateTableQuery = "show create table _vt.%s"
+
+ CreateTableRegexp = "CREATE TABLE .* `\\_vt`\\..*"
+ AlterTableRegexp = "ALTER TABLE `\\_vt`\\..*"
+)
+
+// All tables needed in the sidecar database have their schema in the schema subdirectory.
+//
+//go:embed schema/*
+var schemaLocation embed.FS
+
+type sidecarTable struct {
+ module string // which module uses this table
+ path string // path of the schema relative to this module
+ name string // table name
+ schema string // create table dml
+}
+
+func (t *sidecarTable) String() string {
+ return fmt.Sprintf("%s.%s (%s)", SidecarDBName, t.name, t.module)
+}
+
+var sidecarTables []*sidecarTable
+var ddlCount *stats.Counter
+var ddlErrorCount *stats.Counter
+var ddlErrorHistory *history.History
+var mu sync.Mutex
+
+type ddlError struct {
+ tableName string
+ err error
+}
+
+const maxDDLErrorHistoryLength = 100
+
+// failOnSchemaInitError decides whether we fail the schema init process when we encounter an error while
+// applying a table schema upgrade DDL or continue with the next table.
+// If true, tablets will not launch. The cluster will not come up until the issue is resolved.
+// If false, the init process will continue trying to upgrade other tables. So some functionality might be broken
+// due to an incorrect schema, but the cluster should come up and serve queries.
+// This is an operational trade-off: if we always fail it could cause a major incident since the entire cluster will be down.
+// If we are more permissive, it could cause hard-to-detect errors, because a module
+// doesn't load or behaves incorrectly due to an incomplete upgrade. Errors however will be reported and if the
+// related stats endpoints are monitored we should be able to diagnose/get alerted in a timely fashion.
+const failOnSchemaInitError = false
+
+const StatsKeyPrefix = "SidecarDBDDL"
+const StatsKeyQueryCount = StatsKeyPrefix + "QueryCount"
+const StatsKeyErrorCount = StatsKeyPrefix + "ErrorCount"
+const StatsKeyErrors = StatsKeyPrefix + "Errors"
+
+func init() {
+ initSchemaFiles()
+ ddlCount = stats.NewCounter(StatsKeyQueryCount, "Number of queries executed")
+ ddlErrorCount = stats.NewCounter(StatsKeyErrorCount, "Number of errors during sidecar schema upgrade")
+ ddlErrorHistory = history.New(maxDDLErrorHistoryLength)
+ stats.Publish(StatsKeyErrors, stats.StringMapFunc(func() map[string]string {
+ mu.Lock()
+ defer mu.Unlock()
+ result := make(map[string]string, len(ddlErrorHistory.Records()))
+ for _, e := range ddlErrorHistory.Records() {
+ d, ok := e.(*ddlError)
+ if ok {
+ result[d.tableName] = d.err.Error()
+ }
+ }
+ return result
+ }))
+}
+
+func validateSchemaDefinition(name, schema string) (string, error) {
+ stmt, err := sqlparser.ParseStrictDDL(schema)
+
+ if err != nil {
+ return "", err
+ }
+ createTable, ok := stmt.(*sqlparser.CreateTable)
+ if !ok {
+ return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected CREATE TABLE. Got %v", sqlparser.CanonicalString(stmt))
+ }
+ tableName := createTable.Table.Name.String()
+ qualifier := createTable.Table.Qualifier.String()
+ if qualifier != SidecarDBName {
+ return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "database qualifier specified for the %s table is %s rather than the expected value of %s",
+ name, qualifier, SidecarDBName)
+ }
+ if !strings.EqualFold(tableName, name) {
+ return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table name of %s does not match the table name specified within the file: %s", name, tableName)
+ }
+ if !createTable.IfNotExists {
+ return "", vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "%s file did not include the required IF NOT EXISTS clause in the CREATE TABLE statement for the %s table", name, tableName)
+ }
+ normalizedSchema := sqlparser.CanonicalString(createTable)
+ return normalizedSchema, nil
+}
+
+func initSchemaFiles() {
+ sqlFileExtension := ".sql"
+ err := fs.WalkDir(schemaLocation, ".", func(path string, entry fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if !entry.IsDir() {
+ var module string
+ dir, fname := filepath.Split(path)
+ if !strings.HasSuffix(strings.ToLower(fname), sqlFileExtension) {
+ log.Infof("Ignoring non-SQL file: %s, found during sidecar database initialization", path)
+ return nil
+ }
+ dirparts := strings.Split(strings.Trim(dir, "/"), "/")
+ switch len(dirparts) {
+ case 1:
+ module = dir
+ case 2:
+ module = fmt.Sprintf("%s/%s", dirparts[0], dirparts[1])
+ default:
+ return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected path value of %s specified for sidecar schema table; expected structure is [/]/.sql", dir)
+ }
+
+ name := strings.Split(fname, ".")[0]
+ schema, err := schemaLocation.ReadFile(path)
+ if err != nil {
+ panic(err)
+ }
+ var normalizedSchema string
+ if normalizedSchema, err = validateSchemaDefinition(name, string(schema)); err != nil {
+ return err
+ }
+ sidecarTables = append(sidecarTables, &sidecarTable{name: name, module: module, path: path, schema: normalizedSchema})
+ }
+ return nil
+ })
+ if err != nil {
+ log.Errorf("error loading schema files: %+v", err)
+ }
+}
+
+// printCallerDetails is a helper for dev debugging.
+func printCallerDetails() {
+ pc, _, line, ok := runtime.Caller(2)
+ details := runtime.FuncForPC(pc)
+ if ok && details != nil {
+ log.Infof("%s schema init called from %s:%d\n", SidecarDBName, details.Name(), line)
+ }
+}
+
+type schemaInit struct {
+ ctx context.Context
+ exec Exec
+ existingTables map[string]bool
+ dbCreated bool // The first upgrade/create query will also create the sidecar database if required.
+}
+
+// Exec is a callback that has to be passed to Init() to execute the specified query in the database.
+type Exec func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error)
+
+// GetDDLCount returns the count of sidecardb DDLs that have been run as part of this vttablet's init process.
+func GetDDLCount() int64 {
+ return ddlCount.Get()
+}
+
+// GetDDLErrorCount returns the count of sidecardb DDLs that have been errored out as part of this vttablet's init process.
+func GetDDLErrorCount() int64 {
+ return ddlErrorCount.Get()
+}
+
+// GetDDLErrorHistory returns the errors encountered as part of this vttablet's init process..
+func GetDDLErrorHistory() []*ddlError {
+ var errors []*ddlError
+ for _, e := range ddlErrorHistory.Records() {
+ ddle, ok := e.(*ddlError)
+ if ok {
+ errors = append(errors, ddle)
+ }
+ }
+ return errors
+}
+
+// Init creates or upgrades the sidecar database based on declarative schema for all tables in the schema.
+func Init(ctx context.Context, exec Exec) error {
+ printCallerDetails() // for debug purposes only, remove in v17
+ log.Infof("Starting sidecardb.Init()")
+ si := &schemaInit{
+ ctx: ctx,
+ exec: exec,
+ }
+
+ // There are paths in the tablet initialization where we are in read-only mode but the schema is already updated.
+ // Hence, we should not always try to create the database, since it will then error out as the db is read-only.
+ dbExists, err := si.doesSidecarDBExist()
+ if err != nil {
+ return err
+ }
+ if !dbExists {
+ if err := si.createSidecarDB(); err != nil {
+ return err
+ }
+ si.dbCreated = true
+ }
+
+ if _, err := si.setCurrentDatabase(SidecarDBName); err != nil {
+ return err
+ }
+
+ resetSQLMode, err := si.setPermissiveSQLMode()
+ if err != nil {
+ return err
+ }
+ defer resetSQLMode()
+
+ for _, table := range sidecarTables {
+ if err := si.ensureSchema(table); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setPermissiveSQLMode gets the current sql_mode for the session, removes any
+// restrictions, and returns a function to restore it back to the original session value.
+// We need to allow for the recreation of any data that currently exists in the table, such
+// as e.g. allowing any zero dates that may already exist in a preexisting sidecar table.
+func (si *schemaInit) setPermissiveSQLMode() (func(), error) {
+ rs, err := si.exec(si.ctx, `select @@session.sql_mode as sql_mode`, 1, false)
+ if err != nil {
+ return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not read sql_mode: %v", err)
+ }
+ sqlMode, err := rs.Named().Row().ToString("sql_mode")
+ if err != nil {
+ return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not read sql_mode: %v", err)
+ }
+
+ resetSQLModeFunc := func() {
+ restoreSQLModeQuery := fmt.Sprintf("set @@session.sql_mode='%s'", sqlMode)
+ _, _ = si.exec(si.ctx, restoreSQLModeQuery, 0, false)
+ }
+
+ if _, err := si.exec(si.ctx, "set @@session.sql_mode=''", 0, false); err != nil {
+ return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not change sql_mode: %v", err)
+ }
+ return resetSQLModeFunc, nil
+}
+
+func (si *schemaInit) doesSidecarDBExist() (bool, error) {
+ rs, err := si.exec(si.ctx, ShowSidecarDatabasesQuery, 2, false)
+ if err != nil {
+ log.Error(err)
+ return false, err
+ }
+
+ switch len(rs.Rows) {
+ case 0:
+ log.Infof("doesSidecarDBExist: not found")
+ return false, nil
+ case 1:
+ log.Infof("doesSidecarDBExist: found")
+ return true, nil
+ default:
+ log.Errorf("found too many rows for sidecarDB %s: %d", SidecarDBName, len(rs.Rows))
+ return false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "found too many rows for sidecarDB %s: %d", SidecarDBName, len(rs.Rows))
+ }
+}
+
+func (si *schemaInit) createSidecarDB() error {
+ _, err := si.exec(si.ctx, CreateSidecarDatabaseQuery, 1, false)
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ log.Infof("createSidecarDB: %s", CreateSidecarDatabaseQuery)
+ return nil
+}
+
+// Sets db of current connection, returning the currently selected database.
+func (si *schemaInit) setCurrentDatabase(dbName string) (string, error) {
+ rs, err := si.exec(si.ctx, SelectCurrentDatabaseQuery, 1, false)
+ if err != nil {
+ return "", err
+ }
+ if rs == nil || rs.Rows == nil { // we get this in tests
+ return "", nil
+ }
+ currentDB := rs.Rows[0][0].ToString()
+ if currentDB != "" { // while running tests we can get currentDB as empty
+ _, err = si.exec(si.ctx, fmt.Sprintf("use %s", dbName), 1, false)
+ if err != nil {
+ return "", err
+ }
+ }
+ return currentDB, nil
+}
+
+// Gets existing schema of a table in the sidecar database.
+func (si *schemaInit) getCurrentSchema(tableName string) (string, error) {
+ var currentTableSchema string
+
+ rs, err := si.exec(si.ctx, fmt.Sprintf(ShowCreateTableQuery, tableName), 1, false)
+ if err != nil {
+ if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERNoSuchTable {
+ // table does not exist in the sidecar database
+ return "", nil
+ }
+ log.Errorf("Error getting table schema for %s: %+v", tableName, err)
+ return "", err
+ }
+ if len(rs.Rows) > 0 {
+ currentTableSchema = rs.Rows[0][1].ToString()
+ }
+ return currentTableSchema, nil
+}
+
+// findTableSchemaDiff gets the diff that needs to be applied to current table schema to get the desired one. Will be an empty string if they match.
+// This could be a CREATE statement if the table does not exist or an ALTER if table exists but has a different schema.
+func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (string, error) {
+ hints := &schemadiff.DiffHints{
+ TableCharsetCollateStrategy: schemadiff.TableCharsetCollateIgnoreAlways,
+ AlterTableAlgorithmStrategy: schemadiff.AlterTableAlgorithmStrategyCopy,
+ }
+ diff, err := schemadiff.DiffCreateTablesQueries(current, desired, hints)
+ if err != nil {
+ return "", err
+ }
+
+ var ddl string
+ if diff != nil {
+ ddl = diff.CanonicalStatementString()
+
+ // Temporary logging to debug any eventual issues around the new schema init, should be removed in v17.
+ log.Infof("Current schema for table %s:\n%s", tableName, current)
+ if ddl == "" {
+ log.Infof("No changes needed for table %s", tableName)
+ } else {
+ log.Infof("Applying DDL for table %s:\n%s", tableName, ddl)
+ }
+ }
+
+ return ddl, nil
+}
+
+// ensureSchema first checks if the table exist, in which case it runs the create script provided in
+// the schema directory. If the table exists, schemadiff is used to compare the existing schema with the desired one.
+// If it needs to be altered then we run the alter script.
+func (si *schemaInit) ensureSchema(table *sidecarTable) error {
+ ctx := si.ctx
+ desiredTableSchema := table.schema
+
+ var ddl string
+ currentTableSchema, err := si.getCurrentSchema(table.name)
+ if err != nil {
+ return err
+ }
+ ddl, err = si.findTableSchemaDiff(table.name, currentTableSchema, desiredTableSchema)
+ if err != nil {
+ return err
+ }
+
+ if ddl != "" {
+ if !si.dbCreated {
+ // We use CreateSidecarDatabaseQuery to also create the first binlog entry when a primary comes up.
+ // That statement doesn't make it to the replicas, so we run the query again so that it is replicated
+ // to the replicas so that the replicas can create the sidecar database.
+ if err := si.createSidecarDB(); err != nil {
+ return err
+ }
+ si.dbCreated = true
+ }
+ _, err := si.exec(ctx, ddl, 1, true)
+ if err != nil {
+ ddlErr := vterrors.Wrapf(err,
+ "Error running DDL %s for table %s during sidecar database initialization", ddl, table)
+ recordDDLError(table.name, ddlErr)
+ if failOnSchemaInitError {
+ return ddlErr
+ }
+ return nil
+ }
+ log.Infof("Applied DDL %s for table %s during sidecar database initialization", ddl, table)
+ ddlCount.Add(1)
+ return nil
+ }
+ log.Infof("Table schema was already up to date for the %s table in the %s sidecar database", table.name, SidecarDBName)
+ return nil
+}
+
+func recordDDLError(tableName string, err error) {
+ log.Error(err)
+ ddlErrorCount.Add(1)
+ ddlErrorHistory.Add(&ddlError{
+ tableName: tableName,
+ err: err,
+ })
+}
+
+// region unit-test-only
+// This section uses helpers used in tests, but also in the go/vt/vtexplain/vtexplain_vttablet.go.
+// Hence, it is here and not in the _test.go file.
+
+// Query patterns to handle in mocks.
+var sidecarDBInitQueries = []string{
+ ShowSidecarDatabasesQuery,
+ SelectCurrentDatabaseQuery,
+ CreateSidecarDatabaseQuery,
+ UseSidecarDatabaseQuery,
+}
+
+var sidecarDBInitQueryPatterns = []string{
+ CreateTableRegexp,
+ AlterTableRegexp,
+}
+
+// AddSchemaInitQueries adds sidecar database schema related queries to a mock db.
+func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) {
+ result := &sqltypes.Result{}
+ for _, q := range sidecarDBInitQueryPatterns {
+ db.AddQueryPattern(q, result)
+ }
+ for _, q := range sidecarDBInitQueries {
+ db.AddQuery(q, result)
+ }
+ for _, table := range sidecarTables {
+ result = &sqltypes.Result{}
+ if populateTables {
+ result = sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "Table|Create Table",
+ "varchar|varchar"),
+ fmt.Sprintf("%s|%s", table.name, table.schema),
+ )
+ }
+ db.AddQuery(fmt.Sprintf(ShowCreateTableQuery, table.name), result)
+ }
+
+ sqlModeResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "sql_mode",
+ "varchar"),
+ "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION",
+ )
+ db.AddQuery("select @@session.sql_mode as sql_mode", sqlModeResult)
+
+ db.AddQuery("set @@session.sql_mode=''", &sqltypes.Result{})
+}
+
+// MatchesInitQuery returns true if query has one of the test patterns as a substring, or it matches a provided regexp.
+func MatchesInitQuery(query string) bool {
+ query = strings.ToLower(query)
+ for _, q := range sidecarDBInitQueries {
+ if strings.EqualFold(q, query) {
+ return true
+ }
+ }
+ for _, q := range sidecarDBInitQueryPatterns {
+ q = strings.ToLower(q)
+ if strings.Contains(query, q) {
+ return true
+ }
+ if match, _ := regexp.MatchString(q, query); match {
+ return true
+ }
+ }
+ return false
+}
+
+// endregion
diff --git a/go/vt/sidecardb/sidecardb_test.go b/go/vt/sidecardb/sidecardb_test.go
new file mode 100644
index 00000000000..1ca8f2f63a4
--- /dev/null
+++ b/go/vt/sidecardb/sidecardb_test.go
@@ -0,0 +1,245 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sidecardb
+
+import (
+ "context"
+ "expvar"
+ "fmt"
+ "sort"
+ "strings"
+ "testing"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/stats"
+
+ "vitess.io/vitess/go/mysql/fakesqldb"
+ "vitess.io/vitess/go/sqltypes"
+)
+
+// TestInitErrors validates that the schema init error stats are being correctly set
+func TestInitErrors(t *testing.T) {
+ ctx := context.Background()
+
+ db := fakesqldb.New(t)
+ defer db.Close()
+ AddSchemaInitQueries(db, false)
+ db.AddQuery("use dbname", &sqltypes.Result{})
+ sqlMode := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "sql_mode",
+ "varchar"),
+ "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION",
+ )
+ db.AddQuery("select @@session.sql_mode as sql_mode", sqlMode)
+ db.AddQueryPattern("set @@session.sql_mode=.*", &sqltypes.Result{})
+
+ ddlErrorCount.Set(0)
+ ddlCount.Set(0)
+
+ cp := db.ConnParams()
+ conn, err := cp.Connect(ctx)
+ require.NoError(t, err)
+
+ type schemaError struct {
+ tableName string
+ errorValue string
+ }
+
+ // simulate two errors during table creation to validate error stats
+ schemaErrors := []schemaError{
+ {"vreplication_log", "vreplication_log error"},
+ {"copy_state", "copy_state error"},
+ }
+
+ exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) {
+ if useDB {
+ if _, err := conn.ExecuteFetch(UseSidecarDatabaseQuery, maxRows, true); err != nil {
+ return nil, err
+ }
+ }
+
+ // simulate errors for the table creation DDLs applied for tables specified in schemaErrors
+ stmt, err := sqlparser.Parse(query)
+ if err != nil {
+ return nil, err
+ }
+ createTable, ok := stmt.(*sqlparser.CreateTable)
+ if ok {
+ for _, e := range schemaErrors {
+ if strings.EqualFold(e.tableName, createTable.Table.Name.String()) {
+ return nil, fmt.Errorf(e.errorValue)
+ }
+ }
+ }
+ return conn.ExecuteFetch(query, maxRows, true)
+ }
+
+ require.Equal(t, int64(0), GetDDLCount())
+ err = Init(ctx, exec)
+ require.NoError(t, err)
+ require.Equal(t, int64(len(sidecarTables)-len(schemaErrors)), GetDDLCount())
+ require.Equal(t, int64(len(schemaErrors)), GetDDLErrorCount())
+
+ var want []string
+ for _, e := range schemaErrors {
+ want = append(want, e.errorValue)
+ }
+ // sort expected and reported errors for easy comparison
+ sort.Strings(want)
+ got := GetDDLErrorHistory()
+ sort.Slice(got, func(i, j int) bool {
+ return got[i].tableName < got[j].tableName
+ })
+ var gotErrors string
+ stats.Register(func(name string, v expvar.Var) {
+ if name == StatsKeyErrors {
+ gotErrors = v.String()
+ }
+ })
+
+ // for DDL errors, validate both the internal data structure and the stats endpoint
+ for i := range want {
+ if !strings.Contains(got[i].err.Error(), want[i]) {
+ require.FailNowf(t, "incorrect schema error", "got %s, want %s", got[i], want[i])
+ }
+ if !strings.Contains(gotErrors, want[i]) {
+ require.FailNowf(t, "schema error not published", "got %s, want %s", gotErrors, want[i])
+ }
+ }
+}
+
+// test the logic that confirms that the user defined schema's table name and qualifier are valid
+func TestValidateSchema(t *testing.T) {
+ type testCase struct {
+ testName string
+ name string
+ schema string
+ mustError bool
+ }
+ testCases := []testCase{
+ {"valid", "t1", "create table if not exists _vt.t1(i int)", false},
+ {"no if not exists", "t1", "create table _vt.t1(i int)", true},
+ {"invalid table name", "t2", "create table if not exists _vt.t1(i int)", true},
+ {"invalid table name", "t1", "create table if not exists _vt.t2(i int)", true},
+ {"invalid qualifier", "t1", "create table if not exists vt_product.t1(i int)", true},
+ {"invalid qualifier", "t1", "create table if not exists t1(i int)", true},
+ }
+ for _, tc := range testCases {
+ t.Run(tc.testName, func(t *testing.T) {
+ _, err := validateSchemaDefinition(tc.name, tc.schema)
+ if tc.mustError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
+
+// TestAlterTableAlgorithm confirms that we use ALGORITHM=COPY during alter tables
+func TestAlterTableAlgorithm(t *testing.T) {
+ type testCase struct {
+ testName string
+ tableName string
+ currentSchema string
+ desiredSchema string
+ }
+ testCases := []testCase{
+ {"add column", "t1", "create table if not exists _vt.t1(i int)", "create table if not exists _vt.t1(i int, i1 int)"},
+ {"modify column", "t1", "create table if not exists _vt.t1(i int)", "create table if not exists _vt.t(i float)"},
+ }
+ si := &schemaInit{}
+ copyAlgo := sqlparser.AlgorithmValue("COPY")
+ for _, tc := range testCases {
+ t.Run(tc.testName, func(t *testing.T) {
+ diff, err := si.findTableSchemaDiff(tc.tableName, tc.currentSchema, tc.desiredSchema)
+ require.NoError(t, err)
+ stmt, err := sqlparser.Parse(diff)
+ require.NoError(t, err)
+ alterTable, ok := stmt.(*sqlparser.AlterTable)
+ require.True(t, ok)
+ require.NotNil(t, alterTable)
+ var alterAlgo sqlparser.AlterOption
+ for i, opt := range alterTable.AlterOptions {
+ if _, ok := opt.(sqlparser.AlgorithmValue); ok {
+ alterAlgo = alterTable.AlterOptions[i]
+ }
+ }
+ require.Equal(t, copyAlgo, alterAlgo)
+ })
+ }
+}
+
+// Tests various non-error code paths in sidecardb
+func TestMiscSidecarDB(t *testing.T) {
+ ctx := context.Background()
+
+ db := fakesqldb.New(t)
+ defer db.Close()
+ AddSchemaInitQueries(db, false)
+ db.AddQuery("use dbname", &sqltypes.Result{})
+ db.AddQueryPattern("set @@session.sql_mode=.*", &sqltypes.Result{})
+
+ cp := db.ConnParams()
+ conn, err := cp.Connect(ctx)
+ require.NoError(t, err)
+ exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) {
+ if useDB {
+ if _, err := conn.ExecuteFetch(UseSidecarDatabaseQuery, maxRows, true); err != nil {
+ return nil, err
+ }
+ }
+ return conn.ExecuteFetch(query, maxRows, true)
+ }
+
+ // tests init on empty db
+ ddlErrorCount.Set(0)
+ ddlCount.Set(0)
+ require.Equal(t, int64(0), GetDDLCount())
+ err = Init(ctx, exec)
+ require.NoError(t, err)
+ require.Equal(t, int64(len(sidecarTables)), GetDDLCount())
+
+ // tests init on already inited db
+ AddSchemaInitQueries(db, true)
+ err = Init(ctx, exec)
+ require.NoError(t, err)
+ require.Equal(t, int64(len(sidecarTables)), GetDDLCount())
+
+ // tests misc paths not covered above
+ si := &schemaInit{
+ ctx: ctx,
+ exec: exec,
+ }
+ result := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "Database",
+ "varchar"),
+ "currentDB",
+ )
+ db.AddQuery(SelectCurrentDatabaseQuery, result)
+
+ currentDB, err := si.setCurrentDatabase("dbname")
+ require.NoError(t, err)
+ require.Equal(t, "currentDB", currentDB)
+
+ require.False(t, MatchesInitQuery("abc"))
+ require.True(t, MatchesInitQuery(SelectCurrentDatabaseQuery))
+ require.True(t, MatchesInitQuery("CREATE TABLE IF NOT EXISTS `_vt`.vreplication"))
+}
diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go
index 798aa5914ac..a9900f39044 100644
--- a/go/vt/sqlparser/analyzer.go
+++ b/go/vt/sqlparser/analyzer.go
@@ -72,7 +72,7 @@ func ASTToStatementType(stmt Statement) StatementType {
return StmtUpdate
case *Delete:
return StmtDelete
- case *Set, *SetTransaction:
+ case *Set:
return StmtSet
case *Show:
return StmtShow
@@ -86,7 +86,7 @@ func ASTToStatementType(stmt Statement) StatementType {
return StmtUse
case *OtherRead, *OtherAdmin, *Load:
return StmtOther
- case Explain:
+ case Explain, *VExplainStmt:
return StmtExplain
case *Begin:
return StmtBegin
@@ -323,52 +323,6 @@ func IsDMLStatement(stmt Statement) bool {
return false
}
-// SplitAndExpression breaks up the Expr into AND-separated conditions
-// and appends them to filters. Outer parenthesis are removed. Precedence
-// should be taken into account if expressions are recombined.
-func SplitAndExpression(filters []Expr, node Expr) []Expr {
- if node == nil {
- return filters
- }
- switch node := node.(type) {
- case *AndExpr:
- filters = SplitAndExpression(filters, node.Left)
- return SplitAndExpression(filters, node.Right)
- }
- return append(filters, node)
-}
-
-// AndExpressions ands together two or more expressions, minimising the expr when possible
-func AndExpressions(exprs ...Expr) Expr {
- switch len(exprs) {
- case 0:
- return nil
- case 1:
- return exprs[0]
- default:
- result := (Expr)(nil)
- outer:
- // we'll loop and remove any duplicates
- for i, expr := range exprs {
- if expr == nil {
- continue
- }
- if result == nil {
- result = expr
- continue outer
- }
-
- for j := 0; j < i; j++ {
- if EqualsExpr(expr, exprs[j]) {
- continue outer
- }
- }
- result = &AndExpr{Left: result, Right: expr}
- }
- return result
- }
-}
-
// TableFromStatement returns the qualified table name for the query.
// This works only for select statements.
func TableFromStatement(sql string) (TableName, error) {
diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go
index 686aa4ea089..331169bac44 100644
--- a/go/vt/sqlparser/ast.go
+++ b/go/vt/sqlparser/ast.go
@@ -369,28 +369,6 @@ type (
Exprs SetExprs
}
- // SetTransaction represents a SET TRANSACTION statement.
- SetTransaction struct {
- Comments *ParsedComments
- Scope Scope
- Characteristics []Characteristic
- }
-
- // Scope is an enum for scope of query
- Scope int8
-
- // Characteristic is a transaction related change
- Characteristic interface {
- SQLNode
- iChar()
- }
-
- // IsolationLevel is an enum for isolation levels
- IsolationLevel int8
-
- // AccessMode is enum for the mode - ReadOnly or ReadWrite
- AccessMode int8
-
// DropDatabase represents a DROP database statement.
DropDatabase struct {
Comments *ParsedComments
@@ -476,6 +454,11 @@ type (
Comments Comments
}
+ // ShowThrottlerStatus represents a SHOW VITESS_THROTTLED_APPS statement
+ ShowThrottlerStatus struct {
+ Comments Comments
+ }
+
// RevertMigration represents a REVERT VITESS_MIGRATION statement
RevertMigration struct {
UUID string
@@ -579,8 +562,13 @@ type (
DBName IdentifierCS
}
+ // TxAccessMode is an enum for Transaction Access Mode
+ TxAccessMode int8
+
// Begin represents a Begin statement.
- Begin struct{}
+ Begin struct {
+ TxAccessModes []TxAccessMode
+ }
// Commit represents a Commit statement.
Commit struct{}
@@ -639,6 +627,16 @@ type (
Comments *ParsedComments
}
+ // VExplainType is an enum for VExplainStmt.Type
+ VExplainType int8
+
+ // VExplainStmt represents an VtExplain statement
+ VExplainStmt struct {
+ Type VExplainType
+ Statement Statement
+ Comments *ParsedComments
+ }
+
// ExplainTab represents the Explain table
ExplainTab struct {
Table TableName
@@ -692,54 +690,55 @@ type (
}
)
-func (*Union) iStatement() {}
-func (*Select) iStatement() {}
-func (*Stream) iStatement() {}
-func (*VStream) iStatement() {}
-func (*Insert) iStatement() {}
-func (*Update) iStatement() {}
-func (*Delete) iStatement() {}
-func (*Set) iStatement() {}
-func (*SetTransaction) iStatement() {}
-func (*DropDatabase) iStatement() {}
-func (*Flush) iStatement() {}
-func (*Show) iStatement() {}
-func (*Use) iStatement() {}
-func (*Begin) iStatement() {}
-func (*Commit) iStatement() {}
-func (*Rollback) iStatement() {}
-func (*SRollback) iStatement() {}
-func (*Savepoint) iStatement() {}
-func (*Release) iStatement() {}
-func (*OtherRead) iStatement() {}
-func (*OtherAdmin) iStatement() {}
-func (*CommentOnly) iStatement() {}
-func (*Select) iSelectStatement() {}
-func (*Union) iSelectStatement() {}
-func (*Load) iStatement() {}
-func (*CreateDatabase) iStatement() {}
-func (*AlterDatabase) iStatement() {}
-func (*CreateTable) iStatement() {}
-func (*CreateView) iStatement() {}
-func (*AlterView) iStatement() {}
-func (*LockTables) iStatement() {}
-func (*UnlockTables) iStatement() {}
-func (*AlterTable) iStatement() {}
-func (*AlterVschema) iStatement() {}
-func (*AlterMigration) iStatement() {}
-func (*RevertMigration) iStatement() {}
-func (*ShowMigrationLogs) iStatement() {}
-func (*ShowThrottledApps) iStatement() {}
-func (*DropTable) iStatement() {}
-func (*DropView) iStatement() {}
-func (*TruncateTable) iStatement() {}
-func (*RenameTable) iStatement() {}
-func (*CallProc) iStatement() {}
-func (*ExplainStmt) iStatement() {}
-func (*ExplainTab) iStatement() {}
-func (*PrepareStmt) iStatement() {}
-func (*ExecuteStmt) iStatement() {}
-func (*DeallocateStmt) iStatement() {}
+func (*Union) iStatement() {}
+func (*Select) iStatement() {}
+func (*Stream) iStatement() {}
+func (*VStream) iStatement() {}
+func (*Insert) iStatement() {}
+func (*Update) iStatement() {}
+func (*Delete) iStatement() {}
+func (*Set) iStatement() {}
+func (*DropDatabase) iStatement() {}
+func (*Flush) iStatement() {}
+func (*Show) iStatement() {}
+func (*Use) iStatement() {}
+func (*Begin) iStatement() {}
+func (*Commit) iStatement() {}
+func (*Rollback) iStatement() {}
+func (*SRollback) iStatement() {}
+func (*Savepoint) iStatement() {}
+func (*Release) iStatement() {}
+func (*OtherRead) iStatement() {}
+func (*OtherAdmin) iStatement() {}
+func (*CommentOnly) iStatement() {}
+func (*Select) iSelectStatement() {}
+func (*Union) iSelectStatement() {}
+func (*Load) iStatement() {}
+func (*CreateDatabase) iStatement() {}
+func (*AlterDatabase) iStatement() {}
+func (*CreateTable) iStatement() {}
+func (*CreateView) iStatement() {}
+func (*AlterView) iStatement() {}
+func (*LockTables) iStatement() {}
+func (*UnlockTables) iStatement() {}
+func (*AlterTable) iStatement() {}
+func (*AlterVschema) iStatement() {}
+func (*AlterMigration) iStatement() {}
+func (*RevertMigration) iStatement() {}
+func (*ShowMigrationLogs) iStatement() {}
+func (*ShowThrottledApps) iStatement() {}
+func (*ShowThrottlerStatus) iStatement() {}
+func (*DropTable) iStatement() {}
+func (*DropView) iStatement() {}
+func (*TruncateTable) iStatement() {}
+func (*RenameTable) iStatement() {}
+func (*CallProc) iStatement() {}
+func (*ExplainStmt) iStatement() {}
+func (*VExplainStmt) iStatement() {}
+func (*ExplainTab) iStatement() {}
+func (*PrepareStmt) iStatement() {}
+func (*ExecuteStmt) iStatement() {}
+func (*DeallocateStmt) iStatement() {}
func (*CreateView) iDDLStatement() {}
func (*AlterView) iDDLStatement() {}
@@ -1261,47 +1260,52 @@ func (node *AlterView) SetFromTables(tables TableNames) {
// irrelevant
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *RenameTable) SetComments(comments Comments) {
// irrelevant
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *TruncateTable) SetComments(comments Comments) {
// irrelevant
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *AlterTable) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *ExplainStmt) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
+func (node *VExplainStmt) SetComments(comments Comments) {
+ node.Comments = comments.Parsed()
+}
+
+// SetComments implements Commented interface.
func (node *CreateTable) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *CreateView) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *DropTable) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *DropView) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
-// SetComments implements DDLStatement.
+// SetComments implements Commented interface.
func (node *AlterView) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
@@ -1336,49 +1340,54 @@ func (node *VStream) SetComments(comments Comments) {
node.Comments = comments.Parsed()
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *RenameTable) GetParsedComments() *ParsedComments {
// irrelevant
return nil
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *TruncateTable) GetParsedComments() *ParsedComments {
// irrelevant
return nil
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *AlterTable) GetParsedComments() *ParsedComments {
return node.Comments
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *ExplainStmt) GetParsedComments() *ParsedComments {
return node.Comments
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
+func (node *VExplainStmt) GetParsedComments() *ParsedComments {
+ return node.Comments
+}
+
+// GetParsedComments implements Commented interface.
func (node *CreateTable) GetParsedComments() *ParsedComments {
return node.Comments
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *CreateView) GetParsedComments() *ParsedComments {
return node.Comments
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *DropTable) GetParsedComments() *ParsedComments {
return node.Comments
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *DropView) GetParsedComments() *ParsedComments {
return node.Comments
}
-// GetParsedComments implements DDLStatement.
+// GetParsedComments implements Commented interface.
func (node *AlterView) GetParsedComments() *ParsedComments {
return node.Comments
}
@@ -1753,8 +1762,7 @@ type TableSpec struct {
// ColumnDefinition describes a column in a CREATE TABLE statement
type ColumnDefinition struct {
Name IdentifierCI
- // TODO: Should this not be a reference?
- Type ColumnType
+ Type *ColumnType
}
// ColumnType represents a sql type in a CREATE TABLE statement
@@ -2274,6 +2282,9 @@ type (
Qualifier TableName
}
+ // Scope is an enum for scope of query
+ Scope int8
+
Variable struct {
Scope Scope
Name IdentifierCI
@@ -2520,7 +2531,7 @@ type (
}
// JSONTableExpr describes the components of JSON_TABLE()
- // For more information, visit https://dev.mysql.com/doc/refman/8.0/en/json-table-functions.html#function_json-table
+ // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/json-table-functions.html#function_json-table
JSONTableExpr struct {
Expr Expr
Alias IdentifierCS
@@ -2546,7 +2557,7 @@ type (
// JtPathColDef is a type of column definition specifying the path in JSON structure to extract values
JtPathColDef struct {
Name IdentifierCI
- Type ColumnType
+ Type *ColumnType
JtColExists bool
Path Expr
EmptyOnResponse *JtOnResponse
@@ -2681,14 +2692,14 @@ type (
JSONValueMergeType int8
// JSONRemoveExpr represents the JSON_REMOVE()
- // For more information, visit https://dev.mysql.com/doc/refman/8.0/en/json-modification-functions.html#function_json-remove
+ // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/json-modification-functions.html#function_json-remove
JSONRemoveExpr struct {
JSONDoc Expr
PathList Exprs
}
// JSONRemoveExpr represents the JSON_UNQUOTE()
- // For more information, visit https://dev.mysql.com/doc/refman/8.0/en/json-modification-functions.html#function_json-unquote
+ // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/json-modification-functions.html#function_json-unquote
JSONUnquoteExpr struct {
JSONValue Expr
}
@@ -2779,7 +2790,7 @@ type (
}
// RegexpInstrExpr represents REGEXP_INSTR()
- // For more information, visit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-instr
+ // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-instr
RegexpInstrExpr struct {
Expr Expr
Pattern Expr
@@ -2790,7 +2801,7 @@ type (
}
// RegexpLikeExpr represents REGEXP_LIKE()
- // For more information, visit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-like
+ // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-like
RegexpLikeExpr struct {
Expr Expr
Pattern Expr
@@ -2798,7 +2809,7 @@ type (
}
// RegexpReplaceExpr represents REGEXP_REPLACE()
- // For more information, visit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-replace
+ // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-replace
RegexpReplaceExpr struct {
Expr Expr
Pattern Expr
@@ -2809,7 +2820,7 @@ type (
}
// RegexpSubstrExpr represents REGEXP_SUBSTR()
- // For more information, visit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-substr
+ // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-substr
RegexpSubstrExpr struct {
Expr Expr
Pattern Expr
@@ -2869,7 +2880,7 @@ type (
// ExtractValueExpr stands for EXTRACTVALUE() XML function
// Extract a value from an XML string using XPath notation
- // For more details, visit https://dev.mysql.com/doc/refman/8.0/en/xml-functions.html#function_extractvalue
+ // For more details, postVisit https://dev.mysql.com/doc/refman/8.0/en/xml-functions.html#function_extractvalue
ExtractValueExpr struct {
Fragment Expr
XPathExpr Expr
@@ -2877,7 +2888,7 @@ type (
// UpdateXMLExpr stands for UpdateXML() XML function
// Return replaced XML fragment
- // For more details, visit https://dev.mysql.com/doc/refman/8.0/en/xml-functions.html#function_updatexml
+ // For more details, postVisit https://dev.mysql.com/doc/refman/8.0/en/xml-functions.html#function_updatexml
UpdateXMLExpr struct {
Target Expr
XPathExpr Expr
@@ -2902,7 +2913,7 @@ type (
// For FORMAT_BYTES, it means count
// For FORMAT_PICO_TIME, it means time_val
// For PS_THREAD_ID it means connection_id
- // For more details, visit https://dev.mysql.com/doc/refman/8.0/en/performance-schema-functions.html
+ // For more details, postVisit https://dev.mysql.com/doc/refman/8.0/en/performance-schema-functions.html
PerformanceSchemaFuncExpr struct {
Type PerformanceSchemaType
Argument Expr
@@ -2913,7 +2924,7 @@ type (
// GTIDFuncExpr stands for GTID Functions
// Set1 Acts as gtid_set for WAIT_FOR_EXECUTED_GTID_SET() and WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS()
- // For more details, visit https://dev.mysql.com/doc/refman/8.0/en/gtid-functions.html
+ // For more details, postVisit https://dev.mysql.com/doc/refman/8.0/en/gtid-functions.html
GTIDFuncExpr struct {
Type GTIDType
Set1 Expr
@@ -3227,6 +3238,3 @@ type IdentifierCI struct {
type IdentifierCS struct {
v string
}
-
-func (IsolationLevel) iChar() {}
-func (AccessMode) iChar() {}
diff --git a/go/vt/sqlparser/ast_clone.go b/go/vt/sqlparser/ast_clone.go
index 4fd88d4beb9..147b5a864b7 100644
--- a/go/vt/sqlparser/ast_clone.go
+++ b/go/vt/sqlparser/ast_clone.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,8 +23,6 @@ func CloneSQLNode(in SQLNode) SQLNode {
return nil
}
switch in := in.(type) {
- case AccessMode:
- return in
case *AddColumns:
return CloneRefOfAddColumns(in)
case *AddConstraintDefinition:
@@ -211,8 +209,6 @@ func CloneSQLNode(in SQLNode) SQLNode {
return CloneRefOfIntroducerExpr(in)
case *IsExpr:
return CloneRefOfIsExpr(in)
- case IsolationLevel:
- return in
case *JSONArrayExpr:
return CloneRefOfJSONArrayExpr(in)
case *JSONAttributesExpr:
@@ -227,8 +223,8 @@ func CloneSQLNode(in SQLNode) SQLNode {
return CloneRefOfJSONKeysExpr(in)
case *JSONObjectExpr:
return CloneRefOfJSONObjectExpr(in)
- case JSONObjectParam:
- return CloneJSONObjectParam(in)
+ case *JSONObjectParam:
+ return CloneRefOfJSONObjectParam(in)
case *JSONOverlapsExpr:
return CloneRefOfJSONOverlapsExpr(in)
case *JSONPrettyExpr:
@@ -399,8 +395,6 @@ func CloneSQLNode(in SQLNode) SQLNode {
return CloneRefOfSetExpr(in)
case SetExprs:
return CloneSetExprs(in)
- case *SetTransaction:
- return CloneRefOfSetTransaction(in)
case *Show:
return CloneRefOfShow(in)
case *ShowBasic:
@@ -415,6 +409,8 @@ func CloneSQLNode(in SQLNode) SQLNode {
return CloneRefOfShowOther(in)
case *ShowThrottledApps:
return CloneRefOfShowThrottledApps(in)
+ case *ShowThrottlerStatus:
+ return CloneRefOfShowThrottlerStatus(in)
case *StarExpr:
return CloneRefOfStarExpr(in)
case *Std:
@@ -475,6 +471,8 @@ func CloneSQLNode(in SQLNode) SQLNode {
return CloneRefOfUpdateXMLExpr(in)
case *Use:
return CloneRefOfUse(in)
+ case *VExplainStmt:
+ return CloneRefOfVExplainStmt(in)
case *VStream:
return CloneRefOfVStream(in)
case ValTuple:
@@ -726,6 +724,7 @@ func CloneRefOfBegin(n *Begin) *Begin {
return nil
}
out := *n
+ out.TxAccessModes = CloneSliceOfTxAccessMode(n.TxAccessModes)
return &out
}
@@ -870,7 +869,7 @@ func CloneRefOfColumnDefinition(n *ColumnDefinition) *ColumnDefinition {
}
out := *n
out.Name = CloneIdentifierCI(n.Name)
- out.Type = CloneColumnType(n.Type)
+ out.Type = CloneRefOfColumnType(n.Type)
return &out
}
@@ -1578,9 +1577,15 @@ func CloneRefOfJSONObjectExpr(n *JSONObjectExpr) *JSONObjectExpr {
return &out
}
-// CloneJSONObjectParam creates a deep clone of the input.
-func CloneJSONObjectParam(n JSONObjectParam) JSONObjectParam {
- return *CloneRefOfJSONObjectParam(&n)
+// CloneRefOfJSONObjectParam creates a deep clone of the input.
+func CloneRefOfJSONObjectParam(n *JSONObjectParam) *JSONObjectParam {
+ if n == nil {
+ return nil
+ }
+ out := *n
+ out.Key = CloneExpr(n.Key)
+ out.Value = CloneExpr(n.Value)
+ return &out
}
// CloneRefOfJSONOverlapsExpr creates a deep clone of the input.
@@ -2487,17 +2492,6 @@ func CloneSetExprs(n SetExprs) SetExprs {
return res
}
-// CloneRefOfSetTransaction creates a deep clone of the input.
-func CloneRefOfSetTransaction(n *SetTransaction) *SetTransaction {
- if n == nil {
- return nil
- }
- out := *n
- out.Comments = CloneRefOfParsedComments(n.Comments)
- out.Characteristics = CloneSliceOfCharacteristic(n.Characteristics)
- return &out
-}
-
// CloneRefOfShow creates a deep clone of the input.
func CloneRefOfShow(n *Show) *Show {
if n == nil {
@@ -2569,6 +2563,16 @@ func CloneRefOfShowThrottledApps(n *ShowThrottledApps) *ShowThrottledApps {
return &out
}
+// CloneRefOfShowThrottlerStatus creates a deep clone of the input.
+func CloneRefOfShowThrottlerStatus(n *ShowThrottlerStatus) *ShowThrottlerStatus {
+ if n == nil {
+ return nil
+ }
+ out := *n
+ out.Comments = CloneComments(n.Comments)
+ return &out
+}
+
// CloneRefOfStarExpr creates a deep clone of the input.
func CloneRefOfStarExpr(n *StarExpr) *StarExpr {
if n == nil {
@@ -2903,6 +2907,17 @@ func CloneRefOfUse(n *Use) *Use {
return &out
}
+// CloneRefOfVExplainStmt creates a deep clone of the input.
+func CloneRefOfVExplainStmt(n *VExplainStmt) *VExplainStmt {
+ if n == nil {
+ return nil
+ }
+ out := *n
+ out.Statement = CloneStatement(n.Statement)
+ out.Comments = CloneRefOfParsedComments(n.Comments)
+ return &out
+}
+
// CloneRefOfVStream creates a deep clone of the input.
func CloneRefOfVStream(n *VStream) *VStream {
if n == nil {
@@ -3334,22 +3349,6 @@ func CloneCallable(in Callable) Callable {
}
}
-// CloneCharacteristic creates a deep clone of the input.
-func CloneCharacteristic(in Characteristic) Characteristic {
- if in == nil {
- return nil
- }
- switch in := in.(type) {
- case AccessMode:
- return in
- case IsolationLevel:
- return in
- default:
- // this should never happen
- return nil
- }
-}
-
// CloneColTuple creates a deep clone of the input.
func CloneColTuple(in ColTuple) ColTuple {
if in == nil {
@@ -3810,14 +3809,14 @@ func CloneStatement(in Statement) Statement {
return CloneRefOfSelect(in)
case *Set:
return CloneRefOfSet(in)
- case *SetTransaction:
- return CloneRefOfSetTransaction(in)
case *Show:
return CloneRefOfShow(in)
case *ShowMigrationLogs:
return CloneRefOfShowMigrationLogs(in)
case *ShowThrottledApps:
return CloneRefOfShowThrottledApps(in)
+ case *ShowThrottlerStatus:
+ return CloneRefOfShowThrottlerStatus(in)
case *Stream:
return CloneRefOfStream(in)
case *TruncateTable:
@@ -3830,6 +3829,8 @@ func CloneStatement(in Statement) Statement {
return CloneRefOfUpdate(in)
case *Use:
return CloneRefOfUse(in)
+ case *VExplainStmt:
+ return CloneRefOfVExplainStmt(in)
case *VStream:
return CloneRefOfVStream(in)
default:
@@ -3915,6 +3916,16 @@ func CloneSliceOfIdentifierCI(n []IdentifierCI) []IdentifierCI {
return res
}
+// CloneSliceOfTxAccessMode creates a deep clone of the input.
+func CloneSliceOfTxAccessMode(n []TxAccessMode) []TxAccessMode {
+ if n == nil {
+ return nil
+ }
+ res := make([]TxAccessMode, len(n))
+ copy(res, n)
+ return res
+}
+
// CloneSliceOfRefOfWhen creates a deep clone of the input.
func CloneSliceOfRefOfWhen(n []*When) []*When {
if n == nil {
@@ -3927,11 +3938,6 @@ func CloneSliceOfRefOfWhen(n []*When) []*When {
return res
}
-// CloneColumnType creates a deep clone of the input.
-func CloneColumnType(n ColumnType) ColumnType {
- return *CloneRefOfColumnType(&n)
-}
-
// CloneRefOfColumnTypeOptions creates a deep clone of the input.
func CloneRefOfColumnTypeOptions(n *ColumnTypeOptions) *ColumnTypeOptions {
if n == nil {
@@ -4044,17 +4050,6 @@ func CloneSliceOfRefOfJSONObjectParam(n []*JSONObjectParam) []*JSONObjectParam {
return res
}
-// CloneRefOfJSONObjectParam creates a deep clone of the input.
-func CloneRefOfJSONObjectParam(n *JSONObjectParam) *JSONObjectParam {
- if n == nil {
- return nil
- }
- out := *n
- out.Key = CloneExpr(n.Key)
- out.Value = CloneExpr(n.Value)
- return &out
-}
-
// CloneSliceOfRefOfJtColumnDefinition creates a deep clone of the input.
func CloneSliceOfRefOfJtColumnDefinition(n []*JtColumnDefinition) []*JtColumnDefinition {
if n == nil {
@@ -4084,7 +4079,7 @@ func CloneRefOfJtPathColDef(n *JtPathColDef) *JtPathColDef {
}
out := *n
out.Name = CloneIdentifierCI(n.Name)
- out.Type = CloneColumnType(n.Type)
+ out.Type = CloneRefOfColumnType(n.Type)
out.Path = CloneExpr(n.Path)
out.EmptyOnResponse = CloneRefOfJtOnResponse(n.EmptyOnResponse)
out.ErrorOnResponse = CloneRefOfJtOnResponse(n.ErrorOnResponse)
@@ -4193,18 +4188,6 @@ func CloneSliceOfTableExpr(n []TableExpr) []TableExpr {
return res
}
-// CloneSliceOfCharacteristic creates a deep clone of the input.
-func CloneSliceOfCharacteristic(n []Characteristic) []Characteristic {
- if n == nil {
- return nil
- }
- res := make([]Characteristic, len(n))
- for i, x := range n {
- res[i] = CloneCharacteristic(x)
- }
- return res
-}
-
// CloneRefOfTableName creates a deep clone of the input.
func CloneRefOfTableName(n *TableName) *TableName {
if n == nil {
diff --git a/go/vt/sqlparser/ast_copy_on_rewrite.go b/go/vt/sqlparser/ast_copy_on_rewrite.go
new file mode 100644
index 00000000000..e9132ea15b9
--- /dev/null
+++ b/go/vt/sqlparser/ast_copy_on_rewrite.go
@@ -0,0 +1,6930 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Code generated by ASTHelperGen. DO NOT EDIT.
+
+package sqlparser
+
+func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AddColumns:
+ return c.copyOnRewriteRefOfAddColumns(n, parent)
+ case *AddConstraintDefinition:
+ return c.copyOnRewriteRefOfAddConstraintDefinition(n, parent)
+ case *AddIndexDefinition:
+ return c.copyOnRewriteRefOfAddIndexDefinition(n, parent)
+ case AlgorithmValue:
+ return c.copyOnRewriteAlgorithmValue(n, parent)
+ case *AliasedExpr:
+ return c.copyOnRewriteRefOfAliasedExpr(n, parent)
+ case *AliasedTableExpr:
+ return c.copyOnRewriteRefOfAliasedTableExpr(n, parent)
+ case *AlterCharset:
+ return c.copyOnRewriteRefOfAlterCharset(n, parent)
+ case *AlterCheck:
+ return c.copyOnRewriteRefOfAlterCheck(n, parent)
+ case *AlterColumn:
+ return c.copyOnRewriteRefOfAlterColumn(n, parent)
+ case *AlterDatabase:
+ return c.copyOnRewriteRefOfAlterDatabase(n, parent)
+ case *AlterIndex:
+ return c.copyOnRewriteRefOfAlterIndex(n, parent)
+ case *AlterMigration:
+ return c.copyOnRewriteRefOfAlterMigration(n, parent)
+ case *AlterTable:
+ return c.copyOnRewriteRefOfAlterTable(n, parent)
+ case *AlterView:
+ return c.copyOnRewriteRefOfAlterView(n, parent)
+ case *AlterVschema:
+ return c.copyOnRewriteRefOfAlterVschema(n, parent)
+ case *AndExpr:
+ return c.copyOnRewriteRefOfAndExpr(n, parent)
+ case Argument:
+ return c.copyOnRewriteArgument(n, parent)
+ case *ArgumentLessWindowExpr:
+ return c.copyOnRewriteRefOfArgumentLessWindowExpr(n, parent)
+ case *AutoIncSpec:
+ return c.copyOnRewriteRefOfAutoIncSpec(n, parent)
+ case *Avg:
+ return c.copyOnRewriteRefOfAvg(n, parent)
+ case *Begin:
+ return c.copyOnRewriteRefOfBegin(n, parent)
+ case *BetweenExpr:
+ return c.copyOnRewriteRefOfBetweenExpr(n, parent)
+ case *BinaryExpr:
+ return c.copyOnRewriteRefOfBinaryExpr(n, parent)
+ case *BitAnd:
+ return c.copyOnRewriteRefOfBitAnd(n, parent)
+ case *BitOr:
+ return c.copyOnRewriteRefOfBitOr(n, parent)
+ case *BitXor:
+ return c.copyOnRewriteRefOfBitXor(n, parent)
+ case BoolVal:
+ return c.copyOnRewriteBoolVal(n, parent)
+ case *CallProc:
+ return c.copyOnRewriteRefOfCallProc(n, parent)
+ case *CaseExpr:
+ return c.copyOnRewriteRefOfCaseExpr(n, parent)
+ case *CastExpr:
+ return c.copyOnRewriteRefOfCastExpr(n, parent)
+ case *ChangeColumn:
+ return c.copyOnRewriteRefOfChangeColumn(n, parent)
+ case *CharExpr:
+ return c.copyOnRewriteRefOfCharExpr(n, parent)
+ case *CheckConstraintDefinition:
+ return c.copyOnRewriteRefOfCheckConstraintDefinition(n, parent)
+ case *ColName:
+ return c.copyOnRewriteRefOfColName(n, parent)
+ case *CollateExpr:
+ return c.copyOnRewriteRefOfCollateExpr(n, parent)
+ case *ColumnDefinition:
+ return c.copyOnRewriteRefOfColumnDefinition(n, parent)
+ case *ColumnType:
+ return c.copyOnRewriteRefOfColumnType(n, parent)
+ case Columns:
+ return c.copyOnRewriteColumns(n, parent)
+ case *CommentOnly:
+ return c.copyOnRewriteRefOfCommentOnly(n, parent)
+ case *Commit:
+ return c.copyOnRewriteRefOfCommit(n, parent)
+ case *CommonTableExpr:
+ return c.copyOnRewriteRefOfCommonTableExpr(n, parent)
+ case *ComparisonExpr:
+ return c.copyOnRewriteRefOfComparisonExpr(n, parent)
+ case *ConstraintDefinition:
+ return c.copyOnRewriteRefOfConstraintDefinition(n, parent)
+ case *ConvertExpr:
+ return c.copyOnRewriteRefOfConvertExpr(n, parent)
+ case *ConvertType:
+ return c.copyOnRewriteRefOfConvertType(n, parent)
+ case *ConvertUsingExpr:
+ return c.copyOnRewriteRefOfConvertUsingExpr(n, parent)
+ case *Count:
+ return c.copyOnRewriteRefOfCount(n, parent)
+ case *CountStar:
+ return c.copyOnRewriteRefOfCountStar(n, parent)
+ case *CreateDatabase:
+ return c.copyOnRewriteRefOfCreateDatabase(n, parent)
+ case *CreateTable:
+ return c.copyOnRewriteRefOfCreateTable(n, parent)
+ case *CreateView:
+ return c.copyOnRewriteRefOfCreateView(n, parent)
+ case *CurTimeFuncExpr:
+ return c.copyOnRewriteRefOfCurTimeFuncExpr(n, parent)
+ case *DeallocateStmt:
+ return c.copyOnRewriteRefOfDeallocateStmt(n, parent)
+ case *Default:
+ return c.copyOnRewriteRefOfDefault(n, parent)
+ case *Definer:
+ return c.copyOnRewriteRefOfDefiner(n, parent)
+ case *Delete:
+ return c.copyOnRewriteRefOfDelete(n, parent)
+ case *DerivedTable:
+ return c.copyOnRewriteRefOfDerivedTable(n, parent)
+ case *DropColumn:
+ return c.copyOnRewriteRefOfDropColumn(n, parent)
+ case *DropDatabase:
+ return c.copyOnRewriteRefOfDropDatabase(n, parent)
+ case *DropKey:
+ return c.copyOnRewriteRefOfDropKey(n, parent)
+ case *DropTable:
+ return c.copyOnRewriteRefOfDropTable(n, parent)
+ case *DropView:
+ return c.copyOnRewriteRefOfDropView(n, parent)
+ case *ExecuteStmt:
+ return c.copyOnRewriteRefOfExecuteStmt(n, parent)
+ case *ExistsExpr:
+ return c.copyOnRewriteRefOfExistsExpr(n, parent)
+ case *ExplainStmt:
+ return c.copyOnRewriteRefOfExplainStmt(n, parent)
+ case *ExplainTab:
+ return c.copyOnRewriteRefOfExplainTab(n, parent)
+ case Exprs:
+ return c.copyOnRewriteExprs(n, parent)
+ case *ExtractFuncExpr:
+ return c.copyOnRewriteRefOfExtractFuncExpr(n, parent)
+ case *ExtractValueExpr:
+ return c.copyOnRewriteRefOfExtractValueExpr(n, parent)
+ case *ExtractedSubquery:
+ return c.copyOnRewriteRefOfExtractedSubquery(n, parent)
+ case *FirstOrLastValueExpr:
+ return c.copyOnRewriteRefOfFirstOrLastValueExpr(n, parent)
+ case *Flush:
+ return c.copyOnRewriteRefOfFlush(n, parent)
+ case *Force:
+ return c.copyOnRewriteRefOfForce(n, parent)
+ case *ForeignKeyDefinition:
+ return c.copyOnRewriteRefOfForeignKeyDefinition(n, parent)
+ case *FrameClause:
+ return c.copyOnRewriteRefOfFrameClause(n, parent)
+ case *FramePoint:
+ return c.copyOnRewriteRefOfFramePoint(n, parent)
+ case *FromFirstLastClause:
+ return c.copyOnRewriteRefOfFromFirstLastClause(n, parent)
+ case *FuncExpr:
+ return c.copyOnRewriteRefOfFuncExpr(n, parent)
+ case *GTIDFuncExpr:
+ return c.copyOnRewriteRefOfGTIDFuncExpr(n, parent)
+ case GroupBy:
+ return c.copyOnRewriteGroupBy(n, parent)
+ case *GroupConcatExpr:
+ return c.copyOnRewriteRefOfGroupConcatExpr(n, parent)
+ case IdentifierCI:
+ return c.copyOnRewriteIdentifierCI(n, parent)
+ case IdentifierCS:
+ return c.copyOnRewriteIdentifierCS(n, parent)
+ case *IndexDefinition:
+ return c.copyOnRewriteRefOfIndexDefinition(n, parent)
+ case *IndexHint:
+ return c.copyOnRewriteRefOfIndexHint(n, parent)
+ case IndexHints:
+ return c.copyOnRewriteIndexHints(n, parent)
+ case *IndexInfo:
+ return c.copyOnRewriteRefOfIndexInfo(n, parent)
+ case *Insert:
+ return c.copyOnRewriteRefOfInsert(n, parent)
+ case *InsertExpr:
+ return c.copyOnRewriteRefOfInsertExpr(n, parent)
+ case *IntervalExpr:
+ return c.copyOnRewriteRefOfIntervalExpr(n, parent)
+ case *IntervalFuncExpr:
+ return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent)
+ case *IntroducerExpr:
+ return c.copyOnRewriteRefOfIntroducerExpr(n, parent)
+ case *IsExpr:
+ return c.copyOnRewriteRefOfIsExpr(n, parent)
+ case *JSONArrayExpr:
+ return c.copyOnRewriteRefOfJSONArrayExpr(n, parent)
+ case *JSONAttributesExpr:
+ return c.copyOnRewriteRefOfJSONAttributesExpr(n, parent)
+ case *JSONContainsExpr:
+ return c.copyOnRewriteRefOfJSONContainsExpr(n, parent)
+ case *JSONContainsPathExpr:
+ return c.copyOnRewriteRefOfJSONContainsPathExpr(n, parent)
+ case *JSONExtractExpr:
+ return c.copyOnRewriteRefOfJSONExtractExpr(n, parent)
+ case *JSONKeysExpr:
+ return c.copyOnRewriteRefOfJSONKeysExpr(n, parent)
+ case *JSONObjectExpr:
+ return c.copyOnRewriteRefOfJSONObjectExpr(n, parent)
+ case *JSONObjectParam:
+ return c.copyOnRewriteRefOfJSONObjectParam(n, parent)
+ case *JSONOverlapsExpr:
+ return c.copyOnRewriteRefOfJSONOverlapsExpr(n, parent)
+ case *JSONPrettyExpr:
+ return c.copyOnRewriteRefOfJSONPrettyExpr(n, parent)
+ case *JSONQuoteExpr:
+ return c.copyOnRewriteRefOfJSONQuoteExpr(n, parent)
+ case *JSONRemoveExpr:
+ return c.copyOnRewriteRefOfJSONRemoveExpr(n, parent)
+ case *JSONSchemaValidFuncExpr:
+ return c.copyOnRewriteRefOfJSONSchemaValidFuncExpr(n, parent)
+ case *JSONSchemaValidationReportFuncExpr:
+ return c.copyOnRewriteRefOfJSONSchemaValidationReportFuncExpr(n, parent)
+ case *JSONSearchExpr:
+ return c.copyOnRewriteRefOfJSONSearchExpr(n, parent)
+ case *JSONStorageFreeExpr:
+ return c.copyOnRewriteRefOfJSONStorageFreeExpr(n, parent)
+ case *JSONStorageSizeExpr:
+ return c.copyOnRewriteRefOfJSONStorageSizeExpr(n, parent)
+ case *JSONTableExpr:
+ return c.copyOnRewriteRefOfJSONTableExpr(n, parent)
+ case *JSONUnquoteExpr:
+ return c.copyOnRewriteRefOfJSONUnquoteExpr(n, parent)
+ case *JSONValueExpr:
+ return c.copyOnRewriteRefOfJSONValueExpr(n, parent)
+ case *JSONValueMergeExpr:
+ return c.copyOnRewriteRefOfJSONValueMergeExpr(n, parent)
+ case *JSONValueModifierExpr:
+ return c.copyOnRewriteRefOfJSONValueModifierExpr(n, parent)
+ case *JoinCondition:
+ return c.copyOnRewriteRefOfJoinCondition(n, parent)
+ case *JoinTableExpr:
+ return c.copyOnRewriteRefOfJoinTableExpr(n, parent)
+ case *JtColumnDefinition:
+ return c.copyOnRewriteRefOfJtColumnDefinition(n, parent)
+ case *JtOnResponse:
+ return c.copyOnRewriteRefOfJtOnResponse(n, parent)
+ case *KeyState:
+ return c.copyOnRewriteRefOfKeyState(n, parent)
+ case *LagLeadExpr:
+ return c.copyOnRewriteRefOfLagLeadExpr(n, parent)
+ case *Limit:
+ return c.copyOnRewriteRefOfLimit(n, parent)
+ case ListArg:
+ return c.copyOnRewriteListArg(n, parent)
+ case *Literal:
+ return c.copyOnRewriteRefOfLiteral(n, parent)
+ case *Load:
+ return c.copyOnRewriteRefOfLoad(n, parent)
+ case *LocateExpr:
+ return c.copyOnRewriteRefOfLocateExpr(n, parent)
+ case *LockOption:
+ return c.copyOnRewriteRefOfLockOption(n, parent)
+ case *LockTables:
+ return c.copyOnRewriteRefOfLockTables(n, parent)
+ case *LockingFunc:
+ return c.copyOnRewriteRefOfLockingFunc(n, parent)
+ case MatchAction:
+ return c.copyOnRewriteMatchAction(n, parent)
+ case *MatchExpr:
+ return c.copyOnRewriteRefOfMatchExpr(n, parent)
+ case *Max:
+ return c.copyOnRewriteRefOfMax(n, parent)
+ case *MemberOfExpr:
+ return c.copyOnRewriteRefOfMemberOfExpr(n, parent)
+ case *Min:
+ return c.copyOnRewriteRefOfMin(n, parent)
+ case *ModifyColumn:
+ return c.copyOnRewriteRefOfModifyColumn(n, parent)
+ case *NTHValueExpr:
+ return c.copyOnRewriteRefOfNTHValueExpr(n, parent)
+ case *NamedWindow:
+ return c.copyOnRewriteRefOfNamedWindow(n, parent)
+ case NamedWindows:
+ return c.copyOnRewriteNamedWindows(n, parent)
+ case *Nextval:
+ return c.copyOnRewriteRefOfNextval(n, parent)
+ case *NotExpr:
+ return c.copyOnRewriteRefOfNotExpr(n, parent)
+ case *NtileExpr:
+ return c.copyOnRewriteRefOfNtileExpr(n, parent)
+ case *NullTreatmentClause:
+ return c.copyOnRewriteRefOfNullTreatmentClause(n, parent)
+ case *NullVal:
+ return c.copyOnRewriteRefOfNullVal(n, parent)
+ case *Offset:
+ return c.copyOnRewriteRefOfOffset(n, parent)
+ case OnDup:
+ return c.copyOnRewriteOnDup(n, parent)
+ case *OptLike:
+ return c.copyOnRewriteRefOfOptLike(n, parent)
+ case *OrExpr:
+ return c.copyOnRewriteRefOfOrExpr(n, parent)
+ case *Order:
+ return c.copyOnRewriteRefOfOrder(n, parent)
+ case OrderBy:
+ return c.copyOnRewriteOrderBy(n, parent)
+ case *OrderByOption:
+ return c.copyOnRewriteRefOfOrderByOption(n, parent)
+ case *OtherAdmin:
+ return c.copyOnRewriteRefOfOtherAdmin(n, parent)
+ case *OtherRead:
+ return c.copyOnRewriteRefOfOtherRead(n, parent)
+ case *OverClause:
+ return c.copyOnRewriteRefOfOverClause(n, parent)
+ case *ParenTableExpr:
+ return c.copyOnRewriteRefOfParenTableExpr(n, parent)
+ case *ParsedComments:
+ return c.copyOnRewriteRefOfParsedComments(n, parent)
+ case *PartitionDefinition:
+ return c.copyOnRewriteRefOfPartitionDefinition(n, parent)
+ case *PartitionDefinitionOptions:
+ return c.copyOnRewriteRefOfPartitionDefinitionOptions(n, parent)
+ case *PartitionEngine:
+ return c.copyOnRewriteRefOfPartitionEngine(n, parent)
+ case *PartitionOption:
+ return c.copyOnRewriteRefOfPartitionOption(n, parent)
+ case *PartitionSpec:
+ return c.copyOnRewriteRefOfPartitionSpec(n, parent)
+ case *PartitionValueRange:
+ return c.copyOnRewriteRefOfPartitionValueRange(n, parent)
+ case Partitions:
+ return c.copyOnRewritePartitions(n, parent)
+ case *PerformanceSchemaFuncExpr:
+ return c.copyOnRewriteRefOfPerformanceSchemaFuncExpr(n, parent)
+ case *PrepareStmt:
+ return c.copyOnRewriteRefOfPrepareStmt(n, parent)
+ case ReferenceAction:
+ return c.copyOnRewriteReferenceAction(n, parent)
+ case *ReferenceDefinition:
+ return c.copyOnRewriteRefOfReferenceDefinition(n, parent)
+ case *RegexpInstrExpr:
+ return c.copyOnRewriteRefOfRegexpInstrExpr(n, parent)
+ case *RegexpLikeExpr:
+ return c.copyOnRewriteRefOfRegexpLikeExpr(n, parent)
+ case *RegexpReplaceExpr:
+ return c.copyOnRewriteRefOfRegexpReplaceExpr(n, parent)
+ case *RegexpSubstrExpr:
+ return c.copyOnRewriteRefOfRegexpSubstrExpr(n, parent)
+ case *Release:
+ return c.copyOnRewriteRefOfRelease(n, parent)
+ case *RenameColumn:
+ return c.copyOnRewriteRefOfRenameColumn(n, parent)
+ case *RenameIndex:
+ return c.copyOnRewriteRefOfRenameIndex(n, parent)
+ case *RenameTable:
+ return c.copyOnRewriteRefOfRenameTable(n, parent)
+ case *RenameTableName:
+ return c.copyOnRewriteRefOfRenameTableName(n, parent)
+ case *RevertMigration:
+ return c.copyOnRewriteRefOfRevertMigration(n, parent)
+ case *Rollback:
+ return c.copyOnRewriteRefOfRollback(n, parent)
+ case RootNode:
+ return c.copyOnRewriteRootNode(n, parent)
+ case *SRollback:
+ return c.copyOnRewriteRefOfSRollback(n, parent)
+ case *Savepoint:
+ return c.copyOnRewriteRefOfSavepoint(n, parent)
+ case *Select:
+ return c.copyOnRewriteRefOfSelect(n, parent)
+ case SelectExprs:
+ return c.copyOnRewriteSelectExprs(n, parent)
+ case *SelectInto:
+ return c.copyOnRewriteRefOfSelectInto(n, parent)
+ case *Set:
+ return c.copyOnRewriteRefOfSet(n, parent)
+ case *SetExpr:
+ return c.copyOnRewriteRefOfSetExpr(n, parent)
+ case SetExprs:
+ return c.copyOnRewriteSetExprs(n, parent)
+ case *Show:
+ return c.copyOnRewriteRefOfShow(n, parent)
+ case *ShowBasic:
+ return c.copyOnRewriteRefOfShowBasic(n, parent)
+ case *ShowCreate:
+ return c.copyOnRewriteRefOfShowCreate(n, parent)
+ case *ShowFilter:
+ return c.copyOnRewriteRefOfShowFilter(n, parent)
+ case *ShowMigrationLogs:
+ return c.copyOnRewriteRefOfShowMigrationLogs(n, parent)
+ case *ShowOther:
+ return c.copyOnRewriteRefOfShowOther(n, parent)
+ case *ShowThrottledApps:
+ return c.copyOnRewriteRefOfShowThrottledApps(n, parent)
+ case *ShowThrottlerStatus:
+ return c.copyOnRewriteRefOfShowThrottlerStatus(n, parent)
+ case *StarExpr:
+ return c.copyOnRewriteRefOfStarExpr(n, parent)
+ case *Std:
+ return c.copyOnRewriteRefOfStd(n, parent)
+ case *StdDev:
+ return c.copyOnRewriteRefOfStdDev(n, parent)
+ case *StdPop:
+ return c.copyOnRewriteRefOfStdPop(n, parent)
+ case *StdSamp:
+ return c.copyOnRewriteRefOfStdSamp(n, parent)
+ case *Stream:
+ return c.copyOnRewriteRefOfStream(n, parent)
+ case *SubPartition:
+ return c.copyOnRewriteRefOfSubPartition(n, parent)
+ case *SubPartitionDefinition:
+ return c.copyOnRewriteRefOfSubPartitionDefinition(n, parent)
+ case *SubPartitionDefinitionOptions:
+ return c.copyOnRewriteRefOfSubPartitionDefinitionOptions(n, parent)
+ case SubPartitionDefinitions:
+ return c.copyOnRewriteSubPartitionDefinitions(n, parent)
+ case *Subquery:
+ return c.copyOnRewriteRefOfSubquery(n, parent)
+ case *SubstrExpr:
+ return c.copyOnRewriteRefOfSubstrExpr(n, parent)
+ case *Sum:
+ return c.copyOnRewriteRefOfSum(n, parent)
+ case TableExprs:
+ return c.copyOnRewriteTableExprs(n, parent)
+ case TableName:
+ return c.copyOnRewriteTableName(n, parent)
+ case TableNames:
+ return c.copyOnRewriteTableNames(n, parent)
+ case TableOptions:
+ return c.copyOnRewriteTableOptions(n, parent)
+ case *TableSpec:
+ return c.copyOnRewriteRefOfTableSpec(n, parent)
+ case *TablespaceOperation:
+ return c.copyOnRewriteRefOfTablespaceOperation(n, parent)
+ case *TimestampFuncExpr:
+ return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent)
+ case *TrimFuncExpr:
+ return c.copyOnRewriteRefOfTrimFuncExpr(n, parent)
+ case *TruncateTable:
+ return c.copyOnRewriteRefOfTruncateTable(n, parent)
+ case *UnaryExpr:
+ return c.copyOnRewriteRefOfUnaryExpr(n, parent)
+ case *Union:
+ return c.copyOnRewriteRefOfUnion(n, parent)
+ case *UnlockTables:
+ return c.copyOnRewriteRefOfUnlockTables(n, parent)
+ case *Update:
+ return c.copyOnRewriteRefOfUpdate(n, parent)
+ case *UpdateExpr:
+ return c.copyOnRewriteRefOfUpdateExpr(n, parent)
+ case UpdateExprs:
+ return c.copyOnRewriteUpdateExprs(n, parent)
+ case *UpdateXMLExpr:
+ return c.copyOnRewriteRefOfUpdateXMLExpr(n, parent)
+ case *Use:
+ return c.copyOnRewriteRefOfUse(n, parent)
+ case *VExplainStmt:
+ return c.copyOnRewriteRefOfVExplainStmt(n, parent)
+ case *VStream:
+ return c.copyOnRewriteRefOfVStream(n, parent)
+ case ValTuple:
+ return c.copyOnRewriteValTuple(n, parent)
+ case *Validation:
+ return c.copyOnRewriteRefOfValidation(n, parent)
+ case Values:
+ return c.copyOnRewriteValues(n, parent)
+ case *ValuesFuncExpr:
+ return c.copyOnRewriteRefOfValuesFuncExpr(n, parent)
+ case *VarPop:
+ return c.copyOnRewriteRefOfVarPop(n, parent)
+ case *VarSamp:
+ return c.copyOnRewriteRefOfVarSamp(n, parent)
+ case *Variable:
+ return c.copyOnRewriteRefOfVariable(n, parent)
+ case *Variance:
+ return c.copyOnRewriteRefOfVariance(n, parent)
+ case VindexParam:
+ return c.copyOnRewriteVindexParam(n, parent)
+ case *VindexSpec:
+ return c.copyOnRewriteRefOfVindexSpec(n, parent)
+ case *WeightStringFuncExpr:
+ return c.copyOnRewriteRefOfWeightStringFuncExpr(n, parent)
+ case *When:
+ return c.copyOnRewriteRefOfWhen(n, parent)
+ case *Where:
+ return c.copyOnRewriteRefOfWhere(n, parent)
+ case *WindowDefinition:
+ return c.copyOnRewriteRefOfWindowDefinition(n, parent)
+ case WindowDefinitions:
+ return c.copyOnRewriteWindowDefinitions(n, parent)
+ case *WindowSpecification:
+ return c.copyOnRewriteRefOfWindowSpecification(n, parent)
+ case *With:
+ return c.copyOnRewriteRefOfWith(n, parent)
+ case *XorExpr:
+ return c.copyOnRewriteRefOfXorExpr(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteRefOfAddColumns(n *AddColumns, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedColumns bool
+ _Columns := make([]*ColumnDefinition, len(n.Columns))
+ for x, el := range n.Columns {
+ this, changed := c.copyOnRewriteRefOfColumnDefinition(el, n)
+ _Columns[x] = this.(*ColumnDefinition)
+ if changed {
+ changedColumns = true
+ }
+ }
+ _After, changedAfter := c.copyOnRewriteRefOfColName(n.After, n)
+ if changedColumns || changedAfter {
+ res := *n
+ res.Columns = _Columns
+ res.After, _ = _After.(*ColName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAddConstraintDefinition(n *AddConstraintDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ConstraintDefinition, changedConstraintDefinition := c.copyOnRewriteRefOfConstraintDefinition(n.ConstraintDefinition, n)
+ if changedConstraintDefinition {
+ res := *n
+ res.ConstraintDefinition, _ = _ConstraintDefinition.(*ConstraintDefinition)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAddIndexDefinition(n *AddIndexDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _IndexDefinition, changedIndexDefinition := c.copyOnRewriteRefOfIndexDefinition(n.IndexDefinition, n)
+ if changedIndexDefinition {
+ res := *n
+ res.IndexDefinition, _ = _IndexDefinition.(*IndexDefinition)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAliasedExpr(n *AliasedExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _As, changedAs := c.copyOnRewriteIdentifierCI(n.As, n)
+ if changedExpr || changedAs {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.As, _ = _As.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAliasedTableExpr(n *AliasedTableExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteSimpleTableExpr(n.Expr, n)
+ _Partitions, changedPartitions := c.copyOnRewritePartitions(n.Partitions, n)
+ _As, changedAs := c.copyOnRewriteIdentifierCS(n.As, n)
+ _Hints, changedHints := c.copyOnRewriteIndexHints(n.Hints, n)
+ _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n)
+ if changedExpr || changedPartitions || changedAs || changedHints || changedColumns {
+ res := *n
+ res.Expr, _ = _Expr.(SimpleTableExpr)
+ res.Partitions, _ = _Partitions.(Partitions)
+ res.As, _ = _As.(IdentifierCS)
+ res.Hints, _ = _Hints.(IndexHints)
+ res.Columns, _ = _Columns.(Columns)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterCharset(n *AlterCharset, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterCheck(n *AlterCheck, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterColumn(n *AlterColumn, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Column, changedColumn := c.copyOnRewriteRefOfColName(n.Column, n)
+ _DefaultVal, changedDefaultVal := c.copyOnRewriteExpr(n.DefaultVal, n)
+ if changedColumn || changedDefaultVal {
+ res := *n
+ res.Column, _ = _Column.(*ColName)
+ res.DefaultVal, _ = _DefaultVal.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterDatabase(n *AlterDatabase, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _DBName, changedDBName := c.copyOnRewriteIdentifierCS(n.DBName, n)
+ if changedDBName {
+ res := *n
+ res.DBName, _ = _DBName.(IdentifierCS)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterIndex(n *AlterIndex, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterMigration(n *AlterMigration, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Ratio, changedRatio := c.copyOnRewriteRefOfLiteral(n.Ratio, n)
+ if changedRatio {
+ res := *n
+ res.Ratio, _ = _Ratio.(*Literal)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterTable(n *AlterTable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ var changedAlterOptions bool
+ _AlterOptions := make([]AlterOption, len(n.AlterOptions))
+ for x, el := range n.AlterOptions {
+ this, changed := c.copyOnRewriteAlterOption(el, n)
+ _AlterOptions[x] = this.(AlterOption)
+ if changed {
+ changedAlterOptions = true
+ }
+ }
+ _PartitionSpec, changedPartitionSpec := c.copyOnRewriteRefOfPartitionSpec(n.PartitionSpec, n)
+ _PartitionOption, changedPartitionOption := c.copyOnRewriteRefOfPartitionOption(n.PartitionOption, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedTable || changedAlterOptions || changedPartitionSpec || changedPartitionOption || changedComments {
+ res := *n
+ res.Table, _ = _Table.(TableName)
+ res.AlterOptions = _AlterOptions
+ res.PartitionSpec, _ = _PartitionSpec.(*PartitionSpec)
+ res.PartitionOption, _ = _PartitionOption.(*PartitionOption)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterView(n *AlterView, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ViewName, changedViewName := c.copyOnRewriteTableName(n.ViewName, n)
+ _Definer, changedDefiner := c.copyOnRewriteRefOfDefiner(n.Definer, n)
+ _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n)
+ _Select, changedSelect := c.copyOnRewriteSelectStatement(n.Select, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedViewName || changedDefiner || changedColumns || changedSelect || changedComments {
+ res := *n
+ res.ViewName, _ = _ViewName.(TableName)
+ res.Definer, _ = _Definer.(*Definer)
+ res.Columns, _ = _Columns.(Columns)
+ res.Select, _ = _Select.(SelectStatement)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAlterVschema(n *AlterVschema, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ _VindexSpec, changedVindexSpec := c.copyOnRewriteRefOfVindexSpec(n.VindexSpec, n)
+ var changedVindexCols bool
+ _VindexCols := make([]IdentifierCI, len(n.VindexCols))
+ for x, el := range n.VindexCols {
+ this, changed := c.copyOnRewriteIdentifierCI(el, n)
+ _VindexCols[x] = this.(IdentifierCI)
+ if changed {
+ changedVindexCols = true
+ }
+ }
+ _AutoIncSpec, changedAutoIncSpec := c.copyOnRewriteRefOfAutoIncSpec(n.AutoIncSpec, n)
+ if changedTable || changedVindexSpec || changedVindexCols || changedAutoIncSpec {
+ res := *n
+ res.Table, _ = _Table.(TableName)
+ res.VindexSpec, _ = _VindexSpec.(*VindexSpec)
+ res.VindexCols = _VindexCols
+ res.AutoIncSpec, _ = _AutoIncSpec.(*AutoIncSpec)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAndExpr(n *AndExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n)
+ _Right, changedRight := c.copyOnRewriteExpr(n.Right, n)
+ if changedLeft || changedRight {
+ res := *n
+ res.Left, _ = _Left.(Expr)
+ res.Right, _ = _Right.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfArgumentLessWindowExpr(n *ArgumentLessWindowExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n)
+ if changedOverClause {
+ res := *n
+ res.OverClause, _ = _OverClause.(*OverClause)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAutoIncSpec(n *AutoIncSpec, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Column, changedColumn := c.copyOnRewriteIdentifierCI(n.Column, n)
+ _Sequence, changedSequence := c.copyOnRewriteTableName(n.Sequence, n)
+ if changedColumn || changedSequence {
+ res := *n
+ res.Column, _ = _Column.(IdentifierCI)
+ res.Sequence, _ = _Sequence.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfAvg(n *Avg, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfBegin(n *Begin, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfBetweenExpr(n *BetweenExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n)
+ _From, changedFrom := c.copyOnRewriteExpr(n.From, n)
+ _To, changedTo := c.copyOnRewriteExpr(n.To, n)
+ if changedLeft || changedFrom || changedTo {
+ res := *n
+ res.Left, _ = _Left.(Expr)
+ res.From, _ = _From.(Expr)
+ res.To, _ = _To.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfBinaryExpr(n *BinaryExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n)
+ _Right, changedRight := c.copyOnRewriteExpr(n.Right, n)
+ if changedLeft || changedRight {
+ res := *n
+ res.Left, _ = _Left.(Expr)
+ res.Right, _ = _Right.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfBitAnd(n *BitAnd, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfBitOr(n *BitOr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfBitXor(n *BitXor, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCallProc(n *CallProc, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteTableName(n.Name, n)
+ _Params, changedParams := c.copyOnRewriteExprs(n.Params, n)
+ if changedName || changedParams {
+ res := *n
+ res.Name, _ = _Name.(TableName)
+ res.Params, _ = _Params.(Exprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCaseExpr(n *CaseExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ var changedWhens bool
+ _Whens := make([]*When, len(n.Whens))
+ for x, el := range n.Whens {
+ this, changed := c.copyOnRewriteRefOfWhen(el, n)
+ _Whens[x] = this.(*When)
+ if changed {
+ changedWhens = true
+ }
+ }
+ _Else, changedElse := c.copyOnRewriteExpr(n.Else, n)
+ if changedExpr || changedWhens || changedElse {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Whens = _Whens
+ res.Else, _ = _Else.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCastExpr(n *CastExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Type, changedType := c.copyOnRewriteRefOfConvertType(n.Type, n)
+ if changedExpr || changedType {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Type, _ = _Type.(*ConvertType)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfChangeColumn(n *ChangeColumn, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _OldColumn, changedOldColumn := c.copyOnRewriteRefOfColName(n.OldColumn, n)
+ _NewColDefinition, changedNewColDefinition := c.copyOnRewriteRefOfColumnDefinition(n.NewColDefinition, n)
+ _After, changedAfter := c.copyOnRewriteRefOfColName(n.After, n)
+ if changedOldColumn || changedNewColDefinition || changedAfter {
+ res := *n
+ res.OldColumn, _ = _OldColumn.(*ColName)
+ res.NewColDefinition, _ = _NewColDefinition.(*ColumnDefinition)
+ res.After, _ = _After.(*ColName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCharExpr(n *CharExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Exprs, changedExprs := c.copyOnRewriteExprs(n.Exprs, n)
+ if changedExprs {
+ res := *n
+ res.Exprs, _ = _Exprs.(Exprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCheckConstraintDefinition(n *CheckConstraintDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfColName(n *ColName, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Qualifier, changedQualifier := c.copyOnRewriteTableName(n.Qualifier, n)
+ if changedName || changedQualifier {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Qualifier, _ = _Qualifier.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCollateExpr(n *CollateExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfColumnDefinition(n *ColumnDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Type, changedType := c.copyOnRewriteRefOfColumnType(n.Type, n)
+ if changedName || changedType {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Type, _ = _Type.(*ColumnType)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfColumnType(n *ColumnType, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Length, changedLength := c.copyOnRewriteRefOfLiteral(n.Length, n)
+ _Scale, changedScale := c.copyOnRewriteRefOfLiteral(n.Scale, n)
+ if changedLength || changedScale {
+ res := *n
+ res.Length, _ = _Length.(*Literal)
+ res.Scale, _ = _Scale.(*Literal)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteColumns(n Columns, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(Columns, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteIdentifierCI(el, n)
+ res[x] = this.(IdentifierCI)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCommentOnly(n *CommentOnly, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCommit(n *Commit, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCommonTableExpr(n *CommonTableExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ID, changedID := c.copyOnRewriteIdentifierCS(n.ID, n)
+ _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n)
+ _Subquery, changedSubquery := c.copyOnRewriteRefOfSubquery(n.Subquery, n)
+ if changedID || changedColumns || changedSubquery {
+ res := *n
+ res.ID, _ = _ID.(IdentifierCS)
+ res.Columns, _ = _Columns.(Columns)
+ res.Subquery, _ = _Subquery.(*Subquery)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfComparisonExpr(n *ComparisonExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n)
+ _Right, changedRight := c.copyOnRewriteExpr(n.Right, n)
+ _Escape, changedEscape := c.copyOnRewriteExpr(n.Escape, n)
+ if changedLeft || changedRight || changedEscape {
+ res := *n
+ res.Left, _ = _Left.(Expr)
+ res.Right, _ = _Right.(Expr)
+ res.Escape, _ = _Escape.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfConstraintDefinition(n *ConstraintDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Details, changedDetails := c.copyOnRewriteConstraintInfo(n.Details, n)
+ if changedName || changedDetails {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Details, _ = _Details.(ConstraintInfo)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfConvertExpr(n *ConvertExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Type, changedType := c.copyOnRewriteRefOfConvertType(n.Type, n)
+ if changedExpr || changedType {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Type, _ = _Type.(*ConvertType)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfConvertType(n *ConvertType, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Length, changedLength := c.copyOnRewriteRefOfLiteral(n.Length, n)
+ _Scale, changedScale := c.copyOnRewriteRefOfLiteral(n.Scale, n)
+ if changedLength || changedScale {
+ res := *n
+ res.Length, _ = _Length.(*Literal)
+ res.Scale, _ = _Scale.(*Literal)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfConvertUsingExpr(n *ConvertUsingExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCount(n *Count, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Args, changedArgs := c.copyOnRewriteExprs(n.Args, n)
+ if changedArgs {
+ res := *n
+ res.Args, _ = _Args.(Exprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCountStar(n *CountStar, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCreateDatabase(n *CreateDatabase, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _DBName, changedDBName := c.copyOnRewriteIdentifierCS(n.DBName, n)
+ if changedComments || changedDBName {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.DBName, _ = _DBName.(IdentifierCS)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCreateTable(n *CreateTable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ _TableSpec, changedTableSpec := c.copyOnRewriteRefOfTableSpec(n.TableSpec, n)
+ _OptLike, changedOptLike := c.copyOnRewriteRefOfOptLike(n.OptLike, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedTable || changedTableSpec || changedOptLike || changedComments {
+ res := *n
+ res.Table, _ = _Table.(TableName)
+ res.TableSpec, _ = _TableSpec.(*TableSpec)
+ res.OptLike, _ = _OptLike.(*OptLike)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCreateView(n *CreateView, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ViewName, changedViewName := c.copyOnRewriteTableName(n.ViewName, n)
+ _Definer, changedDefiner := c.copyOnRewriteRefOfDefiner(n.Definer, n)
+ _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n)
+ _Select, changedSelect := c.copyOnRewriteSelectStatement(n.Select, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedViewName || changedDefiner || changedColumns || changedSelect || changedComments {
+ res := *n
+ res.ViewName, _ = _ViewName.(TableName)
+ res.Definer, _ = _Definer.(*Definer)
+ res.Columns, _ = _Columns.(Columns)
+ res.Select, _ = _Select.(SelectStatement)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfCurTimeFuncExpr(n *CurTimeFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Fsp, changedFsp := c.copyOnRewriteExpr(n.Fsp, n)
+ if changedName || changedFsp {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Fsp, _ = _Fsp.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDeallocateStmt(n *DeallocateStmt, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedComments || changedName {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDefault(n *Default, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDefiner(n *Definer, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDelete(n *Delete, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _With, changedWith := c.copyOnRewriteRefOfWith(n.With, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _Targets, changedTargets := c.copyOnRewriteTableNames(n.Targets, n)
+ _TableExprs, changedTableExprs := c.copyOnRewriteTableExprs(n.TableExprs, n)
+ _Partitions, changedPartitions := c.copyOnRewritePartitions(n.Partitions, n)
+ _Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n)
+ _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n)
+ _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n)
+ if changedWith || changedComments || changedTargets || changedTableExprs || changedPartitions || changedWhere || changedOrderBy || changedLimit {
+ res := *n
+ res.With, _ = _With.(*With)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.Targets, _ = _Targets.(TableNames)
+ res.TableExprs, _ = _TableExprs.(TableExprs)
+ res.Partitions, _ = _Partitions.(Partitions)
+ res.Where, _ = _Where.(*Where)
+ res.OrderBy, _ = _OrderBy.(OrderBy)
+ res.Limit, _ = _Limit.(*Limit)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDerivedTable(n *DerivedTable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Select, changedSelect := c.copyOnRewriteSelectStatement(n.Select, n)
+ if changedSelect {
+ res := *n
+ res.Select, _ = _Select.(SelectStatement)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDropColumn(n *DropColumn, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteRefOfColName(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(*ColName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDropDatabase(n *DropDatabase, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _DBName, changedDBName := c.copyOnRewriteIdentifierCS(n.DBName, n)
+ if changedComments || changedDBName {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.DBName, _ = _DBName.(IdentifierCS)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDropKey(n *DropKey, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDropTable(n *DropTable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _FromTables, changedFromTables := c.copyOnRewriteTableNames(n.FromTables, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedFromTables || changedComments {
+ res := *n
+ res.FromTables, _ = _FromTables.(TableNames)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfDropView(n *DropView, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _FromTables, changedFromTables := c.copyOnRewriteTableNames(n.FromTables, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedFromTables || changedComments {
+ res := *n
+ res.FromTables, _ = _FromTables.(TableNames)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfExecuteStmt(n *ExecuteStmt, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ var changedArguments bool
+ _Arguments := make([]*Variable, len(n.Arguments))
+ for x, el := range n.Arguments {
+ this, changed := c.copyOnRewriteRefOfVariable(el, n)
+ _Arguments[x] = this.(*Variable)
+ if changed {
+ changedArguments = true
+ }
+ }
+ if changedName || changedComments || changedArguments {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.Arguments = _Arguments
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfExistsExpr(n *ExistsExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Subquery, changedSubquery := c.copyOnRewriteRefOfSubquery(n.Subquery, n)
+ if changedSubquery {
+ res := *n
+ res.Subquery, _ = _Subquery.(*Subquery)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfExplainStmt(n *ExplainStmt, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Statement, changedStatement := c.copyOnRewriteStatement(n.Statement, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedStatement || changedComments {
+ res := *n
+ res.Statement, _ = _Statement.(Statement)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfExplainTab(n *ExplainTab, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ if changedTable {
+ res := *n
+ res.Table, _ = _Table.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteExprs(n Exprs, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(Exprs, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteExpr(el, n)
+ res[x] = this.(Expr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfExtractFuncExpr(n *ExtractFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfExtractValueExpr(n *ExtractValueExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Fragment, changedFragment := c.copyOnRewriteExpr(n.Fragment, n)
+ _XPathExpr, changedXPathExpr := c.copyOnRewriteExpr(n.XPathExpr, n)
+ if changedFragment || changedXPathExpr {
+ res := *n
+ res.Fragment, _ = _Fragment.(Expr)
+ res.XPathExpr, _ = _XPathExpr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfExtractedSubquery(n *ExtractedSubquery, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Original, changedOriginal := c.copyOnRewriteExpr(n.Original, n)
+ _Subquery, changedSubquery := c.copyOnRewriteRefOfSubquery(n.Subquery, n)
+ _OtherSide, changedOtherSide := c.copyOnRewriteExpr(n.OtherSide, n)
+ _alternative, changedalternative := c.copyOnRewriteExpr(n.alternative, n)
+ if changedOriginal || changedSubquery || changedOtherSide || changedalternative {
+ res := *n
+ res.Original, _ = _Original.(Expr)
+ res.Subquery, _ = _Subquery.(*Subquery)
+ res.OtherSide, _ = _OtherSide.(Expr)
+ res.alternative, _ = _alternative.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfFirstOrLastValueExpr(n *FirstOrLastValueExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _NullTreatmentClause, changedNullTreatmentClause := c.copyOnRewriteRefOfNullTreatmentClause(n.NullTreatmentClause, n)
+ _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n)
+ if changedExpr || changedNullTreatmentClause || changedOverClause {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.NullTreatmentClause, _ = _NullTreatmentClause.(*NullTreatmentClause)
+ res.OverClause, _ = _OverClause.(*OverClause)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfFlush(n *Flush, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _TableNames, changedTableNames := c.copyOnRewriteTableNames(n.TableNames, n)
+ if changedTableNames {
+ res := *n
+ res.TableNames, _ = _TableNames.(TableNames)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfForce(n *Force, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfForeignKeyDefinition(n *ForeignKeyDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Source, changedSource := c.copyOnRewriteColumns(n.Source, n)
+ _IndexName, changedIndexName := c.copyOnRewriteIdentifierCI(n.IndexName, n)
+ _ReferenceDefinition, changedReferenceDefinition := c.copyOnRewriteRefOfReferenceDefinition(n.ReferenceDefinition, n)
+ if changedSource || changedIndexName || changedReferenceDefinition {
+ res := *n
+ res.Source, _ = _Source.(Columns)
+ res.IndexName, _ = _IndexName.(IdentifierCI)
+ res.ReferenceDefinition, _ = _ReferenceDefinition.(*ReferenceDefinition)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfFrameClause(n *FrameClause, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Start, changedStart := c.copyOnRewriteRefOfFramePoint(n.Start, n)
+ _End, changedEnd := c.copyOnRewriteRefOfFramePoint(n.End, n)
+ if changedStart || changedEnd {
+ res := *n
+ res.Start, _ = _Start.(*FramePoint)
+ res.End, _ = _End.(*FramePoint)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfFramePoint(n *FramePoint, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfFromFirstLastClause(n *FromFirstLastClause, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfFuncExpr(n *FuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Qualifier, changedQualifier := c.copyOnRewriteIdentifierCS(n.Qualifier, n)
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Exprs, changedExprs := c.copyOnRewriteSelectExprs(n.Exprs, n)
+ if changedQualifier || changedName || changedExprs {
+ res := *n
+ res.Qualifier, _ = _Qualifier.(IdentifierCS)
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Exprs, _ = _Exprs.(SelectExprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfGTIDFuncExpr(n *GTIDFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Set1, changedSet1 := c.copyOnRewriteExpr(n.Set1, n)
+ _Set2, changedSet2 := c.copyOnRewriteExpr(n.Set2, n)
+ _Timeout, changedTimeout := c.copyOnRewriteExpr(n.Timeout, n)
+ _Channel, changedChannel := c.copyOnRewriteExpr(n.Channel, n)
+ if changedSet1 || changedSet2 || changedTimeout || changedChannel {
+ res := *n
+ res.Set1, _ = _Set1.(Expr)
+ res.Set2, _ = _Set2.(Expr)
+ res.Timeout, _ = _Timeout.(Expr)
+ res.Channel, _ = _Channel.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteGroupBy(n GroupBy, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(GroupBy, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteExpr(el, n)
+ res[x] = this.(Expr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfGroupConcatExpr(n *GroupConcatExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Exprs, changedExprs := c.copyOnRewriteExprs(n.Exprs, n)
+ _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n)
+ _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n)
+ if changedExprs || changedOrderBy || changedLimit {
+ res := *n
+ res.Exprs, _ = _Exprs.(Exprs)
+ res.OrderBy, _ = _OrderBy.(OrderBy)
+ res.Limit, _ = _Limit.(*Limit)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteIdentifierCI(n IdentifierCI, parent SQLNode) (out SQLNode, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteIdentifierCS(n IdentifierCS, parent SQLNode) (out SQLNode, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIndexDefinition(n *IndexDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Info, changedInfo := c.copyOnRewriteRefOfIndexInfo(n.Info, n)
+ if changedInfo {
+ res := *n
+ res.Info, _ = _Info.(*IndexInfo)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIndexHint(n *IndexHint, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedIndexes bool
+ _Indexes := make([]IdentifierCI, len(n.Indexes))
+ for x, el := range n.Indexes {
+ this, changed := c.copyOnRewriteIdentifierCI(el, n)
+ _Indexes[x] = this.(IdentifierCI)
+ if changed {
+ changedIndexes = true
+ }
+ }
+ if changedIndexes {
+ res := *n
+ res.Indexes = _Indexes
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteIndexHints(n IndexHints, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(IndexHints, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfIndexHint(el, n)
+ res[x] = this.(*IndexHint)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIndexInfo(n *IndexInfo, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _ConstraintName, changedConstraintName := c.copyOnRewriteIdentifierCI(n.ConstraintName, n)
+ if changedName || changedConstraintName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.ConstraintName, _ = _ConstraintName.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfInsert(n *Insert, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ _Partitions, changedPartitions := c.copyOnRewritePartitions(n.Partitions, n)
+ _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n)
+ _Rows, changedRows := c.copyOnRewriteInsertRows(n.Rows, n)
+ _OnDup, changedOnDup := c.copyOnRewriteOnDup(n.OnDup, n)
+ if changedComments || changedTable || changedPartitions || changedColumns || changedRows || changedOnDup {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.Table, _ = _Table.(TableName)
+ res.Partitions, _ = _Partitions.(Partitions)
+ res.Columns, _ = _Columns.(Columns)
+ res.Rows, _ = _Rows.(InsertRows)
+ res.OnDup, _ = _OnDup.(OnDup)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfInsertExpr(n *InsertExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Str, changedStr := c.copyOnRewriteExpr(n.Str, n)
+ _Pos, changedPos := c.copyOnRewriteExpr(n.Pos, n)
+ _Len, changedLen := c.copyOnRewriteExpr(n.Len, n)
+ _NewStr, changedNewStr := c.copyOnRewriteExpr(n.NewStr, n)
+ if changedStr || changedPos || changedLen || changedNewStr {
+ res := *n
+ res.Str, _ = _Str.(Expr)
+ res.Pos, _ = _Pos.(Expr)
+ res.Len, _ = _Len.(Expr)
+ res.NewStr, _ = _NewStr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIntervalExpr(n *IntervalExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIntervalFuncExpr(n *IntervalFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Exprs, changedExprs := c.copyOnRewriteExprs(n.Exprs, n)
+ if changedExpr || changedExprs {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Exprs, _ = _Exprs.(Exprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIntroducerExpr(n *IntroducerExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIsExpr(n *IsExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n)
+ if changedLeft {
+ res := *n
+ res.Left, _ = _Left.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONArrayExpr(n *JSONArrayExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Params, changedParams := c.copyOnRewriteExprs(n.Params, n)
+ if changedParams {
+ res := *n
+ res.Params, _ = _Params.(Exprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONAttributesExpr(n *JSONAttributesExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ _Path, changedPath := c.copyOnRewriteExpr(n.Path, n)
+ if changedJSONDoc || changedPath {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.Path, _ = _Path.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONContainsExpr(n *JSONContainsExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Target, changedTarget := c.copyOnRewriteExpr(n.Target, n)
+ _Candidate, changedCandidate := c.copyOnRewriteExpr(n.Candidate, n)
+ var changedPathList bool
+ _PathList := make([]Expr, len(n.PathList))
+ for x, el := range n.PathList {
+ this, changed := c.copyOnRewriteExpr(el, n)
+ _PathList[x] = this.(Expr)
+ if changed {
+ changedPathList = true
+ }
+ }
+ if changedTarget || changedCandidate || changedPathList {
+ res := *n
+ res.Target, _ = _Target.(Expr)
+ res.Candidate, _ = _Candidate.(Expr)
+ res.PathList = _PathList
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONContainsPathExpr(n *JSONContainsPathExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ _OneOrAll, changedOneOrAll := c.copyOnRewriteExpr(n.OneOrAll, n)
+ var changedPathList bool
+ _PathList := make([]Expr, len(n.PathList))
+ for x, el := range n.PathList {
+ this, changed := c.copyOnRewriteExpr(el, n)
+ _PathList[x] = this.(Expr)
+ if changed {
+ changedPathList = true
+ }
+ }
+ if changedJSONDoc || changedOneOrAll || changedPathList {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.OneOrAll, _ = _OneOrAll.(Expr)
+ res.PathList = _PathList
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONExtractExpr(n *JSONExtractExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ var changedPathList bool
+ _PathList := make([]Expr, len(n.PathList))
+ for x, el := range n.PathList {
+ this, changed := c.copyOnRewriteExpr(el, n)
+ _PathList[x] = this.(Expr)
+ if changed {
+ changedPathList = true
+ }
+ }
+ if changedJSONDoc || changedPathList {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.PathList = _PathList
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONKeysExpr(n *JSONKeysExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ _Path, changedPath := c.copyOnRewriteExpr(n.Path, n)
+ if changedJSONDoc || changedPath {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.Path, _ = _Path.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONObjectExpr(n *JSONObjectExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedParams bool
+ _Params := make([]*JSONObjectParam, len(n.Params))
+ for x, el := range n.Params {
+ this, changed := c.copyOnRewriteRefOfJSONObjectParam(el, n)
+ _Params[x] = this.(*JSONObjectParam)
+ if changed {
+ changedParams = true
+ }
+ }
+ if changedParams {
+ res := *n
+ res.Params = _Params
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONObjectParam(n *JSONObjectParam, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Key, changedKey := c.copyOnRewriteExpr(n.Key, n)
+ _Value, changedValue := c.copyOnRewriteExpr(n.Value, n)
+ if changedKey || changedValue {
+ res := *n
+ res.Key, _ = _Key.(Expr)
+ res.Value, _ = _Value.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONOverlapsExpr(n *JSONOverlapsExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc1, changedJSONDoc1 := c.copyOnRewriteExpr(n.JSONDoc1, n)
+ _JSONDoc2, changedJSONDoc2 := c.copyOnRewriteExpr(n.JSONDoc2, n)
+ if changedJSONDoc1 || changedJSONDoc2 {
+ res := *n
+ res.JSONDoc1, _ = _JSONDoc1.(Expr)
+ res.JSONDoc2, _ = _JSONDoc2.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONPrettyExpr(n *JSONPrettyExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONVal, changedJSONVal := c.copyOnRewriteExpr(n.JSONVal, n)
+ if changedJSONVal {
+ res := *n
+ res.JSONVal, _ = _JSONVal.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONQuoteExpr(n *JSONQuoteExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _StringArg, changedStringArg := c.copyOnRewriteExpr(n.StringArg, n)
+ if changedStringArg {
+ res := *n
+ res.StringArg, _ = _StringArg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONRemoveExpr(n *JSONRemoveExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ _PathList, changedPathList := c.copyOnRewriteExprs(n.PathList, n)
+ if changedJSONDoc || changedPathList {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.PathList, _ = _PathList.(Exprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONSchemaValidFuncExpr(n *JSONSchemaValidFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Schema, changedSchema := c.copyOnRewriteExpr(n.Schema, n)
+ _Document, changedDocument := c.copyOnRewriteExpr(n.Document, n)
+ if changedSchema || changedDocument {
+ res := *n
+ res.Schema, _ = _Schema.(Expr)
+ res.Document, _ = _Document.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONSchemaValidationReportFuncExpr(n *JSONSchemaValidationReportFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Schema, changedSchema := c.copyOnRewriteExpr(n.Schema, n)
+ _Document, changedDocument := c.copyOnRewriteExpr(n.Document, n)
+ if changedSchema || changedDocument {
+ res := *n
+ res.Schema, _ = _Schema.(Expr)
+ res.Document, _ = _Document.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONSearchExpr(n *JSONSearchExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ _OneOrAll, changedOneOrAll := c.copyOnRewriteExpr(n.OneOrAll, n)
+ _SearchStr, changedSearchStr := c.copyOnRewriteExpr(n.SearchStr, n)
+ _EscapeChar, changedEscapeChar := c.copyOnRewriteExpr(n.EscapeChar, n)
+ var changedPathList bool
+ _PathList := make([]Expr, len(n.PathList))
+ for x, el := range n.PathList {
+ this, changed := c.copyOnRewriteExpr(el, n)
+ _PathList[x] = this.(Expr)
+ if changed {
+ changedPathList = true
+ }
+ }
+ if changedJSONDoc || changedOneOrAll || changedSearchStr || changedEscapeChar || changedPathList {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.OneOrAll, _ = _OneOrAll.(Expr)
+ res.SearchStr, _ = _SearchStr.(Expr)
+ res.EscapeChar, _ = _EscapeChar.(Expr)
+ res.PathList = _PathList
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONStorageFreeExpr(n *JSONStorageFreeExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONVal, changedJSONVal := c.copyOnRewriteExpr(n.JSONVal, n)
+ if changedJSONVal {
+ res := *n
+ res.JSONVal, _ = _JSONVal.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONStorageSizeExpr(n *JSONStorageSizeExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONVal, changedJSONVal := c.copyOnRewriteExpr(n.JSONVal, n)
+ if changedJSONVal {
+ res := *n
+ res.JSONVal, _ = _JSONVal.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONTableExpr(n *JSONTableExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Alias, changedAlias := c.copyOnRewriteIdentifierCS(n.Alias, n)
+ _Filter, changedFilter := c.copyOnRewriteExpr(n.Filter, n)
+ var changedColumns bool
+ _Columns := make([]*JtColumnDefinition, len(n.Columns))
+ for x, el := range n.Columns {
+ this, changed := c.copyOnRewriteRefOfJtColumnDefinition(el, n)
+ _Columns[x] = this.(*JtColumnDefinition)
+ if changed {
+ changedColumns = true
+ }
+ }
+ if changedExpr || changedAlias || changedFilter || changedColumns {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Alias, _ = _Alias.(IdentifierCS)
+ res.Filter, _ = _Filter.(Expr)
+ res.Columns = _Columns
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONUnquoteExpr(n *JSONUnquoteExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONValue, changedJSONValue := c.copyOnRewriteExpr(n.JSONValue, n)
+ if changedJSONValue {
+ res := *n
+ res.JSONValue, _ = _JSONValue.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONValueExpr(n *JSONValueExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ _Path, changedPath := c.copyOnRewriteExpr(n.Path, n)
+ _ReturningType, changedReturningType := c.copyOnRewriteRefOfConvertType(n.ReturningType, n)
+ _EmptyOnResponse, changedEmptyOnResponse := c.copyOnRewriteRefOfJtOnResponse(n.EmptyOnResponse, n)
+ _ErrorOnResponse, changedErrorOnResponse := c.copyOnRewriteRefOfJtOnResponse(n.ErrorOnResponse, n)
+ if changedJSONDoc || changedPath || changedReturningType || changedEmptyOnResponse || changedErrorOnResponse {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.Path, _ = _Path.(Expr)
+ res.ReturningType, _ = _ReturningType.(*ConvertType)
+ res.EmptyOnResponse, _ = _EmptyOnResponse.(*JtOnResponse)
+ res.ErrorOnResponse, _ = _ErrorOnResponse.(*JtOnResponse)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONValueMergeExpr(n *JSONValueMergeExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ _JSONDocList, changedJSONDocList := c.copyOnRewriteExprs(n.JSONDocList, n)
+ if changedJSONDoc || changedJSONDocList {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.JSONDocList, _ = _JSONDocList.(Exprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJSONValueModifierExpr(n *JSONValueModifierExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _JSONDoc, changedJSONDoc := c.copyOnRewriteExpr(n.JSONDoc, n)
+ var changedParams bool
+ _Params := make([]*JSONObjectParam, len(n.Params))
+ for x, el := range n.Params {
+ this, changed := c.copyOnRewriteRefOfJSONObjectParam(el, n)
+ _Params[x] = this.(*JSONObjectParam)
+ if changed {
+ changedParams = true
+ }
+ }
+ if changedJSONDoc || changedParams {
+ res := *n
+ res.JSONDoc, _ = _JSONDoc.(Expr)
+ res.Params = _Params
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJoinCondition(n *JoinCondition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _On, changedOn := c.copyOnRewriteExpr(n.On, n)
+ _Using, changedUsing := c.copyOnRewriteColumns(n.Using, n)
+ if changedOn || changedUsing {
+ res := *n
+ res.On, _ = _On.(Expr)
+ res.Using, _ = _Using.(Columns)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJoinTableExpr(n *JoinTableExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _LeftExpr, changedLeftExpr := c.copyOnRewriteTableExpr(n.LeftExpr, n)
+ _RightExpr, changedRightExpr := c.copyOnRewriteTableExpr(n.RightExpr, n)
+ _Condition, changedCondition := c.copyOnRewriteRefOfJoinCondition(n.Condition, n)
+ if changedLeftExpr || changedRightExpr || changedCondition {
+ res := *n
+ res.LeftExpr, _ = _LeftExpr.(TableExpr)
+ res.RightExpr, _ = _RightExpr.(TableExpr)
+ res.Condition, _ = _Condition.(*JoinCondition)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJtColumnDefinition(n *JtColumnDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfJtOnResponse(n *JtOnResponse, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfKeyState(n *KeyState, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLagLeadExpr(n *LagLeadExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _N, changedN := c.copyOnRewriteExpr(n.N, n)
+ _Default, changedDefault := c.copyOnRewriteExpr(n.Default, n)
+ _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n)
+ _NullTreatmentClause, changedNullTreatmentClause := c.copyOnRewriteRefOfNullTreatmentClause(n.NullTreatmentClause, n)
+ if changedExpr || changedN || changedDefault || changedOverClause || changedNullTreatmentClause {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.N, _ = _N.(Expr)
+ res.Default, _ = _Default.(Expr)
+ res.OverClause, _ = _OverClause.(*OverClause)
+ res.NullTreatmentClause, _ = _NullTreatmentClause.(*NullTreatmentClause)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLimit(n *Limit, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Offset, changedOffset := c.copyOnRewriteExpr(n.Offset, n)
+ _Rowcount, changedRowcount := c.copyOnRewriteExpr(n.Rowcount, n)
+ if changedOffset || changedRowcount {
+ res := *n
+ res.Offset, _ = _Offset.(Expr)
+ res.Rowcount, _ = _Rowcount.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLiteral(n *Literal, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLoad(n *Load, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLocateExpr(n *LocateExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _SubStr, changedSubStr := c.copyOnRewriteExpr(n.SubStr, n)
+ _Str, changedStr := c.copyOnRewriteExpr(n.Str, n)
+ _Pos, changedPos := c.copyOnRewriteExpr(n.Pos, n)
+ if changedSubStr || changedStr || changedPos {
+ res := *n
+ res.SubStr, _ = _SubStr.(Expr)
+ res.Str, _ = _Str.(Expr)
+ res.Pos, _ = _Pos.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLockOption(n *LockOption, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLockTables(n *LockTables, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfLockingFunc(n *LockingFunc, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteExpr(n.Name, n)
+ _Timeout, changedTimeout := c.copyOnRewriteExpr(n.Timeout, n)
+ if changedName || changedTimeout {
+ res := *n
+ res.Name, _ = _Name.(Expr)
+ res.Timeout, _ = _Timeout.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfMatchExpr(n *MatchExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedColumns bool
+ _Columns := make([]*ColName, len(n.Columns))
+ for x, el := range n.Columns {
+ this, changed := c.copyOnRewriteRefOfColName(el, n)
+ _Columns[x] = this.(*ColName)
+ if changed {
+ changedColumns = true
+ }
+ }
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedColumns || changedExpr {
+ res := *n
+ res.Columns = _Columns
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfMax(n *Max, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfMemberOfExpr(n *MemberOfExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Value, changedValue := c.copyOnRewriteExpr(n.Value, n)
+ _JSONArr, changedJSONArr := c.copyOnRewriteExpr(n.JSONArr, n)
+ if changedValue || changedJSONArr {
+ res := *n
+ res.Value, _ = _Value.(Expr)
+ res.JSONArr, _ = _JSONArr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfMin(n *Min, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfModifyColumn(n *ModifyColumn, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _NewColDefinition, changedNewColDefinition := c.copyOnRewriteRefOfColumnDefinition(n.NewColDefinition, n)
+ _After, changedAfter := c.copyOnRewriteRefOfColName(n.After, n)
+ if changedNewColDefinition || changedAfter {
+ res := *n
+ res.NewColDefinition, _ = _NewColDefinition.(*ColumnDefinition)
+ res.After, _ = _After.(*ColName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNTHValueExpr(n *NTHValueExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _N, changedN := c.copyOnRewriteExpr(n.N, n)
+ _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n)
+ _FromFirstLastClause, changedFromFirstLastClause := c.copyOnRewriteRefOfFromFirstLastClause(n.FromFirstLastClause, n)
+ _NullTreatmentClause, changedNullTreatmentClause := c.copyOnRewriteRefOfNullTreatmentClause(n.NullTreatmentClause, n)
+ if changedExpr || changedN || changedOverClause || changedFromFirstLastClause || changedNullTreatmentClause {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.N, _ = _N.(Expr)
+ res.OverClause, _ = _OverClause.(*OverClause)
+ res.FromFirstLastClause, _ = _FromFirstLastClause.(*FromFirstLastClause)
+ res.NullTreatmentClause, _ = _NullTreatmentClause.(*NullTreatmentClause)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNamedWindow(n *NamedWindow, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Windows, changedWindows := c.copyOnRewriteWindowDefinitions(n.Windows, n)
+ if changedWindows {
+ res := *n
+ res.Windows, _ = _Windows.(WindowDefinitions)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteNamedWindows(n NamedWindows, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(NamedWindows, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfNamedWindow(el, n)
+ res[x] = this.(*NamedWindow)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNextval(n *Nextval, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNotExpr(n *NotExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNtileExpr(n *NtileExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _N, changedN := c.copyOnRewriteExpr(n.N, n)
+ _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n)
+ if changedN || changedOverClause {
+ res := *n
+ res.N, _ = _N.(Expr)
+ res.OverClause, _ = _OverClause.(*OverClause)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNullTreatmentClause(n *NullTreatmentClause, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfNullVal(n *NullVal, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOffset(n *Offset, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteOnDup(n OnDup, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(OnDup, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfUpdateExpr(el, n)
+ res[x] = this.(*UpdateExpr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOptLike(n *OptLike, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _LikeTable, changedLikeTable := c.copyOnRewriteTableName(n.LikeTable, n)
+ if changedLikeTable {
+ res := *n
+ res.LikeTable, _ = _LikeTable.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOrExpr(n *OrExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n)
+ _Right, changedRight := c.copyOnRewriteExpr(n.Right, n)
+ if changedLeft || changedRight {
+ res := *n
+ res.Left, _ = _Left.(Expr)
+ res.Right, _ = _Right.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOrder(n *Order, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteOrderBy(n OrderBy, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(OrderBy, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfOrder(el, n)
+ res[x] = this.(*Order)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOrderByOption(n *OrderByOption, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Cols, changedCols := c.copyOnRewriteColumns(n.Cols, n)
+ if changedCols {
+ res := *n
+ res.Cols, _ = _Cols.(Columns)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOtherAdmin(n *OtherAdmin, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOtherRead(n *OtherRead, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfOverClause(n *OverClause, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _WindowName, changedWindowName := c.copyOnRewriteIdentifierCI(n.WindowName, n)
+ _WindowSpec, changedWindowSpec := c.copyOnRewriteRefOfWindowSpecification(n.WindowSpec, n)
+ if changedWindowName || changedWindowSpec {
+ res := *n
+ res.WindowName, _ = _WindowName.(IdentifierCI)
+ res.WindowSpec, _ = _WindowSpec.(*WindowSpecification)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfParenTableExpr(n *ParenTableExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Exprs, changedExprs := c.copyOnRewriteTableExprs(n.Exprs, n)
+ if changedExprs {
+ res := *n
+ res.Exprs, _ = _Exprs.(TableExprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfParsedComments(n *ParsedComments, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPartitionDefinition(n *PartitionDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Options, changedOptions := c.copyOnRewriteRefOfPartitionDefinitionOptions(n.Options, n)
+ if changedName || changedOptions {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Options, _ = _Options.(*PartitionDefinitionOptions)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPartitionDefinitionOptions(n *PartitionDefinitionOptions, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ValueRange, changedValueRange := c.copyOnRewriteRefOfPartitionValueRange(n.ValueRange, n)
+ _Comment, changedComment := c.copyOnRewriteRefOfLiteral(n.Comment, n)
+ _Engine, changedEngine := c.copyOnRewriteRefOfPartitionEngine(n.Engine, n)
+ _DataDirectory, changedDataDirectory := c.copyOnRewriteRefOfLiteral(n.DataDirectory, n)
+ _IndexDirectory, changedIndexDirectory := c.copyOnRewriteRefOfLiteral(n.IndexDirectory, n)
+ _SubPartitionDefinitions, changedSubPartitionDefinitions := c.copyOnRewriteSubPartitionDefinitions(n.SubPartitionDefinitions, n)
+ if changedValueRange || changedComment || changedEngine || changedDataDirectory || changedIndexDirectory || changedSubPartitionDefinitions {
+ res := *n
+ res.ValueRange, _ = _ValueRange.(*PartitionValueRange)
+ res.Comment, _ = _Comment.(*Literal)
+ res.Engine, _ = _Engine.(*PartitionEngine)
+ res.DataDirectory, _ = _DataDirectory.(*Literal)
+ res.IndexDirectory, _ = _IndexDirectory.(*Literal)
+ res.SubPartitionDefinitions, _ = _SubPartitionDefinitions.(SubPartitionDefinitions)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPartitionEngine(n *PartitionEngine, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPartitionOption(n *PartitionOption, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ColList, changedColList := c.copyOnRewriteColumns(n.ColList, n)
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _SubPartition, changedSubPartition := c.copyOnRewriteRefOfSubPartition(n.SubPartition, n)
+ var changedDefinitions bool
+ _Definitions := make([]*PartitionDefinition, len(n.Definitions))
+ for x, el := range n.Definitions {
+ this, changed := c.copyOnRewriteRefOfPartitionDefinition(el, n)
+ _Definitions[x] = this.(*PartitionDefinition)
+ if changed {
+ changedDefinitions = true
+ }
+ }
+ if changedColList || changedExpr || changedSubPartition || changedDefinitions {
+ res := *n
+ res.ColList, _ = _ColList.(Columns)
+ res.Expr, _ = _Expr.(Expr)
+ res.SubPartition, _ = _SubPartition.(*SubPartition)
+ res.Definitions = _Definitions
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPartitionSpec(n *PartitionSpec, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Names, changedNames := c.copyOnRewritePartitions(n.Names, n)
+ _Number, changedNumber := c.copyOnRewriteRefOfLiteral(n.Number, n)
+ _TableName, changedTableName := c.copyOnRewriteTableName(n.TableName, n)
+ var changedDefinitions bool
+ _Definitions := make([]*PartitionDefinition, len(n.Definitions))
+ for x, el := range n.Definitions {
+ this, changed := c.copyOnRewriteRefOfPartitionDefinition(el, n)
+ _Definitions[x] = this.(*PartitionDefinition)
+ if changed {
+ changedDefinitions = true
+ }
+ }
+ if changedNames || changedNumber || changedTableName || changedDefinitions {
+ res := *n
+ res.Names, _ = _Names.(Partitions)
+ res.Number, _ = _Number.(*Literal)
+ res.TableName, _ = _TableName.(TableName)
+ res.Definitions = _Definitions
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPartitionValueRange(n *PartitionValueRange, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Range, changedRange := c.copyOnRewriteValTuple(n.Range, n)
+ if changedRange {
+ res := *n
+ res.Range, _ = _Range.(ValTuple)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewritePartitions(n Partitions, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(Partitions, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteIdentifierCI(el, n)
+ res[x] = this.(IdentifierCI)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPerformanceSchemaFuncExpr(n *PerformanceSchemaFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Argument, changedArgument := c.copyOnRewriteExpr(n.Argument, n)
+ if changedArgument {
+ res := *n
+ res.Argument, _ = _Argument.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfPrepareStmt(n *PrepareStmt, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Statement, changedStatement := c.copyOnRewriteExpr(n.Statement, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedName || changedStatement || changedComments {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Statement, _ = _Statement.(Expr)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfReferenceDefinition(n *ReferenceDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ReferencedTable, changedReferencedTable := c.copyOnRewriteTableName(n.ReferencedTable, n)
+ _ReferencedColumns, changedReferencedColumns := c.copyOnRewriteColumns(n.ReferencedColumns, n)
+ _Match, changedMatch := c.copyOnRewriteMatchAction(n.Match, n)
+ _OnDelete, changedOnDelete := c.copyOnRewriteReferenceAction(n.OnDelete, n)
+ _OnUpdate, changedOnUpdate := c.copyOnRewriteReferenceAction(n.OnUpdate, n)
+ if changedReferencedTable || changedReferencedColumns || changedMatch || changedOnDelete || changedOnUpdate {
+ res := *n
+ res.ReferencedTable, _ = _ReferencedTable.(TableName)
+ res.ReferencedColumns, _ = _ReferencedColumns.(Columns)
+ res.Match, _ = _Match.(MatchAction)
+ res.OnDelete, _ = _OnDelete.(ReferenceAction)
+ res.OnUpdate, _ = _OnUpdate.(ReferenceAction)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRegexpInstrExpr(n *RegexpInstrExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Pattern, changedPattern := c.copyOnRewriteExpr(n.Pattern, n)
+ _Position, changedPosition := c.copyOnRewriteExpr(n.Position, n)
+ _Occurrence, changedOccurrence := c.copyOnRewriteExpr(n.Occurrence, n)
+ _ReturnOption, changedReturnOption := c.copyOnRewriteExpr(n.ReturnOption, n)
+ _MatchType, changedMatchType := c.copyOnRewriteExpr(n.MatchType, n)
+ if changedExpr || changedPattern || changedPosition || changedOccurrence || changedReturnOption || changedMatchType {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Pattern, _ = _Pattern.(Expr)
+ res.Position, _ = _Position.(Expr)
+ res.Occurrence, _ = _Occurrence.(Expr)
+ res.ReturnOption, _ = _ReturnOption.(Expr)
+ res.MatchType, _ = _MatchType.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRegexpLikeExpr(n *RegexpLikeExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Pattern, changedPattern := c.copyOnRewriteExpr(n.Pattern, n)
+ _MatchType, changedMatchType := c.copyOnRewriteExpr(n.MatchType, n)
+ if changedExpr || changedPattern || changedMatchType {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Pattern, _ = _Pattern.(Expr)
+ res.MatchType, _ = _MatchType.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRegexpReplaceExpr(n *RegexpReplaceExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Pattern, changedPattern := c.copyOnRewriteExpr(n.Pattern, n)
+ _Repl, changedRepl := c.copyOnRewriteExpr(n.Repl, n)
+ _Occurrence, changedOccurrence := c.copyOnRewriteExpr(n.Occurrence, n)
+ _Position, changedPosition := c.copyOnRewriteExpr(n.Position, n)
+ _MatchType, changedMatchType := c.copyOnRewriteExpr(n.MatchType, n)
+ if changedExpr || changedPattern || changedRepl || changedOccurrence || changedPosition || changedMatchType {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Pattern, _ = _Pattern.(Expr)
+ res.Repl, _ = _Repl.(Expr)
+ res.Occurrence, _ = _Occurrence.(Expr)
+ res.Position, _ = _Position.(Expr)
+ res.MatchType, _ = _MatchType.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRegexpSubstrExpr(n *RegexpSubstrExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _Pattern, changedPattern := c.copyOnRewriteExpr(n.Pattern, n)
+ _Occurrence, changedOccurrence := c.copyOnRewriteExpr(n.Occurrence, n)
+ _Position, changedPosition := c.copyOnRewriteExpr(n.Position, n)
+ _MatchType, changedMatchType := c.copyOnRewriteExpr(n.MatchType, n)
+ if changedExpr || changedPattern || changedOccurrence || changedPosition || changedMatchType {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.Pattern, _ = _Pattern.(Expr)
+ res.Occurrence, _ = _Occurrence.(Expr)
+ res.Position, _ = _Position.(Expr)
+ res.MatchType, _ = _MatchType.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRelease(n *Release, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRenameColumn(n *RenameColumn, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _OldName, changedOldName := c.copyOnRewriteRefOfColName(n.OldName, n)
+ _NewName, changedNewName := c.copyOnRewriteRefOfColName(n.NewName, n)
+ if changedOldName || changedNewName {
+ res := *n
+ res.OldName, _ = _OldName.(*ColName)
+ res.NewName, _ = _NewName.(*ColName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRenameIndex(n *RenameIndex, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _OldName, changedOldName := c.copyOnRewriteIdentifierCI(n.OldName, n)
+ _NewName, changedNewName := c.copyOnRewriteIdentifierCI(n.NewName, n)
+ if changedOldName || changedNewName {
+ res := *n
+ res.OldName, _ = _OldName.(IdentifierCI)
+ res.NewName, _ = _NewName.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRenameTable(n *RenameTable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRenameTableName(n *RenameTableName, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ if changedTable {
+ res := *n
+ res.Table, _ = _Table.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRevertMigration(n *RevertMigration, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedComments {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRollback(n *Rollback, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRootNode(n RootNode, parent SQLNode) (out SQLNode, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _SQLNode, changedSQLNode := c.copyOnRewriteSQLNode(n.SQLNode, n)
+ if changedSQLNode {
+ res := n
+ res.SQLNode, _ = _SQLNode.(SQLNode)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSRollback(n *SRollback, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSavepoint(n *Savepoint, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSelect(n *Select, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedFrom bool
+ _From := make([]TableExpr, len(n.From))
+ for x, el := range n.From {
+ this, changed := c.copyOnRewriteTableExpr(el, n)
+ _From[x] = this.(TableExpr)
+ if changed {
+ changedFrom = true
+ }
+ }
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _SelectExprs, changedSelectExprs := c.copyOnRewriteSelectExprs(n.SelectExprs, n)
+ _Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n)
+ _With, changedWith := c.copyOnRewriteRefOfWith(n.With, n)
+ _GroupBy, changedGroupBy := c.copyOnRewriteGroupBy(n.GroupBy, n)
+ _Having, changedHaving := c.copyOnRewriteRefOfWhere(n.Having, n)
+ _Windows, changedWindows := c.copyOnRewriteNamedWindows(n.Windows, n)
+ _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n)
+ _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n)
+ _Into, changedInto := c.copyOnRewriteRefOfSelectInto(n.Into, n)
+ if changedFrom || changedComments || changedSelectExprs || changedWhere || changedWith || changedGroupBy || changedHaving || changedWindows || changedOrderBy || changedLimit || changedInto {
+ res := *n
+ res.From = _From
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.SelectExprs, _ = _SelectExprs.(SelectExprs)
+ res.Where, _ = _Where.(*Where)
+ res.With, _ = _With.(*With)
+ res.GroupBy, _ = _GroupBy.(GroupBy)
+ res.Having, _ = _Having.(*Where)
+ res.Windows, _ = _Windows.(NamedWindows)
+ res.OrderBy, _ = _OrderBy.(OrderBy)
+ res.Limit, _ = _Limit.(*Limit)
+ res.Into, _ = _Into.(*SelectInto)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteSelectExprs(n SelectExprs, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(SelectExprs, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteSelectExpr(el, n)
+ res[x] = this.(SelectExpr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSelectInto(n *SelectInto, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSet(n *Set, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _Exprs, changedExprs := c.copyOnRewriteSetExprs(n.Exprs, n)
+ if changedComments || changedExprs {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.Exprs, _ = _Exprs.(SetExprs)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSetExpr(n *SetExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Var, changedVar := c.copyOnRewriteRefOfVariable(n.Var, n)
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedVar || changedExpr {
+ res := *n
+ res.Var, _ = _Var.(*Variable)
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteSetExprs(n SetExprs, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(SetExprs, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfSetExpr(el, n)
+ res[x] = this.(*SetExpr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShow(n *Show, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Internal, changedInternal := c.copyOnRewriteShowInternal(n.Internal, n)
+ if changedInternal {
+ res := *n
+ res.Internal, _ = _Internal.(ShowInternal)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShowBasic(n *ShowBasic, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Tbl, changedTbl := c.copyOnRewriteTableName(n.Tbl, n)
+ _DbName, changedDbName := c.copyOnRewriteIdentifierCS(n.DbName, n)
+ _Filter, changedFilter := c.copyOnRewriteRefOfShowFilter(n.Filter, n)
+ if changedTbl || changedDbName || changedFilter {
+ res := *n
+ res.Tbl, _ = _Tbl.(TableName)
+ res.DbName, _ = _DbName.(IdentifierCS)
+ res.Filter, _ = _Filter.(*ShowFilter)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShowCreate(n *ShowCreate, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Op, changedOp := c.copyOnRewriteTableName(n.Op, n)
+ if changedOp {
+ res := *n
+ res.Op, _ = _Op.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShowFilter(n *ShowFilter, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Filter, changedFilter := c.copyOnRewriteExpr(n.Filter, n)
+ if changedFilter {
+ res := *n
+ res.Filter, _ = _Filter.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShowMigrationLogs(n *ShowMigrationLogs, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedComments {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShowOther(n *ShowOther, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShowThrottledApps(n *ShowThrottledApps, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfShowThrottlerStatus(n *ShowThrottlerStatus, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfStarExpr(n *StarExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _TableName, changedTableName := c.copyOnRewriteTableName(n.TableName, n)
+ if changedTableName {
+ res := *n
+ res.TableName, _ = _TableName.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfStd(n *Std, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfStdDev(n *StdDev, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfStdPop(n *StdPop, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfStdSamp(n *StdSamp, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfStream(n *Stream, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _SelectExpr, changedSelectExpr := c.copyOnRewriteSelectExpr(n.SelectExpr, n)
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ if changedComments || changedSelectExpr || changedTable {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.SelectExpr, _ = _SelectExpr.(SelectExpr)
+ res.Table, _ = _Table.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSubPartition(n *SubPartition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _ColList, changedColList := c.copyOnRewriteColumns(n.ColList, n)
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedColList || changedExpr {
+ res := *n
+ res.ColList, _ = _ColList.(Columns)
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSubPartitionDefinition(n *SubPartitionDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Options, changedOptions := c.copyOnRewriteRefOfSubPartitionDefinitionOptions(n.Options, n)
+ if changedName || changedOptions {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Options, _ = _Options.(*SubPartitionDefinitionOptions)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSubPartitionDefinitionOptions(n *SubPartitionDefinitionOptions, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comment, changedComment := c.copyOnRewriteRefOfLiteral(n.Comment, n)
+ _Engine, changedEngine := c.copyOnRewriteRefOfPartitionEngine(n.Engine, n)
+ _DataDirectory, changedDataDirectory := c.copyOnRewriteRefOfLiteral(n.DataDirectory, n)
+ _IndexDirectory, changedIndexDirectory := c.copyOnRewriteRefOfLiteral(n.IndexDirectory, n)
+ if changedComment || changedEngine || changedDataDirectory || changedIndexDirectory {
+ res := *n
+ res.Comment, _ = _Comment.(*Literal)
+ res.Engine, _ = _Engine.(*PartitionEngine)
+ res.DataDirectory, _ = _DataDirectory.(*Literal)
+ res.IndexDirectory, _ = _IndexDirectory.(*Literal)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteSubPartitionDefinitions(n SubPartitionDefinitions, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(SubPartitionDefinitions, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfSubPartitionDefinition(el, n)
+ res[x] = this.(*SubPartitionDefinition)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSubquery(n *Subquery, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Select, changedSelect := c.copyOnRewriteSelectStatement(n.Select, n)
+ if changedSelect {
+ res := *n
+ res.Select, _ = _Select.(SelectStatement)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSubstrExpr(n *SubstrExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteExpr(n.Name, n)
+ _From, changedFrom := c.copyOnRewriteExpr(n.From, n)
+ _To, changedTo := c.copyOnRewriteExpr(n.To, n)
+ if changedName || changedFrom || changedTo {
+ res := *n
+ res.Name, _ = _Name.(Expr)
+ res.From, _ = _From.(Expr)
+ res.To, _ = _To.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfSum(n *Sum, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteTableExprs(n TableExprs, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(TableExprs, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteTableExpr(el, n)
+ res[x] = this.(TableExpr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteTableName(n TableName, parent SQLNode) (out SQLNode, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCS(n.Name, n)
+ _Qualifier, changedQualifier := c.copyOnRewriteIdentifierCS(n.Qualifier, n)
+ if changedName || changedQualifier {
+ res := n
+ res.Name, _ = _Name.(IdentifierCS)
+ res.Qualifier, _ = _Qualifier.(IdentifierCS)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteTableNames(n TableNames, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(TableNames, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteTableName(el, n)
+ res[x] = this.(TableName)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteTableOptions(n TableOptions, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfTableSpec(n *TableSpec, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedColumns bool
+ _Columns := make([]*ColumnDefinition, len(n.Columns))
+ for x, el := range n.Columns {
+ this, changed := c.copyOnRewriteRefOfColumnDefinition(el, n)
+ _Columns[x] = this.(*ColumnDefinition)
+ if changed {
+ changedColumns = true
+ }
+ }
+ var changedIndexes bool
+ _Indexes := make([]*IndexDefinition, len(n.Indexes))
+ for x, el := range n.Indexes {
+ this, changed := c.copyOnRewriteRefOfIndexDefinition(el, n)
+ _Indexes[x] = this.(*IndexDefinition)
+ if changed {
+ changedIndexes = true
+ }
+ }
+ var changedConstraints bool
+ _Constraints := make([]*ConstraintDefinition, len(n.Constraints))
+ for x, el := range n.Constraints {
+ this, changed := c.copyOnRewriteRefOfConstraintDefinition(el, n)
+ _Constraints[x] = this.(*ConstraintDefinition)
+ if changed {
+ changedConstraints = true
+ }
+ }
+ _Options, changedOptions := c.copyOnRewriteTableOptions(n.Options, n)
+ _PartitionOption, changedPartitionOption := c.copyOnRewriteRefOfPartitionOption(n.PartitionOption, n)
+ if changedColumns || changedIndexes || changedConstraints || changedOptions || changedPartitionOption {
+ res := *n
+ res.Columns = _Columns
+ res.Indexes = _Indexes
+ res.Constraints = _Constraints
+ res.Options, _ = _Options.(TableOptions)
+ res.PartitionOption, _ = _PartitionOption.(*PartitionOption)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfTablespaceOperation(n *TablespaceOperation, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfTimestampFuncExpr(n *TimestampFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr1, changedExpr1 := c.copyOnRewriteExpr(n.Expr1, n)
+ _Expr2, changedExpr2 := c.copyOnRewriteExpr(n.Expr2, n)
+ if changedExpr1 || changedExpr2 {
+ res := *n
+ res.Expr1, _ = _Expr1.(Expr)
+ res.Expr2, _ = _Expr2.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfTrimFuncExpr(n *TrimFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _TrimArg, changedTrimArg := c.copyOnRewriteExpr(n.TrimArg, n)
+ _StringArg, changedStringArg := c.copyOnRewriteExpr(n.StringArg, n)
+ if changedTrimArg || changedStringArg {
+ res := *n
+ res.TrimArg, _ = _TrimArg.(Expr)
+ res.StringArg, _ = _StringArg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfTruncateTable(n *TruncateTable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ if changedTable {
+ res := *n
+ res.Table, _ = _Table.(TableName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfUnaryExpr(n *UnaryExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfUnion(n *Union, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteSelectStatement(n.Left, n)
+ _Right, changedRight := c.copyOnRewriteSelectStatement(n.Right, n)
+ _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n)
+ _With, changedWith := c.copyOnRewriteRefOfWith(n.With, n)
+ _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n)
+ _Into, changedInto := c.copyOnRewriteRefOfSelectInto(n.Into, n)
+ if changedLeft || changedRight || changedOrderBy || changedWith || changedLimit || changedInto {
+ res := *n
+ res.Left, _ = _Left.(SelectStatement)
+ res.Right, _ = _Right.(SelectStatement)
+ res.OrderBy, _ = _OrderBy.(OrderBy)
+ res.With, _ = _With.(*With)
+ res.Limit, _ = _Limit.(*Limit)
+ res.Into, _ = _Into.(*SelectInto)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfUnlockTables(n *UnlockTables, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfUpdate(n *Update, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _With, changedWith := c.copyOnRewriteRefOfWith(n.With, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _TableExprs, changedTableExprs := c.copyOnRewriteTableExprs(n.TableExprs, n)
+ _Exprs, changedExprs := c.copyOnRewriteUpdateExprs(n.Exprs, n)
+ _Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n)
+ _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n)
+ _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n)
+ if changedWith || changedComments || changedTableExprs || changedExprs || changedWhere || changedOrderBy || changedLimit {
+ res := *n
+ res.With, _ = _With.(*With)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.TableExprs, _ = _TableExprs.(TableExprs)
+ res.Exprs, _ = _Exprs.(UpdateExprs)
+ res.Where, _ = _Where.(*Where)
+ res.OrderBy, _ = _OrderBy.(OrderBy)
+ res.Limit, _ = _Limit.(*Limit)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfUpdateExpr(n *UpdateExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteRefOfColName(n.Name, n)
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedName || changedExpr {
+ res := *n
+ res.Name, _ = _Name.(*ColName)
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteUpdateExprs(n UpdateExprs, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(UpdateExprs, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfUpdateExpr(el, n)
+ res[x] = this.(*UpdateExpr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfUpdateXMLExpr(n *UpdateXMLExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Target, changedTarget := c.copyOnRewriteExpr(n.Target, n)
+ _XPathExpr, changedXPathExpr := c.copyOnRewriteExpr(n.XPathExpr, n)
+ _NewXML, changedNewXML := c.copyOnRewriteExpr(n.NewXML, n)
+ if changedTarget || changedXPathExpr || changedNewXML {
+ res := *n
+ res.Target, _ = _Target.(Expr)
+ res.XPathExpr, _ = _XPathExpr.(Expr)
+ res.NewXML, _ = _NewXML.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfUse(n *Use, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _DBName, changedDBName := c.copyOnRewriteIdentifierCS(n.DBName, n)
+ if changedDBName {
+ res := *n
+ res.DBName, _ = _DBName.(IdentifierCS)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVExplainStmt(n *VExplainStmt, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Statement, changedStatement := c.copyOnRewriteStatement(n.Statement, n)
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ if changedStatement || changedComments {
+ res := *n
+ res.Statement, _ = _Statement.(Statement)
+ res.Comments, _ = _Comments.(*ParsedComments)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVStream(n *VStream, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n)
+ _SelectExpr, changedSelectExpr := c.copyOnRewriteSelectExpr(n.SelectExpr, n)
+ _Table, changedTable := c.copyOnRewriteTableName(n.Table, n)
+ _Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n)
+ _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n)
+ if changedComments || changedSelectExpr || changedTable || changedWhere || changedLimit {
+ res := *n
+ res.Comments, _ = _Comments.(*ParsedComments)
+ res.SelectExpr, _ = _SelectExpr.(SelectExpr)
+ res.Table, _ = _Table.(TableName)
+ res.Where, _ = _Where.(*Where)
+ res.Limit, _ = _Limit.(*Limit)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteValTuple(n ValTuple, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(ValTuple, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteExpr(el, n)
+ res[x] = this.(Expr)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfValidation(n *Validation, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteValues(n Values, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(Values, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteValTuple(el, n)
+ res[x] = this.(ValTuple)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfValuesFuncExpr(n *ValuesFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteRefOfColName(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(*ColName)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVarPop(n *VarPop, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVarSamp(n *VarSamp, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVariable(n *Variable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ if changedName {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVariance(n *Variance, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n)
+ if changedArg {
+ res := *n
+ res.Arg, _ = _Arg.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteVindexParam(n VindexParam, parent SQLNode) (out SQLNode, changed bool) {
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Key, changedKey := c.copyOnRewriteIdentifierCI(n.Key, n)
+ if changedKey {
+ res := n
+ res.Key, _ = _Key.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVindexSpec(n *VindexSpec, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _Type, changedType := c.copyOnRewriteIdentifierCI(n.Type, n)
+ var changedParams bool
+ _Params := make([]VindexParam, len(n.Params))
+ for x, el := range n.Params {
+ this, changed := c.copyOnRewriteVindexParam(el, n)
+ _Params[x] = this.(VindexParam)
+ if changed {
+ changedParams = true
+ }
+ }
+ if changedName || changedType || changedParams {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.Type, _ = _Type.(IdentifierCI)
+ res.Params = _Params
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfWeightStringFuncExpr(n *WeightStringFuncExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ _As, changedAs := c.copyOnRewriteRefOfConvertType(n.As, n)
+ if changedExpr || changedAs {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ res.As, _ = _As.(*ConvertType)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfWhen(n *When, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Cond, changedCond := c.copyOnRewriteExpr(n.Cond, n)
+ _Val, changedVal := c.copyOnRewriteExpr(n.Val, n)
+ if changedCond || changedVal {
+ res := *n
+ res.Cond, _ = _Cond.(Expr)
+ res.Val, _ = _Val.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfWhere(n *Where, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n)
+ if changedExpr {
+ res := *n
+ res.Expr, _ = _Expr.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfWindowDefinition(n *WindowDefinition, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _WindowSpec, changedWindowSpec := c.copyOnRewriteRefOfWindowSpecification(n.WindowSpec, n)
+ if changedName || changedWindowSpec {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.WindowSpec, _ = _WindowSpec.(*WindowSpecification)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteWindowDefinitions(n WindowDefinitions, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ res := make(WindowDefinitions, len(n))
+ for x, el := range n {
+ this, change := c.copyOnRewriteRefOfWindowDefinition(el, n)
+ res[x] = this.(*WindowDefinition)
+ if change {
+ changed = true
+ }
+ }
+ if changed {
+ out = res
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfWindowSpecification(n *WindowSpecification, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n)
+ _PartitionClause, changedPartitionClause := c.copyOnRewriteExprs(n.PartitionClause, n)
+ _OrderClause, changedOrderClause := c.copyOnRewriteOrderBy(n.OrderClause, n)
+ _FrameClause, changedFrameClause := c.copyOnRewriteRefOfFrameClause(n.FrameClause, n)
+ if changedName || changedPartitionClause || changedOrderClause || changedFrameClause {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCI)
+ res.PartitionClause, _ = _PartitionClause.(Exprs)
+ res.OrderClause, _ = _OrderClause.(OrderBy)
+ res.FrameClause, _ = _FrameClause.(*FrameClause)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfWith(n *With, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ var changedctes bool
+ _ctes := make([]*CommonTableExpr, len(n.ctes))
+ for x, el := range n.ctes {
+ this, changed := c.copyOnRewriteRefOfCommonTableExpr(el, n)
+ _ctes[x] = this.(*CommonTableExpr)
+ if changed {
+ changedctes = true
+ }
+ }
+ if changedctes {
+ res := *n
+ res.ctes = _ctes
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfXorExpr(n *XorExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n)
+ _Right, changedRight := c.copyOnRewriteExpr(n.Right, n)
+ if changedLeft || changedRight {
+ res := *n
+ res.Left, _ = _Left.(Expr)
+ res.Right, _ = _Right.(Expr)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteAggrFunc(n AggrFunc, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *Avg:
+ return c.copyOnRewriteRefOfAvg(n, parent)
+ case *BitAnd:
+ return c.copyOnRewriteRefOfBitAnd(n, parent)
+ case *BitOr:
+ return c.copyOnRewriteRefOfBitOr(n, parent)
+ case *BitXor:
+ return c.copyOnRewriteRefOfBitXor(n, parent)
+ case *Count:
+ return c.copyOnRewriteRefOfCount(n, parent)
+ case *CountStar:
+ return c.copyOnRewriteRefOfCountStar(n, parent)
+ case *GroupConcatExpr:
+ return c.copyOnRewriteRefOfGroupConcatExpr(n, parent)
+ case *Max:
+ return c.copyOnRewriteRefOfMax(n, parent)
+ case *Min:
+ return c.copyOnRewriteRefOfMin(n, parent)
+ case *Std:
+ return c.copyOnRewriteRefOfStd(n, parent)
+ case *StdDev:
+ return c.copyOnRewriteRefOfStdDev(n, parent)
+ case *StdPop:
+ return c.copyOnRewriteRefOfStdPop(n, parent)
+ case *StdSamp:
+ return c.copyOnRewriteRefOfStdSamp(n, parent)
+ case *Sum:
+ return c.copyOnRewriteRefOfSum(n, parent)
+ case *VarPop:
+ return c.copyOnRewriteRefOfVarPop(n, parent)
+ case *VarSamp:
+ return c.copyOnRewriteRefOfVarSamp(n, parent)
+ case *Variance:
+ return c.copyOnRewriteRefOfVariance(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteAlterOption(n AlterOption, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AddColumns:
+ return c.copyOnRewriteRefOfAddColumns(n, parent)
+ case *AddConstraintDefinition:
+ return c.copyOnRewriteRefOfAddConstraintDefinition(n, parent)
+ case *AddIndexDefinition:
+ return c.copyOnRewriteRefOfAddIndexDefinition(n, parent)
+ case AlgorithmValue:
+ return c.copyOnRewriteAlgorithmValue(n, parent)
+ case *AlterCharset:
+ return c.copyOnRewriteRefOfAlterCharset(n, parent)
+ case *AlterCheck:
+ return c.copyOnRewriteRefOfAlterCheck(n, parent)
+ case *AlterColumn:
+ return c.copyOnRewriteRefOfAlterColumn(n, parent)
+ case *AlterIndex:
+ return c.copyOnRewriteRefOfAlterIndex(n, parent)
+ case *ChangeColumn:
+ return c.copyOnRewriteRefOfChangeColumn(n, parent)
+ case *DropColumn:
+ return c.copyOnRewriteRefOfDropColumn(n, parent)
+ case *DropKey:
+ return c.copyOnRewriteRefOfDropKey(n, parent)
+ case *Force:
+ return c.copyOnRewriteRefOfForce(n, parent)
+ case *KeyState:
+ return c.copyOnRewriteRefOfKeyState(n, parent)
+ case *LockOption:
+ return c.copyOnRewriteRefOfLockOption(n, parent)
+ case *ModifyColumn:
+ return c.copyOnRewriteRefOfModifyColumn(n, parent)
+ case *OrderByOption:
+ return c.copyOnRewriteRefOfOrderByOption(n, parent)
+ case *RenameColumn:
+ return c.copyOnRewriteRefOfRenameColumn(n, parent)
+ case *RenameIndex:
+ return c.copyOnRewriteRefOfRenameIndex(n, parent)
+ case *RenameTableName:
+ return c.copyOnRewriteRefOfRenameTableName(n, parent)
+ case TableOptions:
+ return c.copyOnRewriteTableOptions(n, parent)
+ case *TablespaceOperation:
+ return c.copyOnRewriteRefOfTablespaceOperation(n, parent)
+ case *Validation:
+ return c.copyOnRewriteRefOfValidation(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *ArgumentLessWindowExpr:
+ return c.copyOnRewriteRefOfArgumentLessWindowExpr(n, parent)
+ case *Avg:
+ return c.copyOnRewriteRefOfAvg(n, parent)
+ case *CharExpr:
+ return c.copyOnRewriteRefOfCharExpr(n, parent)
+ case *ConvertExpr:
+ return c.copyOnRewriteRefOfConvertExpr(n, parent)
+ case *ConvertUsingExpr:
+ return c.copyOnRewriteRefOfConvertUsingExpr(n, parent)
+ case *Count:
+ return c.copyOnRewriteRefOfCount(n, parent)
+ case *CountStar:
+ return c.copyOnRewriteRefOfCountStar(n, parent)
+ case *CurTimeFuncExpr:
+ return c.copyOnRewriteRefOfCurTimeFuncExpr(n, parent)
+ case *ExtractFuncExpr:
+ return c.copyOnRewriteRefOfExtractFuncExpr(n, parent)
+ case *ExtractValueExpr:
+ return c.copyOnRewriteRefOfExtractValueExpr(n, parent)
+ case *FirstOrLastValueExpr:
+ return c.copyOnRewriteRefOfFirstOrLastValueExpr(n, parent)
+ case *FuncExpr:
+ return c.copyOnRewriteRefOfFuncExpr(n, parent)
+ case *GTIDFuncExpr:
+ return c.copyOnRewriteRefOfGTIDFuncExpr(n, parent)
+ case *GroupConcatExpr:
+ return c.copyOnRewriteRefOfGroupConcatExpr(n, parent)
+ case *InsertExpr:
+ return c.copyOnRewriteRefOfInsertExpr(n, parent)
+ case *IntervalFuncExpr:
+ return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent)
+ case *JSONArrayExpr:
+ return c.copyOnRewriteRefOfJSONArrayExpr(n, parent)
+ case *JSONAttributesExpr:
+ return c.copyOnRewriteRefOfJSONAttributesExpr(n, parent)
+ case *JSONContainsExpr:
+ return c.copyOnRewriteRefOfJSONContainsExpr(n, parent)
+ case *JSONContainsPathExpr:
+ return c.copyOnRewriteRefOfJSONContainsPathExpr(n, parent)
+ case *JSONExtractExpr:
+ return c.copyOnRewriteRefOfJSONExtractExpr(n, parent)
+ case *JSONKeysExpr:
+ return c.copyOnRewriteRefOfJSONKeysExpr(n, parent)
+ case *JSONObjectExpr:
+ return c.copyOnRewriteRefOfJSONObjectExpr(n, parent)
+ case *JSONOverlapsExpr:
+ return c.copyOnRewriteRefOfJSONOverlapsExpr(n, parent)
+ case *JSONPrettyExpr:
+ return c.copyOnRewriteRefOfJSONPrettyExpr(n, parent)
+ case *JSONQuoteExpr:
+ return c.copyOnRewriteRefOfJSONQuoteExpr(n, parent)
+ case *JSONRemoveExpr:
+ return c.copyOnRewriteRefOfJSONRemoveExpr(n, parent)
+ case *JSONSchemaValidFuncExpr:
+ return c.copyOnRewriteRefOfJSONSchemaValidFuncExpr(n, parent)
+ case *JSONSchemaValidationReportFuncExpr:
+ return c.copyOnRewriteRefOfJSONSchemaValidationReportFuncExpr(n, parent)
+ case *JSONSearchExpr:
+ return c.copyOnRewriteRefOfJSONSearchExpr(n, parent)
+ case *JSONStorageFreeExpr:
+ return c.copyOnRewriteRefOfJSONStorageFreeExpr(n, parent)
+ case *JSONStorageSizeExpr:
+ return c.copyOnRewriteRefOfJSONStorageSizeExpr(n, parent)
+ case *JSONUnquoteExpr:
+ return c.copyOnRewriteRefOfJSONUnquoteExpr(n, parent)
+ case *JSONValueExpr:
+ return c.copyOnRewriteRefOfJSONValueExpr(n, parent)
+ case *JSONValueMergeExpr:
+ return c.copyOnRewriteRefOfJSONValueMergeExpr(n, parent)
+ case *JSONValueModifierExpr:
+ return c.copyOnRewriteRefOfJSONValueModifierExpr(n, parent)
+ case *LagLeadExpr:
+ return c.copyOnRewriteRefOfLagLeadExpr(n, parent)
+ case *LocateExpr:
+ return c.copyOnRewriteRefOfLocateExpr(n, parent)
+ case *MatchExpr:
+ return c.copyOnRewriteRefOfMatchExpr(n, parent)
+ case *Max:
+ return c.copyOnRewriteRefOfMax(n, parent)
+ case *MemberOfExpr:
+ return c.copyOnRewriteRefOfMemberOfExpr(n, parent)
+ case *Min:
+ return c.copyOnRewriteRefOfMin(n, parent)
+ case *NTHValueExpr:
+ return c.copyOnRewriteRefOfNTHValueExpr(n, parent)
+ case *NamedWindow:
+ return c.copyOnRewriteRefOfNamedWindow(n, parent)
+ case *NtileExpr:
+ return c.copyOnRewriteRefOfNtileExpr(n, parent)
+ case *PerformanceSchemaFuncExpr:
+ return c.copyOnRewriteRefOfPerformanceSchemaFuncExpr(n, parent)
+ case *RegexpInstrExpr:
+ return c.copyOnRewriteRefOfRegexpInstrExpr(n, parent)
+ case *RegexpLikeExpr:
+ return c.copyOnRewriteRefOfRegexpLikeExpr(n, parent)
+ case *RegexpReplaceExpr:
+ return c.copyOnRewriteRefOfRegexpReplaceExpr(n, parent)
+ case *RegexpSubstrExpr:
+ return c.copyOnRewriteRefOfRegexpSubstrExpr(n, parent)
+ case *SubstrExpr:
+ return c.copyOnRewriteRefOfSubstrExpr(n, parent)
+ case *Sum:
+ return c.copyOnRewriteRefOfSum(n, parent)
+ case *TimestampFuncExpr:
+ return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent)
+ case *TrimFuncExpr:
+ return c.copyOnRewriteRefOfTrimFuncExpr(n, parent)
+ case *UpdateXMLExpr:
+ return c.copyOnRewriteRefOfUpdateXMLExpr(n, parent)
+ case *ValuesFuncExpr:
+ return c.copyOnRewriteRefOfValuesFuncExpr(n, parent)
+ case *WeightStringFuncExpr:
+ return c.copyOnRewriteRefOfWeightStringFuncExpr(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteColTuple(n ColTuple, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case ListArg:
+ return c.copyOnRewriteListArg(n, parent)
+ case *Subquery:
+ return c.copyOnRewriteRefOfSubquery(n, parent)
+ case ValTuple:
+ return c.copyOnRewriteValTuple(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteConstraintInfo(n ConstraintInfo, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *CheckConstraintDefinition:
+ return c.copyOnRewriteRefOfCheckConstraintDefinition(n, parent)
+ case *ForeignKeyDefinition:
+ return c.copyOnRewriteRefOfForeignKeyDefinition(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteDBDDLStatement(n DBDDLStatement, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AlterDatabase:
+ return c.copyOnRewriteRefOfAlterDatabase(n, parent)
+ case *CreateDatabase:
+ return c.copyOnRewriteRefOfCreateDatabase(n, parent)
+ case *DropDatabase:
+ return c.copyOnRewriteRefOfDropDatabase(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteDDLStatement(n DDLStatement, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AlterTable:
+ return c.copyOnRewriteRefOfAlterTable(n, parent)
+ case *AlterView:
+ return c.copyOnRewriteRefOfAlterView(n, parent)
+ case *CreateTable:
+ return c.copyOnRewriteRefOfCreateTable(n, parent)
+ case *CreateView:
+ return c.copyOnRewriteRefOfCreateView(n, parent)
+ case *DropTable:
+ return c.copyOnRewriteRefOfDropTable(n, parent)
+ case *DropView:
+ return c.copyOnRewriteRefOfDropView(n, parent)
+ case *RenameTable:
+ return c.copyOnRewriteRefOfRenameTable(n, parent)
+ case *TruncateTable:
+ return c.copyOnRewriteRefOfTruncateTable(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteExplain(n Explain, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *ExplainStmt:
+ return c.copyOnRewriteRefOfExplainStmt(n, parent)
+ case *ExplainTab:
+ return c.copyOnRewriteRefOfExplainTab(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AndExpr:
+ return c.copyOnRewriteRefOfAndExpr(n, parent)
+ case Argument:
+ return c.copyOnRewriteArgument(n, parent)
+ case *ArgumentLessWindowExpr:
+ return c.copyOnRewriteRefOfArgumentLessWindowExpr(n, parent)
+ case *Avg:
+ return c.copyOnRewriteRefOfAvg(n, parent)
+ case *BetweenExpr:
+ return c.copyOnRewriteRefOfBetweenExpr(n, parent)
+ case *BinaryExpr:
+ return c.copyOnRewriteRefOfBinaryExpr(n, parent)
+ case *BitAnd:
+ return c.copyOnRewriteRefOfBitAnd(n, parent)
+ case *BitOr:
+ return c.copyOnRewriteRefOfBitOr(n, parent)
+ case *BitXor:
+ return c.copyOnRewriteRefOfBitXor(n, parent)
+ case BoolVal:
+ return c.copyOnRewriteBoolVal(n, parent)
+ case *CaseExpr:
+ return c.copyOnRewriteRefOfCaseExpr(n, parent)
+ case *CastExpr:
+ return c.copyOnRewriteRefOfCastExpr(n, parent)
+ case *CharExpr:
+ return c.copyOnRewriteRefOfCharExpr(n, parent)
+ case *ColName:
+ return c.copyOnRewriteRefOfColName(n, parent)
+ case *CollateExpr:
+ return c.copyOnRewriteRefOfCollateExpr(n, parent)
+ case *ComparisonExpr:
+ return c.copyOnRewriteRefOfComparisonExpr(n, parent)
+ case *ConvertExpr:
+ return c.copyOnRewriteRefOfConvertExpr(n, parent)
+ case *ConvertUsingExpr:
+ return c.copyOnRewriteRefOfConvertUsingExpr(n, parent)
+ case *Count:
+ return c.copyOnRewriteRefOfCount(n, parent)
+ case *CountStar:
+ return c.copyOnRewriteRefOfCountStar(n, parent)
+ case *CurTimeFuncExpr:
+ return c.copyOnRewriteRefOfCurTimeFuncExpr(n, parent)
+ case *Default:
+ return c.copyOnRewriteRefOfDefault(n, parent)
+ case *ExistsExpr:
+ return c.copyOnRewriteRefOfExistsExpr(n, parent)
+ case *ExtractFuncExpr:
+ return c.copyOnRewriteRefOfExtractFuncExpr(n, parent)
+ case *ExtractValueExpr:
+ return c.copyOnRewriteRefOfExtractValueExpr(n, parent)
+ case *ExtractedSubquery:
+ return c.copyOnRewriteRefOfExtractedSubquery(n, parent)
+ case *FirstOrLastValueExpr:
+ return c.copyOnRewriteRefOfFirstOrLastValueExpr(n, parent)
+ case *FuncExpr:
+ return c.copyOnRewriteRefOfFuncExpr(n, parent)
+ case *GTIDFuncExpr:
+ return c.copyOnRewriteRefOfGTIDFuncExpr(n, parent)
+ case *GroupConcatExpr:
+ return c.copyOnRewriteRefOfGroupConcatExpr(n, parent)
+ case *InsertExpr:
+ return c.copyOnRewriteRefOfInsertExpr(n, parent)
+ case *IntervalExpr:
+ return c.copyOnRewriteRefOfIntervalExpr(n, parent)
+ case *IntervalFuncExpr:
+ return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent)
+ case *IntroducerExpr:
+ return c.copyOnRewriteRefOfIntroducerExpr(n, parent)
+ case *IsExpr:
+ return c.copyOnRewriteRefOfIsExpr(n, parent)
+ case *JSONArrayExpr:
+ return c.copyOnRewriteRefOfJSONArrayExpr(n, parent)
+ case *JSONAttributesExpr:
+ return c.copyOnRewriteRefOfJSONAttributesExpr(n, parent)
+ case *JSONContainsExpr:
+ return c.copyOnRewriteRefOfJSONContainsExpr(n, parent)
+ case *JSONContainsPathExpr:
+ return c.copyOnRewriteRefOfJSONContainsPathExpr(n, parent)
+ case *JSONExtractExpr:
+ return c.copyOnRewriteRefOfJSONExtractExpr(n, parent)
+ case *JSONKeysExpr:
+ return c.copyOnRewriteRefOfJSONKeysExpr(n, parent)
+ case *JSONObjectExpr:
+ return c.copyOnRewriteRefOfJSONObjectExpr(n, parent)
+ case *JSONOverlapsExpr:
+ return c.copyOnRewriteRefOfJSONOverlapsExpr(n, parent)
+ case *JSONPrettyExpr:
+ return c.copyOnRewriteRefOfJSONPrettyExpr(n, parent)
+ case *JSONQuoteExpr:
+ return c.copyOnRewriteRefOfJSONQuoteExpr(n, parent)
+ case *JSONRemoveExpr:
+ return c.copyOnRewriteRefOfJSONRemoveExpr(n, parent)
+ case *JSONSchemaValidFuncExpr:
+ return c.copyOnRewriteRefOfJSONSchemaValidFuncExpr(n, parent)
+ case *JSONSchemaValidationReportFuncExpr:
+ return c.copyOnRewriteRefOfJSONSchemaValidationReportFuncExpr(n, parent)
+ case *JSONSearchExpr:
+ return c.copyOnRewriteRefOfJSONSearchExpr(n, parent)
+ case *JSONStorageFreeExpr:
+ return c.copyOnRewriteRefOfJSONStorageFreeExpr(n, parent)
+ case *JSONStorageSizeExpr:
+ return c.copyOnRewriteRefOfJSONStorageSizeExpr(n, parent)
+ case *JSONUnquoteExpr:
+ return c.copyOnRewriteRefOfJSONUnquoteExpr(n, parent)
+ case *JSONValueExpr:
+ return c.copyOnRewriteRefOfJSONValueExpr(n, parent)
+ case *JSONValueMergeExpr:
+ return c.copyOnRewriteRefOfJSONValueMergeExpr(n, parent)
+ case *JSONValueModifierExpr:
+ return c.copyOnRewriteRefOfJSONValueModifierExpr(n, parent)
+ case *LagLeadExpr:
+ return c.copyOnRewriteRefOfLagLeadExpr(n, parent)
+ case ListArg:
+ return c.copyOnRewriteListArg(n, parent)
+ case *Literal:
+ return c.copyOnRewriteRefOfLiteral(n, parent)
+ case *LocateExpr:
+ return c.copyOnRewriteRefOfLocateExpr(n, parent)
+ case *LockingFunc:
+ return c.copyOnRewriteRefOfLockingFunc(n, parent)
+ case *MatchExpr:
+ return c.copyOnRewriteRefOfMatchExpr(n, parent)
+ case *Max:
+ return c.copyOnRewriteRefOfMax(n, parent)
+ case *MemberOfExpr:
+ return c.copyOnRewriteRefOfMemberOfExpr(n, parent)
+ case *Min:
+ return c.copyOnRewriteRefOfMin(n, parent)
+ case *NTHValueExpr:
+ return c.copyOnRewriteRefOfNTHValueExpr(n, parent)
+ case *NamedWindow:
+ return c.copyOnRewriteRefOfNamedWindow(n, parent)
+ case *NotExpr:
+ return c.copyOnRewriteRefOfNotExpr(n, parent)
+ case *NtileExpr:
+ return c.copyOnRewriteRefOfNtileExpr(n, parent)
+ case *NullVal:
+ return c.copyOnRewriteRefOfNullVal(n, parent)
+ case *Offset:
+ return c.copyOnRewriteRefOfOffset(n, parent)
+ case *OrExpr:
+ return c.copyOnRewriteRefOfOrExpr(n, parent)
+ case *PerformanceSchemaFuncExpr:
+ return c.copyOnRewriteRefOfPerformanceSchemaFuncExpr(n, parent)
+ case *RegexpInstrExpr:
+ return c.copyOnRewriteRefOfRegexpInstrExpr(n, parent)
+ case *RegexpLikeExpr:
+ return c.copyOnRewriteRefOfRegexpLikeExpr(n, parent)
+ case *RegexpReplaceExpr:
+ return c.copyOnRewriteRefOfRegexpReplaceExpr(n, parent)
+ case *RegexpSubstrExpr:
+ return c.copyOnRewriteRefOfRegexpSubstrExpr(n, parent)
+ case *Std:
+ return c.copyOnRewriteRefOfStd(n, parent)
+ case *StdDev:
+ return c.copyOnRewriteRefOfStdDev(n, parent)
+ case *StdPop:
+ return c.copyOnRewriteRefOfStdPop(n, parent)
+ case *StdSamp:
+ return c.copyOnRewriteRefOfStdSamp(n, parent)
+ case *Subquery:
+ return c.copyOnRewriteRefOfSubquery(n, parent)
+ case *SubstrExpr:
+ return c.copyOnRewriteRefOfSubstrExpr(n, parent)
+ case *Sum:
+ return c.copyOnRewriteRefOfSum(n, parent)
+ case *TimestampFuncExpr:
+ return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent)
+ case *TrimFuncExpr:
+ return c.copyOnRewriteRefOfTrimFuncExpr(n, parent)
+ case *UnaryExpr:
+ return c.copyOnRewriteRefOfUnaryExpr(n, parent)
+ case *UpdateXMLExpr:
+ return c.copyOnRewriteRefOfUpdateXMLExpr(n, parent)
+ case ValTuple:
+ return c.copyOnRewriteValTuple(n, parent)
+ case *ValuesFuncExpr:
+ return c.copyOnRewriteRefOfValuesFuncExpr(n, parent)
+ case *VarPop:
+ return c.copyOnRewriteRefOfVarPop(n, parent)
+ case *VarSamp:
+ return c.copyOnRewriteRefOfVarSamp(n, parent)
+ case *Variable:
+ return c.copyOnRewriteRefOfVariable(n, parent)
+ case *Variance:
+ return c.copyOnRewriteRefOfVariance(n, parent)
+ case *WeightStringFuncExpr:
+ return c.copyOnRewriteRefOfWeightStringFuncExpr(n, parent)
+ case *XorExpr:
+ return c.copyOnRewriteRefOfXorExpr(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteInsertRows(n InsertRows, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *Select:
+ return c.copyOnRewriteRefOfSelect(n, parent)
+ case *Union:
+ return c.copyOnRewriteRefOfUnion(n, parent)
+ case Values:
+ return c.copyOnRewriteValues(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteSelectExpr(n SelectExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AliasedExpr:
+ return c.copyOnRewriteRefOfAliasedExpr(n, parent)
+ case *Nextval:
+ return c.copyOnRewriteRefOfNextval(n, parent)
+ case *StarExpr:
+ return c.copyOnRewriteRefOfStarExpr(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteSelectStatement(n SelectStatement, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *Select:
+ return c.copyOnRewriteRefOfSelect(n, parent)
+ case *Union:
+ return c.copyOnRewriteRefOfUnion(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteShowInternal(n ShowInternal, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *ShowBasic:
+ return c.copyOnRewriteRefOfShowBasic(n, parent)
+ case *ShowCreate:
+ return c.copyOnRewriteRefOfShowCreate(n, parent)
+ case *ShowOther:
+ return c.copyOnRewriteRefOfShowOther(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteSimpleTableExpr(n SimpleTableExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *DerivedTable:
+ return c.copyOnRewriteRefOfDerivedTable(n, parent)
+ case TableName:
+ return c.copyOnRewriteTableName(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteStatement(n Statement, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AlterDatabase:
+ return c.copyOnRewriteRefOfAlterDatabase(n, parent)
+ case *AlterMigration:
+ return c.copyOnRewriteRefOfAlterMigration(n, parent)
+ case *AlterTable:
+ return c.copyOnRewriteRefOfAlterTable(n, parent)
+ case *AlterView:
+ return c.copyOnRewriteRefOfAlterView(n, parent)
+ case *AlterVschema:
+ return c.copyOnRewriteRefOfAlterVschema(n, parent)
+ case *Begin:
+ return c.copyOnRewriteRefOfBegin(n, parent)
+ case *CallProc:
+ return c.copyOnRewriteRefOfCallProc(n, parent)
+ case *CommentOnly:
+ return c.copyOnRewriteRefOfCommentOnly(n, parent)
+ case *Commit:
+ return c.copyOnRewriteRefOfCommit(n, parent)
+ case *CreateDatabase:
+ return c.copyOnRewriteRefOfCreateDatabase(n, parent)
+ case *CreateTable:
+ return c.copyOnRewriteRefOfCreateTable(n, parent)
+ case *CreateView:
+ return c.copyOnRewriteRefOfCreateView(n, parent)
+ case *DeallocateStmt:
+ return c.copyOnRewriteRefOfDeallocateStmt(n, parent)
+ case *Delete:
+ return c.copyOnRewriteRefOfDelete(n, parent)
+ case *DropDatabase:
+ return c.copyOnRewriteRefOfDropDatabase(n, parent)
+ case *DropTable:
+ return c.copyOnRewriteRefOfDropTable(n, parent)
+ case *DropView:
+ return c.copyOnRewriteRefOfDropView(n, parent)
+ case *ExecuteStmt:
+ return c.copyOnRewriteRefOfExecuteStmt(n, parent)
+ case *ExplainStmt:
+ return c.copyOnRewriteRefOfExplainStmt(n, parent)
+ case *ExplainTab:
+ return c.copyOnRewriteRefOfExplainTab(n, parent)
+ case *Flush:
+ return c.copyOnRewriteRefOfFlush(n, parent)
+ case *Insert:
+ return c.copyOnRewriteRefOfInsert(n, parent)
+ case *Load:
+ return c.copyOnRewriteRefOfLoad(n, parent)
+ case *LockTables:
+ return c.copyOnRewriteRefOfLockTables(n, parent)
+ case *OtherAdmin:
+ return c.copyOnRewriteRefOfOtherAdmin(n, parent)
+ case *OtherRead:
+ return c.copyOnRewriteRefOfOtherRead(n, parent)
+ case *PrepareStmt:
+ return c.copyOnRewriteRefOfPrepareStmt(n, parent)
+ case *Release:
+ return c.copyOnRewriteRefOfRelease(n, parent)
+ case *RenameTable:
+ return c.copyOnRewriteRefOfRenameTable(n, parent)
+ case *RevertMigration:
+ return c.copyOnRewriteRefOfRevertMigration(n, parent)
+ case *Rollback:
+ return c.copyOnRewriteRefOfRollback(n, parent)
+ case *SRollback:
+ return c.copyOnRewriteRefOfSRollback(n, parent)
+ case *Savepoint:
+ return c.copyOnRewriteRefOfSavepoint(n, parent)
+ case *Select:
+ return c.copyOnRewriteRefOfSelect(n, parent)
+ case *Set:
+ return c.copyOnRewriteRefOfSet(n, parent)
+ case *Show:
+ return c.copyOnRewriteRefOfShow(n, parent)
+ case *ShowMigrationLogs:
+ return c.copyOnRewriteRefOfShowMigrationLogs(n, parent)
+ case *ShowThrottledApps:
+ return c.copyOnRewriteRefOfShowThrottledApps(n, parent)
+ case *ShowThrottlerStatus:
+ return c.copyOnRewriteRefOfShowThrottlerStatus(n, parent)
+ case *Stream:
+ return c.copyOnRewriteRefOfStream(n, parent)
+ case *TruncateTable:
+ return c.copyOnRewriteRefOfTruncateTable(n, parent)
+ case *Union:
+ return c.copyOnRewriteRefOfUnion(n, parent)
+ case *UnlockTables:
+ return c.copyOnRewriteRefOfUnlockTables(n, parent)
+ case *Update:
+ return c.copyOnRewriteRefOfUpdate(n, parent)
+ case *Use:
+ return c.copyOnRewriteRefOfUse(n, parent)
+ case *VExplainStmt:
+ return c.copyOnRewriteRefOfVExplainStmt(n, parent)
+ case *VStream:
+ return c.copyOnRewriteRefOfVStream(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteTableExpr(n TableExpr, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ switch n := n.(type) {
+ case *AliasedTableExpr:
+ return c.copyOnRewriteRefOfAliasedTableExpr(n, parent)
+ case *JSONTableExpr:
+ return c.copyOnRewriteRefOfJSONTableExpr(n, parent)
+ case *JoinTableExpr:
+ return c.copyOnRewriteRefOfJoinTableExpr(n, parent)
+ case *ParenTableExpr:
+ return c.copyOnRewriteRefOfParenTableExpr(n, parent)
+ default:
+ // this should never happen
+ return nil, false
+ }
+}
+func (c *cow) copyOnRewriteAlgorithmValue(n AlgorithmValue, parent SQLNode) (out SQLNode, changed bool) {
+ if c.cursor.stop {
+ return n, false
+ }
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(n, parent, changed)
+ } else {
+ out = n
+ }
+ return
+}
+func (c *cow) copyOnRewriteArgument(n Argument, parent SQLNode) (out SQLNode, changed bool) {
+ if c.cursor.stop {
+ return n, false
+ }
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(n, parent, changed)
+ } else {
+ out = n
+ }
+ return
+}
+func (c *cow) copyOnRewriteBoolVal(n BoolVal, parent SQLNode) (out SQLNode, changed bool) {
+ if c.cursor.stop {
+ return n, false
+ }
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(n, parent, changed)
+ } else {
+ out = n
+ }
+ return
+}
+func (c *cow) copyOnRewriteListArg(n ListArg, parent SQLNode) (out SQLNode, changed bool) {
+ if c.cursor.stop {
+ return n, false
+ }
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(n, parent, changed)
+ } else {
+ out = n
+ }
+ return
+}
+func (c *cow) copyOnRewriteMatchAction(n MatchAction, parent SQLNode) (out SQLNode, changed bool) {
+ if c.cursor.stop {
+ return n, false
+ }
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(n, parent, changed)
+ } else {
+ out = n
+ }
+ return
+}
+func (c *cow) copyOnRewriteReferenceAction(n ReferenceAction, parent SQLNode) (out SQLNode, changed bool) {
+ if c.cursor.stop {
+ return n, false
+ }
+ if c.pre != nil {
+ c.pre(n, parent)
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(n, parent, changed)
+ } else {
+ out = n
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIdentifierCI(n *IdentifierCI, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfIdentifierCS(n *IdentifierCS, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfRootNode(n *RootNode, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _SQLNode, changedSQLNode := c.copyOnRewriteSQLNode(n.SQLNode, n)
+ if changedSQLNode {
+ res := *n
+ res.SQLNode, _ = _SQLNode.(SQLNode)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfTableName(n *TableName, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Name, changedName := c.copyOnRewriteIdentifierCS(n.Name, n)
+ _Qualifier, changedQualifier := c.copyOnRewriteIdentifierCS(n.Qualifier, n)
+ if changedName || changedQualifier {
+ res := *n
+ res.Name, _ = _Name.(IdentifierCS)
+ res.Qualifier, _ = _Qualifier.(IdentifierCS)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
+func (c *cow) copyOnRewriteRefOfVindexParam(n *VindexParam, parent SQLNode) (out SQLNode, changed bool) {
+ if n == nil || c.cursor.stop {
+ return n, false
+ }
+ out = n
+ if c.pre == nil || c.pre(n, parent) {
+ _Key, changedKey := c.copyOnRewriteIdentifierCI(n.Key, n)
+ if changedKey {
+ res := *n
+ res.Key, _ = _Key.(IdentifierCI)
+ out = &res
+ if c.cloned != nil {
+ c.cloned(n, out)
+ }
+ changed = true
+ }
+ }
+ if c.post != nil {
+ out, changed = c.postVisit(out, parent, changed)
+ }
+ return
+}
diff --git a/go/vt/sqlparser/ast_copy_on_rewrite_test.go b/go/vt/sqlparser/ast_copy_on_rewrite_test.go
new file mode 100644
index 00000000000..389b2a4bc29
--- /dev/null
+++ b/go/vt/sqlparser/ast_copy_on_rewrite_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCopyOnRewrite(t *testing.T) {
+ // rewrite an expression without changing the original
+ expr, err := ParseExpr("a = b")
+ require.NoError(t, err)
+ out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) {
+ col, ok := cursor.Node().(*ColName)
+ if !ok {
+ return
+ }
+ if col.Name.EqualString("a") {
+ cursor.Replace(NewIntLiteral("1"))
+ }
+ }, nil)
+
+ assert.Equal(t, "a = b", String(expr))
+ assert.Equal(t, "1 = b", String(out))
+}
+
+func TestCopyOnRewriteDeeper(t *testing.T) {
+ // rewrite an expression without changing the original. the changed happens deep in the syntax tree,
+ // here we are testing that all ancestors up to the root are cloned correctly
+ expr, err := ParseExpr("a + b * c = 12")
+ require.NoError(t, err)
+ var path []string
+ out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) {
+ col, ok := cursor.Node().(*ColName)
+ if !ok {
+ return
+ }
+ if col.Name.EqualString("c") {
+ cursor.Replace(NewIntLiteral("1"))
+ }
+ }, func(before, _ SQLNode) {
+ path = append(path, String(before))
+ })
+
+ assert.Equal(t, "a + b * c = 12", String(expr))
+ assert.Equal(t, "a + b * 1 = 12", String(out))
+
+ expected := []string{ // this are all the nodes that we need to clone when changing the `c` node
+ "c",
+ "b * c",
+ "a + b * c",
+ "a + b * c = 12",
+ }
+ assert.Equal(t, expected, path)
+}
+
+func TestDontCopyWithoutRewrite(t *testing.T) {
+ // when no rewriting happens, we want the original back
+ expr, err := ParseExpr("a = b")
+ require.NoError(t, err)
+ out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) {}, nil)
+
+ assert.Same(t, expr, out)
+}
+
+func TestStopTreeWalk(t *testing.T) {
+ // stop walking down part of the AST
+ original := "a = b + c"
+ expr, err := ParseExpr(original)
+ require.NoError(t, err)
+ out := CopyOnRewrite(expr, func(node, parent SQLNode) bool {
+ _, ok := node.(*BinaryExpr)
+ return !ok
+ }, func(cursor *CopyOnWriteCursor) {
+ col, ok := cursor.Node().(*ColName)
+ if !ok {
+ return
+ }
+
+ cursor.Replace(NewStrLiteral(col.Name.String()))
+ }, nil)
+
+ assert.Equal(t, original, String(expr))
+ assert.Equal(t, "'a' = b + c", String(out)) // b + c are unchanged since they are under the + (*BinaryExpr)
+}
+
+func TestStopTreeWalkButStillVisit(t *testing.T) {
+ // here we are asserting that even when we stop at the binary expression, we still visit it in the post visitor
+ original := "1337 = b + c"
+ expr, err := ParseExpr(original)
+ require.NoError(t, err)
+ out := CopyOnRewrite(expr, func(node, parent SQLNode) bool {
+ _, ok := node.(*BinaryExpr)
+ return !ok
+ }, func(cursor *CopyOnWriteCursor) {
+ switch cursor.Node().(type) {
+ case *BinaryExpr:
+ cursor.Replace(NewStrLiteral("johnny was here"))
+ case *ColName:
+ t.Errorf("should not visit ColName in the post")
+ }
+ }, nil)
+
+ assert.Equal(t, original, String(expr))
+ assert.Equal(t, "1337 = 'johnny was here'", String(out)) // b + c are replaced
+}
diff --git a/go/vt/sqlparser/ast_equals.go b/go/vt/sqlparser/ast_equals.go
index c4631140588..31ec8b8b10f 100644
--- a/go/vt/sqlparser/ast_equals.go
+++ b/go/vt/sqlparser/ast_equals.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ limitations under the License.
package sqlparser
-// EqualsSQLNode does deep equals between the two objects.
-func EqualsSQLNode(inA, inB SQLNode) bool {
+// SQLNode does deep equals between the two objects.
+func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool {
if inA == nil && inB == nil {
return true
}
@@ -26,30 +26,24 @@ func EqualsSQLNode(inA, inB SQLNode) bool {
return false
}
switch a := inA.(type) {
- case AccessMode:
- b, ok := inB.(AccessMode)
- if !ok {
- return false
- }
- return a == b
case *AddColumns:
b, ok := inB.(*AddColumns)
if !ok {
return false
}
- return EqualsRefOfAddColumns(a, b)
+ return cmp.RefOfAddColumns(a, b)
case *AddConstraintDefinition:
b, ok := inB.(*AddConstraintDefinition)
if !ok {
return false
}
- return EqualsRefOfAddConstraintDefinition(a, b)
+ return cmp.RefOfAddConstraintDefinition(a, b)
case *AddIndexDefinition:
b, ok := inB.(*AddIndexDefinition)
if !ok {
return false
}
- return EqualsRefOfAddIndexDefinition(a, b)
+ return cmp.RefOfAddIndexDefinition(a, b)
case AlgorithmValue:
b, ok := inB.(AlgorithmValue)
if !ok {
@@ -61,73 +55,73 @@ func EqualsSQLNode(inA, inB SQLNode) bool {
if !ok {
return false
}
- return EqualsRefOfAliasedExpr(a, b)
+ return cmp.RefOfAliasedExpr(a, b)
case *AliasedTableExpr:
b, ok := inB.(*AliasedTableExpr)
if !ok {
return false
}
- return EqualsRefOfAliasedTableExpr(a, b)
+ return cmp.RefOfAliasedTableExpr(a, b)
case *AlterCharset:
b, ok := inB.(*AlterCharset)
if !ok {
return false
}
- return EqualsRefOfAlterCharset(a, b)
+ return cmp.RefOfAlterCharset(a, b)
case *AlterCheck:
b, ok := inB.(*AlterCheck)
if !ok {
return false
}
- return EqualsRefOfAlterCheck(a, b)
+ return cmp.RefOfAlterCheck(a, b)
case *AlterColumn:
b, ok := inB.(*AlterColumn)
if !ok {
return false
}
- return EqualsRefOfAlterColumn(a, b)
+ return cmp.RefOfAlterColumn(a, b)
case *AlterDatabase:
b, ok := inB.(*AlterDatabase)
if !ok {
return false
}
- return EqualsRefOfAlterDatabase(a, b)
+ return cmp.RefOfAlterDatabase(a, b)
case *AlterIndex:
b, ok := inB.(*AlterIndex)
if !ok {
return false
}
- return EqualsRefOfAlterIndex(a, b)
+ return cmp.RefOfAlterIndex(a, b)
case *AlterMigration:
b, ok := inB.(*AlterMigration)
if !ok {
return false
}
- return EqualsRefOfAlterMigration(a, b)
+ return cmp.RefOfAlterMigration(a, b)
case *AlterTable:
b, ok := inB.(*AlterTable)
if !ok {
return false
}
- return EqualsRefOfAlterTable(a, b)
+ return cmp.RefOfAlterTable(a, b)
case *AlterView:
b, ok := inB.(*AlterView)
if !ok {
return false
}
- return EqualsRefOfAlterView(a, b)
+ return cmp.RefOfAlterView(a, b)
case *AlterVschema:
b, ok := inB.(*AlterVschema)
if !ok {
return false
}
- return EqualsRefOfAlterVschema(a, b)
+ return cmp.RefOfAlterVschema(a, b)
case *AndExpr:
b, ok := inB.(*AndExpr)
if !ok {
return false
}
- return EqualsRefOfAndExpr(a, b)
+ return cmp.RefOfAndExpr(a, b)
case Argument:
b, ok := inB.(Argument)
if !ok {
@@ -139,55 +133,55 @@ func EqualsSQLNode(inA, inB SQLNode) bool {
if !ok {
return false
}
- return EqualsRefOfArgumentLessWindowExpr(a, b)
+ return cmp.RefOfArgumentLessWindowExpr(a, b)
case *AutoIncSpec:
b, ok := inB.(*AutoIncSpec)
if !ok {
return false
}
- return EqualsRefOfAutoIncSpec(a, b)
+ return cmp.RefOfAutoIncSpec(a, b)
case *Avg:
b, ok := inB.(*Avg)
if !ok {
return false
}
- return EqualsRefOfAvg(a, b)
+ return cmp.RefOfAvg(a, b)
case *Begin:
b, ok := inB.(*Begin)
if !ok {
return false
}
- return EqualsRefOfBegin(a, b)
+ return cmp.RefOfBegin(a, b)
case *BetweenExpr:
b, ok := inB.(*BetweenExpr)
if !ok {
return false
}
- return EqualsRefOfBetweenExpr(a, b)
+ return cmp.RefOfBetweenExpr(a, b)
case *BinaryExpr:
b, ok := inB.(*BinaryExpr)
if !ok {
return false
}
- return EqualsRefOfBinaryExpr(a, b)
+ return cmp.RefOfBinaryExpr(a, b)
case *BitAnd:
b, ok := inB.(*BitAnd)
if !ok {
return false
}
- return EqualsRefOfBitAnd(a, b)
+ return cmp.RefOfBitAnd(a, b)
case *BitOr:
b, ok := inB.(*BitOr)
if !ok {
return false
}
- return EqualsRefOfBitOr(a, b)
+ return cmp.RefOfBitOr(a, b)
case *BitXor:
b, ok := inB.(*BitXor)
if !ok {
return false
}
- return EqualsRefOfBitXor(a, b)
+ return cmp.RefOfBitXor(a, b)
case BoolVal:
b, ok := inB.(BoolVal)
if !ok {
@@ -199,577 +193,571 @@ func EqualsSQLNode(inA, inB SQLNode) bool {
if !ok {
return false
}
- return EqualsRefOfCallProc(a, b)
+ return cmp.RefOfCallProc(a, b)
case *CaseExpr:
b, ok := inB.(*CaseExpr)
if !ok {
return false
}
- return EqualsRefOfCaseExpr(a, b)
+ return cmp.RefOfCaseExpr(a, b)
case *CastExpr:
b, ok := inB.(*CastExpr)
if !ok {
return false
}
- return EqualsRefOfCastExpr(a, b)
+ return cmp.RefOfCastExpr(a, b)
case *ChangeColumn:
b, ok := inB.(*ChangeColumn)
if !ok {
return false
}
- return EqualsRefOfChangeColumn(a, b)
+ return cmp.RefOfChangeColumn(a, b)
case *CharExpr:
b, ok := inB.(*CharExpr)
if !ok {
return false
}
- return EqualsRefOfCharExpr(a, b)
+ return cmp.RefOfCharExpr(a, b)
case *CheckConstraintDefinition:
b, ok := inB.(*CheckConstraintDefinition)
if !ok {
return false
}
- return EqualsRefOfCheckConstraintDefinition(a, b)
+ return cmp.RefOfCheckConstraintDefinition(a, b)
case *ColName:
b, ok := inB.(*ColName)
if !ok {
return false
}
- return EqualsRefOfColName(a, b)
+ return cmp.RefOfColName(a, b)
case *CollateExpr:
b, ok := inB.(*CollateExpr)
if !ok {
return false
}
- return EqualsRefOfCollateExpr(a, b)
+ return cmp.RefOfCollateExpr(a, b)
case *ColumnDefinition:
b, ok := inB.(*ColumnDefinition)
if !ok {
return false
}
- return EqualsRefOfColumnDefinition(a, b)
+ return cmp.RefOfColumnDefinition(a, b)
case *ColumnType:
b, ok := inB.(*ColumnType)
if !ok {
return false
}
- return EqualsRefOfColumnType(a, b)
+ return cmp.RefOfColumnType(a, b)
case Columns:
b, ok := inB.(Columns)
if !ok {
return false
}
- return EqualsColumns(a, b)
+ return cmp.Columns(a, b)
case *CommentOnly:
b, ok := inB.(*CommentOnly)
if !ok {
return false
}
- return EqualsRefOfCommentOnly(a, b)
+ return cmp.RefOfCommentOnly(a, b)
case *Commit:
b, ok := inB.(*Commit)
if !ok {
return false
}
- return EqualsRefOfCommit(a, b)
+ return cmp.RefOfCommit(a, b)
case *CommonTableExpr:
b, ok := inB.(*CommonTableExpr)
if !ok {
return false
}
- return EqualsRefOfCommonTableExpr(a, b)
+ return cmp.RefOfCommonTableExpr(a, b)
case *ComparisonExpr:
b, ok := inB.(*ComparisonExpr)
if !ok {
return false
}
- return EqualsRefOfComparisonExpr(a, b)
+ return cmp.RefOfComparisonExpr(a, b)
case *ConstraintDefinition:
b, ok := inB.(*ConstraintDefinition)
if !ok {
return false
}
- return EqualsRefOfConstraintDefinition(a, b)
+ return cmp.RefOfConstraintDefinition(a, b)
case *ConvertExpr:
b, ok := inB.(*ConvertExpr)
if !ok {
return false
}
- return EqualsRefOfConvertExpr(a, b)
+ return cmp.RefOfConvertExpr(a, b)
case *ConvertType:
b, ok := inB.(*ConvertType)
if !ok {
return false
}
- return EqualsRefOfConvertType(a, b)
+ return cmp.RefOfConvertType(a, b)
case *ConvertUsingExpr:
b, ok := inB.(*ConvertUsingExpr)
if !ok {
return false
}
- return EqualsRefOfConvertUsingExpr(a, b)
+ return cmp.RefOfConvertUsingExpr(a, b)
case *Count:
b, ok := inB.(*Count)
if !ok {
return false
}
- return EqualsRefOfCount(a, b)
+ return cmp.RefOfCount(a, b)
case *CountStar:
b, ok := inB.(*CountStar)
if !ok {
return false
}
- return EqualsRefOfCountStar(a, b)
+ return cmp.RefOfCountStar(a, b)
case *CreateDatabase:
b, ok := inB.(*CreateDatabase)
if !ok {
return false
}
- return EqualsRefOfCreateDatabase(a, b)
+ return cmp.RefOfCreateDatabase(a, b)
case *CreateTable:
b, ok := inB.(*CreateTable)
if !ok {
return false
}
- return EqualsRefOfCreateTable(a, b)
+ return cmp.RefOfCreateTable(a, b)
case *CreateView:
b, ok := inB.(*CreateView)
if !ok {
return false
}
- return EqualsRefOfCreateView(a, b)
+ return cmp.RefOfCreateView(a, b)
case *CurTimeFuncExpr:
b, ok := inB.(*CurTimeFuncExpr)
if !ok {
return false
}
- return EqualsRefOfCurTimeFuncExpr(a, b)
+ return cmp.RefOfCurTimeFuncExpr(a, b)
case *DeallocateStmt:
b, ok := inB.(*DeallocateStmt)
if !ok {
return false
}
- return EqualsRefOfDeallocateStmt(a, b)
+ return cmp.RefOfDeallocateStmt(a, b)
case *Default:
b, ok := inB.(*Default)
if !ok {
return false
}
- return EqualsRefOfDefault(a, b)
+ return cmp.RefOfDefault(a, b)
case *Definer:
b, ok := inB.(*Definer)
if !ok {
return false
}
- return EqualsRefOfDefiner(a, b)
+ return cmp.RefOfDefiner(a, b)
case *Delete:
b, ok := inB.(*Delete)
if !ok {
return false
}
- return EqualsRefOfDelete(a, b)
+ return cmp.RefOfDelete(a, b)
case *DerivedTable:
b, ok := inB.(*DerivedTable)
if !ok {
return false
}
- return EqualsRefOfDerivedTable(a, b)
+ return cmp.RefOfDerivedTable(a, b)
case *DropColumn:
b, ok := inB.(*DropColumn)
if !ok {
return false
}
- return EqualsRefOfDropColumn(a, b)
+ return cmp.RefOfDropColumn(a, b)
case *DropDatabase:
b, ok := inB.(*DropDatabase)
if !ok {
return false
}
- return EqualsRefOfDropDatabase(a, b)
+ return cmp.RefOfDropDatabase(a, b)
case *DropKey:
b, ok := inB.(*DropKey)
if !ok {
return false
}
- return EqualsRefOfDropKey(a, b)
+ return cmp.RefOfDropKey(a, b)
case *DropTable:
b, ok := inB.(*DropTable)
if !ok {
return false
}
- return EqualsRefOfDropTable(a, b)
+ return cmp.RefOfDropTable(a, b)
case *DropView:
b, ok := inB.(*DropView)
if !ok {
return false
}
- return EqualsRefOfDropView(a, b)
+ return cmp.RefOfDropView(a, b)
case *ExecuteStmt:
b, ok := inB.(*ExecuteStmt)
if !ok {
return false
}
- return EqualsRefOfExecuteStmt(a, b)
+ return cmp.RefOfExecuteStmt(a, b)
case *ExistsExpr:
b, ok := inB.(*ExistsExpr)
if !ok {
return false
}
- return EqualsRefOfExistsExpr(a, b)
+ return cmp.RefOfExistsExpr(a, b)
case *ExplainStmt:
b, ok := inB.(*ExplainStmt)
if !ok {
return false
}
- return EqualsRefOfExplainStmt(a, b)
+ return cmp.RefOfExplainStmt(a, b)
case *ExplainTab:
b, ok := inB.(*ExplainTab)
if !ok {
return false
}
- return EqualsRefOfExplainTab(a, b)
+ return cmp.RefOfExplainTab(a, b)
case Exprs:
b, ok := inB.(Exprs)
if !ok {
return false
}
- return EqualsExprs(a, b)
+ return cmp.Exprs(a, b)
case *ExtractFuncExpr:
b, ok := inB.(*ExtractFuncExpr)
if !ok {
return false
}
- return EqualsRefOfExtractFuncExpr(a, b)
+ return cmp.RefOfExtractFuncExpr(a, b)
case *ExtractValueExpr:
b, ok := inB.(*ExtractValueExpr)
if !ok {
return false
}
- return EqualsRefOfExtractValueExpr(a, b)
+ return cmp.RefOfExtractValueExpr(a, b)
case *ExtractedSubquery:
b, ok := inB.(*ExtractedSubquery)
if !ok {
return false
}
- return EqualsRefOfExtractedSubquery(a, b)
+ return cmp.RefOfExtractedSubquery(a, b)
case *FirstOrLastValueExpr:
b, ok := inB.(*FirstOrLastValueExpr)
if !ok {
return false
}
- return EqualsRefOfFirstOrLastValueExpr(a, b)
+ return cmp.RefOfFirstOrLastValueExpr(a, b)
case *Flush:
b, ok := inB.(*Flush)
if !ok {
return false
}
- return EqualsRefOfFlush(a, b)
+ return cmp.RefOfFlush(a, b)
case *Force:
b, ok := inB.(*Force)
if !ok {
return false
}
- return EqualsRefOfForce(a, b)
+ return cmp.RefOfForce(a, b)
case *ForeignKeyDefinition:
b, ok := inB.(*ForeignKeyDefinition)
if !ok {
return false
}
- return EqualsRefOfForeignKeyDefinition(a, b)
+ return cmp.RefOfForeignKeyDefinition(a, b)
case *FrameClause:
b, ok := inB.(*FrameClause)
if !ok {
return false
}
- return EqualsRefOfFrameClause(a, b)
+ return cmp.RefOfFrameClause(a, b)
case *FramePoint:
b, ok := inB.(*FramePoint)
if !ok {
return false
}
- return EqualsRefOfFramePoint(a, b)
+ return cmp.RefOfFramePoint(a, b)
case *FromFirstLastClause:
b, ok := inB.(*FromFirstLastClause)
if !ok {
return false
}
- return EqualsRefOfFromFirstLastClause(a, b)
+ return cmp.RefOfFromFirstLastClause(a, b)
case *FuncExpr:
b, ok := inB.(*FuncExpr)
if !ok {
return false
}
- return EqualsRefOfFuncExpr(a, b)
+ return cmp.RefOfFuncExpr(a, b)
case *GTIDFuncExpr:
b, ok := inB.(*GTIDFuncExpr)
if !ok {
return false
}
- return EqualsRefOfGTIDFuncExpr(a, b)
+ return cmp.RefOfGTIDFuncExpr(a, b)
case GroupBy:
b, ok := inB.(GroupBy)
if !ok {
return false
}
- return EqualsGroupBy(a, b)
+ return cmp.GroupBy(a, b)
case *GroupConcatExpr:
b, ok := inB.(*GroupConcatExpr)
if !ok {
return false
}
- return EqualsRefOfGroupConcatExpr(a, b)
+ return cmp.RefOfGroupConcatExpr(a, b)
case IdentifierCI:
b, ok := inB.(IdentifierCI)
if !ok {
return false
}
- return EqualsIdentifierCI(a, b)
+ return cmp.IdentifierCI(a, b)
case IdentifierCS:
b, ok := inB.(IdentifierCS)
if !ok {
return false
}
- return EqualsIdentifierCS(a, b)
+ return cmp.IdentifierCS(a, b)
case *IndexDefinition:
b, ok := inB.(*IndexDefinition)
if !ok {
return false
}
- return EqualsRefOfIndexDefinition(a, b)
+ return cmp.RefOfIndexDefinition(a, b)
case *IndexHint:
b, ok := inB.(*IndexHint)
if !ok {
return false
}
- return EqualsRefOfIndexHint(a, b)
+ return cmp.RefOfIndexHint(a, b)
case IndexHints:
b, ok := inB.(IndexHints)
if !ok {
return false
}
- return EqualsIndexHints(a, b)
+ return cmp.IndexHints(a, b)
case *IndexInfo:
b, ok := inB.(*IndexInfo)
if !ok {
return false
}
- return EqualsRefOfIndexInfo(a, b)
+ return cmp.RefOfIndexInfo(a, b)
case *Insert:
b, ok := inB.(*Insert)
if !ok {
return false
}
- return EqualsRefOfInsert(a, b)
+ return cmp.RefOfInsert(a, b)
case *InsertExpr:
b, ok := inB.(*InsertExpr)
if !ok {
return false
}
- return EqualsRefOfInsertExpr(a, b)
+ return cmp.RefOfInsertExpr(a, b)
case *IntervalExpr:
b, ok := inB.(*IntervalExpr)
if !ok {
return false
}
- return EqualsRefOfIntervalExpr(a, b)
+ return cmp.RefOfIntervalExpr(a, b)
case *IntervalFuncExpr:
b, ok := inB.(*IntervalFuncExpr)
if !ok {
return false
}
- return EqualsRefOfIntervalFuncExpr(a, b)
+ return cmp.RefOfIntervalFuncExpr(a, b)
case *IntroducerExpr:
b, ok := inB.(*IntroducerExpr)
if !ok {
return false
}
- return EqualsRefOfIntroducerExpr(a, b)
+ return cmp.RefOfIntroducerExpr(a, b)
case *IsExpr:
b, ok := inB.(*IsExpr)
if !ok {
return false
}
- return EqualsRefOfIsExpr(a, b)
- case IsolationLevel:
- b, ok := inB.(IsolationLevel)
- if !ok {
- return false
- }
- return a == b
+ return cmp.RefOfIsExpr(a, b)
case *JSONArrayExpr:
b, ok := inB.(*JSONArrayExpr)
if !ok {
return false
}
- return EqualsRefOfJSONArrayExpr(a, b)
+ return cmp.RefOfJSONArrayExpr(a, b)
case *JSONAttributesExpr:
b, ok := inB.(*JSONAttributesExpr)
if !ok {
return false
}
- return EqualsRefOfJSONAttributesExpr(a, b)
+ return cmp.RefOfJSONAttributesExpr(a, b)
case *JSONContainsExpr:
b, ok := inB.(*JSONContainsExpr)
if !ok {
return false
}
- return EqualsRefOfJSONContainsExpr(a, b)
+ return cmp.RefOfJSONContainsExpr(a, b)
case *JSONContainsPathExpr:
b, ok := inB.(*JSONContainsPathExpr)
if !ok {
return false
}
- return EqualsRefOfJSONContainsPathExpr(a, b)
+ return cmp.RefOfJSONContainsPathExpr(a, b)
case *JSONExtractExpr:
b, ok := inB.(*JSONExtractExpr)
if !ok {
return false
}
- return EqualsRefOfJSONExtractExpr(a, b)
+ return cmp.RefOfJSONExtractExpr(a, b)
case *JSONKeysExpr:
b, ok := inB.(*JSONKeysExpr)
if !ok {
return false
}
- return EqualsRefOfJSONKeysExpr(a, b)
+ return cmp.RefOfJSONKeysExpr(a, b)
case *JSONObjectExpr:
b, ok := inB.(*JSONObjectExpr)
if !ok {
return false
}
- return EqualsRefOfJSONObjectExpr(a, b)
- case JSONObjectParam:
- b, ok := inB.(JSONObjectParam)
+ return cmp.RefOfJSONObjectExpr(a, b)
+ case *JSONObjectParam:
+ b, ok := inB.(*JSONObjectParam)
if !ok {
return false
}
- return EqualsJSONObjectParam(a, b)
+ return cmp.RefOfJSONObjectParam(a, b)
case *JSONOverlapsExpr:
b, ok := inB.(*JSONOverlapsExpr)
if !ok {
return false
}
- return EqualsRefOfJSONOverlapsExpr(a, b)
+ return cmp.RefOfJSONOverlapsExpr(a, b)
case *JSONPrettyExpr:
b, ok := inB.(*JSONPrettyExpr)
if !ok {
return false
}
- return EqualsRefOfJSONPrettyExpr(a, b)
+ return cmp.RefOfJSONPrettyExpr(a, b)
case *JSONQuoteExpr:
b, ok := inB.(*JSONQuoteExpr)
if !ok {
return false
}
- return EqualsRefOfJSONQuoteExpr(a, b)
+ return cmp.RefOfJSONQuoteExpr(a, b)
case *JSONRemoveExpr:
b, ok := inB.(*JSONRemoveExpr)
if !ok {
return false
}
- return EqualsRefOfJSONRemoveExpr(a, b)
+ return cmp.RefOfJSONRemoveExpr(a, b)
case *JSONSchemaValidFuncExpr:
b, ok := inB.(*JSONSchemaValidFuncExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSchemaValidFuncExpr(a, b)
+ return cmp.RefOfJSONSchemaValidFuncExpr(a, b)
case *JSONSchemaValidationReportFuncExpr:
b, ok := inB.(*JSONSchemaValidationReportFuncExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSchemaValidationReportFuncExpr(a, b)
+ return cmp.RefOfJSONSchemaValidationReportFuncExpr(a, b)
case *JSONSearchExpr:
b, ok := inB.(*JSONSearchExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSearchExpr(a, b)
+ return cmp.RefOfJSONSearchExpr(a, b)
case *JSONStorageFreeExpr:
b, ok := inB.(*JSONStorageFreeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONStorageFreeExpr(a, b)
+ return cmp.RefOfJSONStorageFreeExpr(a, b)
case *JSONStorageSizeExpr:
b, ok := inB.(*JSONStorageSizeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONStorageSizeExpr(a, b)
+ return cmp.RefOfJSONStorageSizeExpr(a, b)
case *JSONTableExpr:
b, ok := inB.(*JSONTableExpr)
if !ok {
return false
}
- return EqualsRefOfJSONTableExpr(a, b)
+ return cmp.RefOfJSONTableExpr(a, b)
case *JSONUnquoteExpr:
b, ok := inB.(*JSONUnquoteExpr)
if !ok {
return false
}
- return EqualsRefOfJSONUnquoteExpr(a, b)
+ return cmp.RefOfJSONUnquoteExpr(a, b)
case *JSONValueExpr:
b, ok := inB.(*JSONValueExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueExpr(a, b)
+ return cmp.RefOfJSONValueExpr(a, b)
case *JSONValueMergeExpr:
b, ok := inB.(*JSONValueMergeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueMergeExpr(a, b)
+ return cmp.RefOfJSONValueMergeExpr(a, b)
case *JSONValueModifierExpr:
b, ok := inB.(*JSONValueModifierExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueModifierExpr(a, b)
+ return cmp.RefOfJSONValueModifierExpr(a, b)
case *JoinCondition:
b, ok := inB.(*JoinCondition)
if !ok {
return false
}
- return EqualsRefOfJoinCondition(a, b)
+ return cmp.RefOfJoinCondition(a, b)
case *JoinTableExpr:
b, ok := inB.(*JoinTableExpr)
if !ok {
return false
}
- return EqualsRefOfJoinTableExpr(a, b)
+ return cmp.RefOfJoinTableExpr(a, b)
case *JtColumnDefinition:
b, ok := inB.(*JtColumnDefinition)
if !ok {
return false
}
- return EqualsRefOfJtColumnDefinition(a, b)
+ return cmp.RefOfJtColumnDefinition(a, b)
case *JtOnResponse:
b, ok := inB.(*JtOnResponse)
if !ok {
return false
}
- return EqualsRefOfJtOnResponse(a, b)
+ return cmp.RefOfJtOnResponse(a, b)
case *KeyState:
b, ok := inB.(*KeyState)
if !ok {
return false
}
- return EqualsRefOfKeyState(a, b)
+ return cmp.RefOfKeyState(a, b)
case *LagLeadExpr:
b, ok := inB.(*LagLeadExpr)
if !ok {
return false
}
- return EqualsRefOfLagLeadExpr(a, b)
+ return cmp.RefOfLagLeadExpr(a, b)
case *Limit:
b, ok := inB.(*Limit)
if !ok {
return false
}
- return EqualsRefOfLimit(a, b)
+ return cmp.RefOfLimit(a, b)
case ListArg:
b, ok := inB.(ListArg)
if !ok {
@@ -781,37 +769,37 @@ func EqualsSQLNode(inA, inB SQLNode) bool {
if !ok {
return false
}
- return EqualsRefOfLiteral(a, b)
+ return cmp.RefOfLiteral(a, b)
case *Load:
b, ok := inB.(*Load)
if !ok {
return false
}
- return EqualsRefOfLoad(a, b)
+ return cmp.RefOfLoad(a, b)
case *LocateExpr:
b, ok := inB.(*LocateExpr)
if !ok {
return false
}
- return EqualsRefOfLocateExpr(a, b)
+ return cmp.RefOfLocateExpr(a, b)
case *LockOption:
b, ok := inB.(*LockOption)
if !ok {
return false
}
- return EqualsRefOfLockOption(a, b)
+ return cmp.RefOfLockOption(a, b)
case *LockTables:
b, ok := inB.(*LockTables)
if !ok {
return false
}
- return EqualsRefOfLockTables(a, b)
+ return cmp.RefOfLockTables(a, b)
case *LockingFunc:
b, ok := inB.(*LockingFunc)
if !ok {
return false
}
- return EqualsRefOfLockingFunc(a, b)
+ return cmp.RefOfLockingFunc(a, b)
case MatchAction:
b, ok := inB.(MatchAction)
if !ok {
@@ -823,205 +811,205 @@ func EqualsSQLNode(inA, inB SQLNode) bool {
if !ok {
return false
}
- return EqualsRefOfMatchExpr(a, b)
+ return cmp.RefOfMatchExpr(a, b)
case *Max:
b, ok := inB.(*Max)
if !ok {
return false
}
- return EqualsRefOfMax(a, b)
+ return cmp.RefOfMax(a, b)
case *MemberOfExpr:
b, ok := inB.(*MemberOfExpr)
if !ok {
return false
}
- return EqualsRefOfMemberOfExpr(a, b)
+ return cmp.RefOfMemberOfExpr(a, b)
case *Min:
b, ok := inB.(*Min)
if !ok {
return false
}
- return EqualsRefOfMin(a, b)
+ return cmp.RefOfMin(a, b)
case *ModifyColumn:
b, ok := inB.(*ModifyColumn)
if !ok {
return false
}
- return EqualsRefOfModifyColumn(a, b)
+ return cmp.RefOfModifyColumn(a, b)
case *NTHValueExpr:
b, ok := inB.(*NTHValueExpr)
if !ok {
return false
}
- return EqualsRefOfNTHValueExpr(a, b)
+ return cmp.RefOfNTHValueExpr(a, b)
case *NamedWindow:
b, ok := inB.(*NamedWindow)
if !ok {
return false
}
- return EqualsRefOfNamedWindow(a, b)
+ return cmp.RefOfNamedWindow(a, b)
case NamedWindows:
b, ok := inB.(NamedWindows)
if !ok {
return false
}
- return EqualsNamedWindows(a, b)
+ return cmp.NamedWindows(a, b)
case *Nextval:
b, ok := inB.(*Nextval)
if !ok {
return false
}
- return EqualsRefOfNextval(a, b)
+ return cmp.RefOfNextval(a, b)
case *NotExpr:
b, ok := inB.(*NotExpr)
if !ok {
return false
}
- return EqualsRefOfNotExpr(a, b)
+ return cmp.RefOfNotExpr(a, b)
case *NtileExpr:
b, ok := inB.(*NtileExpr)
if !ok {
return false
}
- return EqualsRefOfNtileExpr(a, b)
+ return cmp.RefOfNtileExpr(a, b)
case *NullTreatmentClause:
b, ok := inB.(*NullTreatmentClause)
if !ok {
return false
}
- return EqualsRefOfNullTreatmentClause(a, b)
+ return cmp.RefOfNullTreatmentClause(a, b)
case *NullVal:
b, ok := inB.(*NullVal)
if !ok {
return false
}
- return EqualsRefOfNullVal(a, b)
+ return cmp.RefOfNullVal(a, b)
case *Offset:
b, ok := inB.(*Offset)
if !ok {
return false
}
- return EqualsRefOfOffset(a, b)
+ return cmp.RefOfOffset(a, b)
case OnDup:
b, ok := inB.(OnDup)
if !ok {
return false
}
- return EqualsOnDup(a, b)
+ return cmp.OnDup(a, b)
case *OptLike:
b, ok := inB.(*OptLike)
if !ok {
return false
}
- return EqualsRefOfOptLike(a, b)
+ return cmp.RefOfOptLike(a, b)
case *OrExpr:
b, ok := inB.(*OrExpr)
if !ok {
return false
}
- return EqualsRefOfOrExpr(a, b)
+ return cmp.RefOfOrExpr(a, b)
case *Order:
b, ok := inB.(*Order)
if !ok {
return false
}
- return EqualsRefOfOrder(a, b)
+ return cmp.RefOfOrder(a, b)
case OrderBy:
b, ok := inB.(OrderBy)
if !ok {
return false
}
- return EqualsOrderBy(a, b)
+ return cmp.OrderBy(a, b)
case *OrderByOption:
b, ok := inB.(*OrderByOption)
if !ok {
return false
}
- return EqualsRefOfOrderByOption(a, b)
+ return cmp.RefOfOrderByOption(a, b)
case *OtherAdmin:
b, ok := inB.(*OtherAdmin)
if !ok {
return false
}
- return EqualsRefOfOtherAdmin(a, b)
+ return cmp.RefOfOtherAdmin(a, b)
case *OtherRead:
b, ok := inB.(*OtherRead)
if !ok {
return false
}
- return EqualsRefOfOtherRead(a, b)
+ return cmp.RefOfOtherRead(a, b)
case *OverClause:
b, ok := inB.(*OverClause)
if !ok {
return false
}
- return EqualsRefOfOverClause(a, b)
+ return cmp.RefOfOverClause(a, b)
case *ParenTableExpr:
b, ok := inB.(*ParenTableExpr)
if !ok {
return false
}
- return EqualsRefOfParenTableExpr(a, b)
+ return cmp.RefOfParenTableExpr(a, b)
case *ParsedComments:
b, ok := inB.(*ParsedComments)
if !ok {
return false
}
- return EqualsRefOfParsedComments(a, b)
+ return cmp.RefOfParsedComments(a, b)
case *PartitionDefinition:
b, ok := inB.(*PartitionDefinition)
if !ok {
return false
}
- return EqualsRefOfPartitionDefinition(a, b)
+ return cmp.RefOfPartitionDefinition(a, b)
case *PartitionDefinitionOptions:
b, ok := inB.(*PartitionDefinitionOptions)
if !ok {
return false
}
- return EqualsRefOfPartitionDefinitionOptions(a, b)
+ return cmp.RefOfPartitionDefinitionOptions(a, b)
case *PartitionEngine:
b, ok := inB.(*PartitionEngine)
if !ok {
return false
}
- return EqualsRefOfPartitionEngine(a, b)
+ return cmp.RefOfPartitionEngine(a, b)
case *PartitionOption:
b, ok := inB.(*PartitionOption)
if !ok {
return false
}
- return EqualsRefOfPartitionOption(a, b)
+ return cmp.RefOfPartitionOption(a, b)
case *PartitionSpec:
b, ok := inB.(*PartitionSpec)
if !ok {
return false
}
- return EqualsRefOfPartitionSpec(a, b)
+ return cmp.RefOfPartitionSpec(a, b)
case *PartitionValueRange:
b, ok := inB.(*PartitionValueRange)
if !ok {
return false
}
- return EqualsRefOfPartitionValueRange(a, b)
+ return cmp.RefOfPartitionValueRange(a, b)
case Partitions:
b, ok := inB.(Partitions)
if !ok {
return false
}
- return EqualsPartitions(a, b)
+ return cmp.Partitions(a, b)
case *PerformanceSchemaFuncExpr:
b, ok := inB.(*PerformanceSchemaFuncExpr)
if !ok {
return false
}
- return EqualsRefOfPerformanceSchemaFuncExpr(a, b)
+ return cmp.RefOfPerformanceSchemaFuncExpr(a, b)
case *PrepareStmt:
b, ok := inB.(*PrepareStmt)
if !ok {
return false
}
- return EqualsRefOfPrepareStmt(a, b)
+ return cmp.RefOfPrepareStmt(a, b)
case ReferenceAction:
b, ok := inB.(ReferenceAction)
if !ok {
@@ -1033,477 +1021,483 @@ func EqualsSQLNode(inA, inB SQLNode) bool {
if !ok {
return false
}
- return EqualsRefOfReferenceDefinition(a, b)
+ return cmp.RefOfReferenceDefinition(a, b)
case *RegexpInstrExpr:
b, ok := inB.(*RegexpInstrExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpInstrExpr(a, b)
+ return cmp.RefOfRegexpInstrExpr(a, b)
case *RegexpLikeExpr:
b, ok := inB.(*RegexpLikeExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpLikeExpr(a, b)
+ return cmp.RefOfRegexpLikeExpr(a, b)
case *RegexpReplaceExpr:
b, ok := inB.(*RegexpReplaceExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpReplaceExpr(a, b)
+ return cmp.RefOfRegexpReplaceExpr(a, b)
case *RegexpSubstrExpr:
b, ok := inB.(*RegexpSubstrExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpSubstrExpr(a, b)
+ return cmp.RefOfRegexpSubstrExpr(a, b)
case *Release:
b, ok := inB.(*Release)
if !ok {
return false
}
- return EqualsRefOfRelease(a, b)
+ return cmp.RefOfRelease(a, b)
case *RenameColumn:
b, ok := inB.(*RenameColumn)
if !ok {
return false
}
- return EqualsRefOfRenameColumn(a, b)
+ return cmp.RefOfRenameColumn(a, b)
case *RenameIndex:
b, ok := inB.(*RenameIndex)
if !ok {
return false
}
- return EqualsRefOfRenameIndex(a, b)
+ return cmp.RefOfRenameIndex(a, b)
case *RenameTable:
b, ok := inB.(*RenameTable)
if !ok {
return false
}
- return EqualsRefOfRenameTable(a, b)
+ return cmp.RefOfRenameTable(a, b)
case *RenameTableName:
b, ok := inB.(*RenameTableName)
if !ok {
return false
}
- return EqualsRefOfRenameTableName(a, b)
+ return cmp.RefOfRenameTableName(a, b)
case *RevertMigration:
b, ok := inB.(*RevertMigration)
if !ok {
return false
}
- return EqualsRefOfRevertMigration(a, b)
+ return cmp.RefOfRevertMigration(a, b)
case *Rollback:
b, ok := inB.(*Rollback)
if !ok {
return false
}
- return EqualsRefOfRollback(a, b)
+ return cmp.RefOfRollback(a, b)
case RootNode:
b, ok := inB.(RootNode)
if !ok {
return false
}
- return EqualsRootNode(a, b)
+ return cmp.RootNode(a, b)
case *SRollback:
b, ok := inB.(*SRollback)
if !ok {
return false
}
- return EqualsRefOfSRollback(a, b)
+ return cmp.RefOfSRollback(a, b)
case *Savepoint:
b, ok := inB.(*Savepoint)
if !ok {
return false
}
- return EqualsRefOfSavepoint(a, b)
+ return cmp.RefOfSavepoint(a, b)
case *Select:
b, ok := inB.(*Select)
if !ok {
return false
}
- return EqualsRefOfSelect(a, b)
+ return cmp.RefOfSelect(a, b)
case SelectExprs:
b, ok := inB.(SelectExprs)
if !ok {
return false
}
- return EqualsSelectExprs(a, b)
+ return cmp.SelectExprs(a, b)
case *SelectInto:
b, ok := inB.(*SelectInto)
if !ok {
return false
}
- return EqualsRefOfSelectInto(a, b)
+ return cmp.RefOfSelectInto(a, b)
case *Set:
b, ok := inB.(*Set)
if !ok {
return false
}
- return EqualsRefOfSet(a, b)
+ return cmp.RefOfSet(a, b)
case *SetExpr:
b, ok := inB.(*SetExpr)
if !ok {
return false
}
- return EqualsRefOfSetExpr(a, b)
+ return cmp.RefOfSetExpr(a, b)
case SetExprs:
b, ok := inB.(SetExprs)
if !ok {
return false
}
- return EqualsSetExprs(a, b)
- case *SetTransaction:
- b, ok := inB.(*SetTransaction)
- if !ok {
- return false
- }
- return EqualsRefOfSetTransaction(a, b)
+ return cmp.SetExprs(a, b)
case *Show:
b, ok := inB.(*Show)
if !ok {
return false
}
- return EqualsRefOfShow(a, b)
+ return cmp.RefOfShow(a, b)
case *ShowBasic:
b, ok := inB.(*ShowBasic)
if !ok {
return false
}
- return EqualsRefOfShowBasic(a, b)
+ return cmp.RefOfShowBasic(a, b)
case *ShowCreate:
b, ok := inB.(*ShowCreate)
if !ok {
return false
}
- return EqualsRefOfShowCreate(a, b)
+ return cmp.RefOfShowCreate(a, b)
case *ShowFilter:
b, ok := inB.(*ShowFilter)
if !ok {
return false
}
- return EqualsRefOfShowFilter(a, b)
+ return cmp.RefOfShowFilter(a, b)
case *ShowMigrationLogs:
b, ok := inB.(*ShowMigrationLogs)
if !ok {
return false
}
- return EqualsRefOfShowMigrationLogs(a, b)
+ return cmp.RefOfShowMigrationLogs(a, b)
case *ShowOther:
b, ok := inB.(*ShowOther)
if !ok {
return false
}
- return EqualsRefOfShowOther(a, b)
+ return cmp.RefOfShowOther(a, b)
case *ShowThrottledApps:
b, ok := inB.(*ShowThrottledApps)
if !ok {
return false
}
- return EqualsRefOfShowThrottledApps(a, b)
+ return cmp.RefOfShowThrottledApps(a, b)
+ case *ShowThrottlerStatus:
+ b, ok := inB.(*ShowThrottlerStatus)
+ if !ok {
+ return false
+ }
+ return cmp.RefOfShowThrottlerStatus(a, b)
case *StarExpr:
b, ok := inB.(*StarExpr)
if !ok {
return false
}
- return EqualsRefOfStarExpr(a, b)
+ return cmp.RefOfStarExpr(a, b)
case *Std:
b, ok := inB.(*Std)
if !ok {
return false
}
- return EqualsRefOfStd(a, b)
+ return cmp.RefOfStd(a, b)
case *StdDev:
b, ok := inB.(*StdDev)
if !ok {
return false
}
- return EqualsRefOfStdDev(a, b)
+ return cmp.RefOfStdDev(a, b)
case *StdPop:
b, ok := inB.(*StdPop)
if !ok {
return false
}
- return EqualsRefOfStdPop(a, b)
+ return cmp.RefOfStdPop(a, b)
case *StdSamp:
b, ok := inB.(*StdSamp)
if !ok {
return false
}
- return EqualsRefOfStdSamp(a, b)
+ return cmp.RefOfStdSamp(a, b)
case *Stream:
b, ok := inB.(*Stream)
if !ok {
return false
}
- return EqualsRefOfStream(a, b)
+ return cmp.RefOfStream(a, b)
case *SubPartition:
b, ok := inB.(*SubPartition)
if !ok {
return false
}
- return EqualsRefOfSubPartition(a, b)
+ return cmp.RefOfSubPartition(a, b)
case *SubPartitionDefinition:
b, ok := inB.(*SubPartitionDefinition)
if !ok {
return false
}
- return EqualsRefOfSubPartitionDefinition(a, b)
+ return cmp.RefOfSubPartitionDefinition(a, b)
case *SubPartitionDefinitionOptions:
b, ok := inB.(*SubPartitionDefinitionOptions)
if !ok {
return false
}
- return EqualsRefOfSubPartitionDefinitionOptions(a, b)
+ return cmp.RefOfSubPartitionDefinitionOptions(a, b)
case SubPartitionDefinitions:
b, ok := inB.(SubPartitionDefinitions)
if !ok {
return false
}
- return EqualsSubPartitionDefinitions(a, b)
+ return cmp.SubPartitionDefinitions(a, b)
case *Subquery:
b, ok := inB.(*Subquery)
if !ok {
return false
}
- return EqualsRefOfSubquery(a, b)
+ return cmp.RefOfSubquery(a, b)
case *SubstrExpr:
b, ok := inB.(*SubstrExpr)
if !ok {
return false
}
- return EqualsRefOfSubstrExpr(a, b)
+ return cmp.RefOfSubstrExpr(a, b)
case *Sum:
b, ok := inB.(*Sum)
if !ok {
return false
}
- return EqualsRefOfSum(a, b)
+ return cmp.RefOfSum(a, b)
case TableExprs:
b, ok := inB.(TableExprs)
if !ok {
return false
}
- return EqualsTableExprs(a, b)
+ return cmp.TableExprs(a, b)
case TableName:
b, ok := inB.(TableName)
if !ok {
return false
}
- return EqualsTableName(a, b)
+ return cmp.TableName(a, b)
case TableNames:
b, ok := inB.(TableNames)
if !ok {
return false
}
- return EqualsTableNames(a, b)
+ return cmp.TableNames(a, b)
case TableOptions:
b, ok := inB.(TableOptions)
if !ok {
return false
}
- return EqualsTableOptions(a, b)
+ return cmp.TableOptions(a, b)
case *TableSpec:
b, ok := inB.(*TableSpec)
if !ok {
return false
}
- return EqualsRefOfTableSpec(a, b)
+ return cmp.RefOfTableSpec(a, b)
case *TablespaceOperation:
b, ok := inB.(*TablespaceOperation)
if !ok {
return false
}
- return EqualsRefOfTablespaceOperation(a, b)
+ return cmp.RefOfTablespaceOperation(a, b)
case *TimestampFuncExpr:
b, ok := inB.(*TimestampFuncExpr)
if !ok {
return false
}
- return EqualsRefOfTimestampFuncExpr(a, b)
+ return cmp.RefOfTimestampFuncExpr(a, b)
case *TrimFuncExpr:
b, ok := inB.(*TrimFuncExpr)
if !ok {
return false
}
- return EqualsRefOfTrimFuncExpr(a, b)
+ return cmp.RefOfTrimFuncExpr(a, b)
case *TruncateTable:
b, ok := inB.(*TruncateTable)
if !ok {
return false
}
- return EqualsRefOfTruncateTable(a, b)
+ return cmp.RefOfTruncateTable(a, b)
case *UnaryExpr:
b, ok := inB.(*UnaryExpr)
if !ok {
return false
}
- return EqualsRefOfUnaryExpr(a, b)
+ return cmp.RefOfUnaryExpr(a, b)
case *Union:
b, ok := inB.(*Union)
if !ok {
return false
}
- return EqualsRefOfUnion(a, b)
+ return cmp.RefOfUnion(a, b)
case *UnlockTables:
b, ok := inB.(*UnlockTables)
if !ok {
return false
}
- return EqualsRefOfUnlockTables(a, b)
+ return cmp.RefOfUnlockTables(a, b)
case *Update:
b, ok := inB.(*Update)
if !ok {
return false
}
- return EqualsRefOfUpdate(a, b)
+ return cmp.RefOfUpdate(a, b)
case *UpdateExpr:
b, ok := inB.(*UpdateExpr)
if !ok {
return false
}
- return EqualsRefOfUpdateExpr(a, b)
+ return cmp.RefOfUpdateExpr(a, b)
case UpdateExprs:
b, ok := inB.(UpdateExprs)
if !ok {
return false
}
- return EqualsUpdateExprs(a, b)
+ return cmp.UpdateExprs(a, b)
case *UpdateXMLExpr:
b, ok := inB.(*UpdateXMLExpr)
if !ok {
return false
}
- return EqualsRefOfUpdateXMLExpr(a, b)
+ return cmp.RefOfUpdateXMLExpr(a, b)
case *Use:
b, ok := inB.(*Use)
if !ok {
return false
}
- return EqualsRefOfUse(a, b)
+ return cmp.RefOfUse(a, b)
+ case *VExplainStmt:
+ b, ok := inB.(*VExplainStmt)
+ if !ok {
+ return false
+ }
+ return cmp.RefOfVExplainStmt(a, b)
case *VStream:
b, ok := inB.(*VStream)
if !ok {
return false
}
- return EqualsRefOfVStream(a, b)
+ return cmp.RefOfVStream(a, b)
case ValTuple:
b, ok := inB.(ValTuple)
if !ok {
return false
}
- return EqualsValTuple(a, b)
+ return cmp.ValTuple(a, b)
case *Validation:
b, ok := inB.(*Validation)
if !ok {
return false
}
- return EqualsRefOfValidation(a, b)
+ return cmp.RefOfValidation(a, b)
case Values:
b, ok := inB.(Values)
if !ok {
return false
}
- return EqualsValues(a, b)
+ return cmp.Values(a, b)
case *ValuesFuncExpr:
b, ok := inB.(*ValuesFuncExpr)
if !ok {
return false
}
- return EqualsRefOfValuesFuncExpr(a, b)
+ return cmp.RefOfValuesFuncExpr(a, b)
case *VarPop:
b, ok := inB.(*VarPop)
if !ok {
return false
}
- return EqualsRefOfVarPop(a, b)
+ return cmp.RefOfVarPop(a, b)
case *VarSamp:
b, ok := inB.(*VarSamp)
if !ok {
return false
}
- return EqualsRefOfVarSamp(a, b)
+ return cmp.RefOfVarSamp(a, b)
case *Variable:
b, ok := inB.(*Variable)
if !ok {
return false
}
- return EqualsRefOfVariable(a, b)
+ return cmp.RefOfVariable(a, b)
case *Variance:
b, ok := inB.(*Variance)
if !ok {
return false
}
- return EqualsRefOfVariance(a, b)
+ return cmp.RefOfVariance(a, b)
case VindexParam:
b, ok := inB.(VindexParam)
if !ok {
return false
}
- return EqualsVindexParam(a, b)
+ return cmp.VindexParam(a, b)
case *VindexSpec:
b, ok := inB.(*VindexSpec)
if !ok {
return false
}
- return EqualsRefOfVindexSpec(a, b)
+ return cmp.RefOfVindexSpec(a, b)
case *WeightStringFuncExpr:
b, ok := inB.(*WeightStringFuncExpr)
if !ok {
return false
}
- return EqualsRefOfWeightStringFuncExpr(a, b)
+ return cmp.RefOfWeightStringFuncExpr(a, b)
case *When:
b, ok := inB.(*When)
if !ok {
return false
}
- return EqualsRefOfWhen(a, b)
+ return cmp.RefOfWhen(a, b)
case *Where:
b, ok := inB.(*Where)
if !ok {
return false
}
- return EqualsRefOfWhere(a, b)
+ return cmp.RefOfWhere(a, b)
case *WindowDefinition:
b, ok := inB.(*WindowDefinition)
if !ok {
return false
}
- return EqualsRefOfWindowDefinition(a, b)
+ return cmp.RefOfWindowDefinition(a, b)
case WindowDefinitions:
b, ok := inB.(WindowDefinitions)
if !ok {
return false
}
- return EqualsWindowDefinitions(a, b)
+ return cmp.WindowDefinitions(a, b)
case *WindowSpecification:
b, ok := inB.(*WindowSpecification)
if !ok {
return false
}
- return EqualsRefOfWindowSpecification(a, b)
+ return cmp.RefOfWindowSpecification(a, b)
case *With:
b, ok := inB.(*With)
if !ok {
return false
}
- return EqualsRefOfWith(a, b)
+ return cmp.RefOfWith(a, b)
case *XorExpr:
b, ok := inB.(*XorExpr)
if !ok {
return false
}
- return EqualsRefOfXorExpr(a, b)
+ return cmp.RefOfXorExpr(a, b)
default:
// this should never happen
return false
}
}
-// EqualsRefOfAddColumns does deep equals between the two objects.
-func EqualsRefOfAddColumns(a, b *AddColumns) bool {
+// RefOfAddColumns does deep equals between the two objects.
+func (cmp *Comparator) RefOfAddColumns(a, b *AddColumns) bool {
if a == b {
return true
}
@@ -1511,61 +1505,61 @@ func EqualsRefOfAddColumns(a, b *AddColumns) bool {
return false
}
return a.First == b.First &&
- EqualsSliceOfRefOfColumnDefinition(a.Columns, b.Columns) &&
- EqualsRefOfColName(a.After, b.After)
+ cmp.SliceOfRefOfColumnDefinition(a.Columns, b.Columns) &&
+ cmp.RefOfColName(a.After, b.After)
}
-// EqualsRefOfAddConstraintDefinition does deep equals between the two objects.
-func EqualsRefOfAddConstraintDefinition(a, b *AddConstraintDefinition) bool {
+// RefOfAddConstraintDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfAddConstraintDefinition(a, b *AddConstraintDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfConstraintDefinition(a.ConstraintDefinition, b.ConstraintDefinition)
+ return cmp.RefOfConstraintDefinition(a.ConstraintDefinition, b.ConstraintDefinition)
}
-// EqualsRefOfAddIndexDefinition does deep equals between the two objects.
-func EqualsRefOfAddIndexDefinition(a, b *AddIndexDefinition) bool {
+// RefOfAddIndexDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfAddIndexDefinition(a, b *AddIndexDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfIndexDefinition(a.IndexDefinition, b.IndexDefinition)
+ return cmp.RefOfIndexDefinition(a.IndexDefinition, b.IndexDefinition)
}
-// EqualsRefOfAliasedExpr does deep equals between the two objects.
-func EqualsRefOfAliasedExpr(a, b *AliasedExpr) bool {
+// RefOfAliasedExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfAliasedExpr(a, b *AliasedExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsIdentifierCI(a.As, b.As)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.IdentifierCI(a.As, b.As)
}
-// EqualsRefOfAliasedTableExpr does deep equals between the two objects.
-func EqualsRefOfAliasedTableExpr(a, b *AliasedTableExpr) bool {
+// RefOfAliasedTableExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfAliasedTableExpr(a, b *AliasedTableExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSimpleTableExpr(a.Expr, b.Expr) &&
- EqualsPartitions(a.Partitions, b.Partitions) &&
- EqualsIdentifierCS(a.As, b.As) &&
- EqualsIndexHints(a.Hints, b.Hints) &&
- EqualsColumns(a.Columns, b.Columns)
+ return cmp.SimpleTableExpr(a.Expr, b.Expr) &&
+ cmp.Partitions(a.Partitions, b.Partitions) &&
+ cmp.IdentifierCS(a.As, b.As) &&
+ cmp.IndexHints(a.Hints, b.Hints) &&
+ cmp.Columns(a.Columns, b.Columns)
}
-// EqualsRefOfAlterCharset does deep equals between the two objects.
-func EqualsRefOfAlterCharset(a, b *AlterCharset) bool {
+// RefOfAlterCharset does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterCharset(a, b *AlterCharset) bool {
if a == b {
return true
}
@@ -1576,8 +1570,8 @@ func EqualsRefOfAlterCharset(a, b *AlterCharset) bool {
a.Collate == b.Collate
}
-// EqualsRefOfAlterCheck does deep equals between the two objects.
-func EqualsRefOfAlterCheck(a, b *AlterCheck) bool {
+// RefOfAlterCheck does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterCheck(a, b *AlterCheck) bool {
if a == b {
return true
}
@@ -1585,11 +1579,11 @@ func EqualsRefOfAlterCheck(a, b *AlterCheck) bool {
return false
}
return a.Enforced == b.Enforced &&
- EqualsIdentifierCI(a.Name, b.Name)
+ cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfAlterColumn does deep equals between the two objects.
-func EqualsRefOfAlterColumn(a, b *AlterColumn) bool {
+// RefOfAlterColumn does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterColumn(a, b *AlterColumn) bool {
if a == b {
return true
}
@@ -1597,13 +1591,13 @@ func EqualsRefOfAlterColumn(a, b *AlterColumn) bool {
return false
}
return a.DropDefault == b.DropDefault &&
- EqualsRefOfColName(a.Column, b.Column) &&
- EqualsExpr(a.DefaultVal, b.DefaultVal) &&
- EqualsRefOfBool(a.Invisible, b.Invisible)
+ cmp.RefOfColName(a.Column, b.Column) &&
+ cmp.Expr(a.DefaultVal, b.DefaultVal) &&
+ cmp.RefOfBool(a.Invisible, b.Invisible)
}
-// EqualsRefOfAlterDatabase does deep equals between the two objects.
-func EqualsRefOfAlterDatabase(a, b *AlterDatabase) bool {
+// RefOfAlterDatabase does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterDatabase(a, b *AlterDatabase) bool {
if a == b {
return true
}
@@ -1612,12 +1606,12 @@ func EqualsRefOfAlterDatabase(a, b *AlterDatabase) bool {
}
return a.UpdateDataDirectory == b.UpdateDataDirectory &&
a.FullyParsed == b.FullyParsed &&
- EqualsIdentifierCS(a.DBName, b.DBName) &&
- EqualsSliceOfDatabaseOption(a.AlterOptions, b.AlterOptions)
+ cmp.IdentifierCS(a.DBName, b.DBName) &&
+ cmp.SliceOfDatabaseOption(a.AlterOptions, b.AlterOptions)
}
-// EqualsRefOfAlterIndex does deep equals between the two objects.
-func EqualsRefOfAlterIndex(a, b *AlterIndex) bool {
+// RefOfAlterIndex does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterIndex(a, b *AlterIndex) bool {
if a == b {
return true
}
@@ -1625,11 +1619,11 @@ func EqualsRefOfAlterIndex(a, b *AlterIndex) bool {
return false
}
return a.Invisible == b.Invisible &&
- EqualsIdentifierCI(a.Name, b.Name)
+ cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfAlterMigration does deep equals between the two objects.
-func EqualsRefOfAlterMigration(a, b *AlterMigration) bool {
+// RefOfAlterMigration does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterMigration(a, b *AlterMigration) bool {
if a == b {
return true
}
@@ -1640,11 +1634,11 @@ func EqualsRefOfAlterMigration(a, b *AlterMigration) bool {
a.Expire == b.Expire &&
a.Shards == b.Shards &&
a.Type == b.Type &&
- EqualsRefOfLiteral(a.Ratio, b.Ratio)
+ cmp.RefOfLiteral(a.Ratio, b.Ratio)
}
-// EqualsRefOfAlterTable does deep equals between the two objects.
-func EqualsRefOfAlterTable(a, b *AlterTable) bool {
+// RefOfAlterTable does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterTable(a, b *AlterTable) bool {
if a == b {
return true
}
@@ -1652,15 +1646,15 @@ func EqualsRefOfAlterTable(a, b *AlterTable) bool {
return false
}
return a.FullyParsed == b.FullyParsed &&
- EqualsTableName(a.Table, b.Table) &&
- EqualsSliceOfAlterOption(a.AlterOptions, b.AlterOptions) &&
- EqualsRefOfPartitionSpec(a.PartitionSpec, b.PartitionSpec) &&
- EqualsRefOfPartitionOption(a.PartitionOption, b.PartitionOption) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.TableName(a.Table, b.Table) &&
+ cmp.SliceOfAlterOption(a.AlterOptions, b.AlterOptions) &&
+ cmp.RefOfPartitionSpec(a.PartitionSpec, b.PartitionSpec) &&
+ cmp.RefOfPartitionOption(a.PartitionOption, b.PartitionOption) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfAlterView does deep equals between the two objects.
-func EqualsRefOfAlterView(a, b *AlterView) bool {
+// RefOfAlterView does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterView(a, b *AlterView) bool {
if a == b {
return true
}
@@ -1670,15 +1664,15 @@ func EqualsRefOfAlterView(a, b *AlterView) bool {
return a.Algorithm == b.Algorithm &&
a.Security == b.Security &&
a.CheckOption == b.CheckOption &&
- EqualsTableName(a.ViewName, b.ViewName) &&
- EqualsRefOfDefiner(a.Definer, b.Definer) &&
- EqualsColumns(a.Columns, b.Columns) &&
- EqualsSelectStatement(a.Select, b.Select) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.TableName(a.ViewName, b.ViewName) &&
+ cmp.RefOfDefiner(a.Definer, b.Definer) &&
+ cmp.Columns(a.Columns, b.Columns) &&
+ cmp.SelectStatement(a.Select, b.Select) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfAlterVschema does deep equals between the two objects.
-func EqualsRefOfAlterVschema(a, b *AlterVschema) bool {
+// RefOfAlterVschema does deep equals between the two objects.
+func (cmp *Comparator) RefOfAlterVschema(a, b *AlterVschema) bool {
if a == b {
return true
}
@@ -1686,26 +1680,26 @@ func EqualsRefOfAlterVschema(a, b *AlterVschema) bool {
return false
}
return a.Action == b.Action &&
- EqualsTableName(a.Table, b.Table) &&
- EqualsRefOfVindexSpec(a.VindexSpec, b.VindexSpec) &&
- EqualsSliceOfIdentifierCI(a.VindexCols, b.VindexCols) &&
- EqualsRefOfAutoIncSpec(a.AutoIncSpec, b.AutoIncSpec)
+ cmp.TableName(a.Table, b.Table) &&
+ cmp.RefOfVindexSpec(a.VindexSpec, b.VindexSpec) &&
+ cmp.SliceOfIdentifierCI(a.VindexCols, b.VindexCols) &&
+ cmp.RefOfAutoIncSpec(a.AutoIncSpec, b.AutoIncSpec)
}
-// EqualsRefOfAndExpr does deep equals between the two objects.
-func EqualsRefOfAndExpr(a, b *AndExpr) bool {
+// RefOfAndExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfAndExpr(a, b *AndExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Left, b.Left) &&
- EqualsExpr(a.Right, b.Right)
+ return cmp.Expr(a.Left, b.Left) &&
+ cmp.Expr(a.Right, b.Right)
}
-// EqualsRefOfArgumentLessWindowExpr does deep equals between the two objects.
-func EqualsRefOfArgumentLessWindowExpr(a, b *ArgumentLessWindowExpr) bool {
+// RefOfArgumentLessWindowExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfArgumentLessWindowExpr(a, b *ArgumentLessWindowExpr) bool {
if a == b {
return true
}
@@ -1713,23 +1707,23 @@ func EqualsRefOfArgumentLessWindowExpr(a, b *ArgumentLessWindowExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsRefOfOverClause(a.OverClause, b.OverClause)
+ cmp.RefOfOverClause(a.OverClause, b.OverClause)
}
-// EqualsRefOfAutoIncSpec does deep equals between the two objects.
-func EqualsRefOfAutoIncSpec(a, b *AutoIncSpec) bool {
+// RefOfAutoIncSpec does deep equals between the two objects.
+func (cmp *Comparator) RefOfAutoIncSpec(a, b *AutoIncSpec) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Column, b.Column) &&
- EqualsTableName(a.Sequence, b.Sequence)
+ return cmp.IdentifierCI(a.Column, b.Column) &&
+ cmp.TableName(a.Sequence, b.Sequence)
}
-// EqualsRefOfAvg does deep equals between the two objects.
-func EqualsRefOfAvg(a, b *Avg) bool {
+// RefOfAvg does deep equals between the two objects.
+func (cmp *Comparator) RefOfAvg(a, b *Avg) bool {
if a == b {
return true
}
@@ -1737,22 +1731,22 @@ func EqualsRefOfAvg(a, b *Avg) bool {
return false
}
return a.Distinct == b.Distinct &&
- EqualsExpr(a.Arg, b.Arg)
+ cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfBegin does deep equals between the two objects.
-func EqualsRefOfBegin(a, b *Begin) bool {
+// RefOfBegin does deep equals between the two objects.
+func (cmp *Comparator) RefOfBegin(a, b *Begin) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return true
+ return cmp.SliceOfTxAccessMode(a.TxAccessModes, b.TxAccessModes)
}
-// EqualsRefOfBetweenExpr does deep equals between the two objects.
-func EqualsRefOfBetweenExpr(a, b *BetweenExpr) bool {
+// RefOfBetweenExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfBetweenExpr(a, b *BetweenExpr) bool {
if a == b {
return true
}
@@ -1760,13 +1754,13 @@ func EqualsRefOfBetweenExpr(a, b *BetweenExpr) bool {
return false
}
return a.IsBetween == b.IsBetween &&
- EqualsExpr(a.Left, b.Left) &&
- EqualsExpr(a.From, b.From) &&
- EqualsExpr(a.To, b.To)
+ cmp.Expr(a.Left, b.Left) &&
+ cmp.Expr(a.From, b.From) &&
+ cmp.Expr(a.To, b.To)
}
-// EqualsRefOfBinaryExpr does deep equals between the two objects.
-func EqualsRefOfBinaryExpr(a, b *BinaryExpr) bool {
+// RefOfBinaryExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfBinaryExpr(a, b *BinaryExpr) bool {
if a == b {
return true
}
@@ -1774,70 +1768,70 @@ func EqualsRefOfBinaryExpr(a, b *BinaryExpr) bool {
return false
}
return a.Operator == b.Operator &&
- EqualsExpr(a.Left, b.Left) &&
- EqualsExpr(a.Right, b.Right)
+ cmp.Expr(a.Left, b.Left) &&
+ cmp.Expr(a.Right, b.Right)
}
-// EqualsRefOfBitAnd does deep equals between the two objects.
-func EqualsRefOfBitAnd(a, b *BitAnd) bool {
+// RefOfBitAnd does deep equals between the two objects.
+func (cmp *Comparator) RefOfBitAnd(a, b *BitAnd) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfBitOr does deep equals between the two objects.
-func EqualsRefOfBitOr(a, b *BitOr) bool {
+// RefOfBitOr does deep equals between the two objects.
+func (cmp *Comparator) RefOfBitOr(a, b *BitOr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfBitXor does deep equals between the two objects.
-func EqualsRefOfBitXor(a, b *BitXor) bool {
+// RefOfBitXor does deep equals between the two objects.
+func (cmp *Comparator) RefOfBitXor(a, b *BitXor) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfCallProc does deep equals between the two objects.
-func EqualsRefOfCallProc(a, b *CallProc) bool {
+// RefOfCallProc does deep equals between the two objects.
+func (cmp *Comparator) RefOfCallProc(a, b *CallProc) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableName(a.Name, b.Name) &&
- EqualsExprs(a.Params, b.Params)
+ return cmp.TableName(a.Name, b.Name) &&
+ cmp.Exprs(a.Params, b.Params)
}
-// EqualsRefOfCaseExpr does deep equals between the two objects.
-func EqualsRefOfCaseExpr(a, b *CaseExpr) bool {
+// RefOfCaseExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfCaseExpr(a, b *CaseExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsSliceOfRefOfWhen(a.Whens, b.Whens) &&
- EqualsExpr(a.Else, b.Else)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.SliceOfRefOfWhen(a.Whens, b.Whens) &&
+ cmp.Expr(a.Else, b.Else)
}
-// EqualsRefOfCastExpr does deep equals between the two objects.
-func EqualsRefOfCastExpr(a, b *CastExpr) bool {
+// RefOfCastExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfCastExpr(a, b *CastExpr) bool {
if a == b {
return true
}
@@ -1845,12 +1839,12 @@ func EqualsRefOfCastExpr(a, b *CastExpr) bool {
return false
}
return a.Array == b.Array &&
- EqualsExpr(a.Expr, b.Expr) &&
- EqualsRefOfConvertType(a.Type, b.Type)
+ cmp.Expr(a.Expr, b.Expr) &&
+ cmp.RefOfConvertType(a.Type, b.Type)
}
-// EqualsRefOfChangeColumn does deep equals between the two objects.
-func EqualsRefOfChangeColumn(a, b *ChangeColumn) bool {
+// RefOfChangeColumn does deep equals between the two objects.
+func (cmp *Comparator) RefOfChangeColumn(a, b *ChangeColumn) bool {
if a == b {
return true
}
@@ -1858,13 +1852,13 @@ func EqualsRefOfChangeColumn(a, b *ChangeColumn) bool {
return false
}
return a.First == b.First &&
- EqualsRefOfColName(a.OldColumn, b.OldColumn) &&
- EqualsRefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) &&
- EqualsRefOfColName(a.After, b.After)
+ cmp.RefOfColName(a.OldColumn, b.OldColumn) &&
+ cmp.RefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) &&
+ cmp.RefOfColName(a.After, b.After)
}
-// EqualsRefOfCharExpr does deep equals between the two objects.
-func EqualsRefOfCharExpr(a, b *CharExpr) bool {
+// RefOfCharExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfCharExpr(a, b *CharExpr) bool {
if a == b {
return true
}
@@ -1872,11 +1866,11 @@ func EqualsRefOfCharExpr(a, b *CharExpr) bool {
return false
}
return a.Charset == b.Charset &&
- EqualsExprs(a.Exprs, b.Exprs)
+ cmp.Exprs(a.Exprs, b.Exprs)
}
-// EqualsRefOfCheckConstraintDefinition does deep equals between the two objects.
-func EqualsRefOfCheckConstraintDefinition(a, b *CheckConstraintDefinition) bool {
+// RefOfCheckConstraintDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfCheckConstraintDefinition(a, b *CheckConstraintDefinition) bool {
if a == b {
return true
}
@@ -1884,23 +1878,26 @@ func EqualsRefOfCheckConstraintDefinition(a, b *CheckConstraintDefinition) bool
return false
}
return a.Enforced == b.Enforced &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfColName does deep equals between the two objects.
-func EqualsRefOfColName(a, b *ColName) bool {
+// RefOfColName does deep equals between the two objects.
+func (cmp *Comparator) RefOfColName(a, b *ColName) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsTableName(a.Qualifier, b.Qualifier)
+ if cmp.RefOfColName_ != nil {
+ return cmp.RefOfColName_(a, b)
+ }
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.TableName(a.Qualifier, b.Qualifier)
}
-// EqualsRefOfCollateExpr does deep equals between the two objects.
-func EqualsRefOfCollateExpr(a, b *CollateExpr) bool {
+// RefOfCollateExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfCollateExpr(a, b *CollateExpr) bool {
if a == b {
return true
}
@@ -1908,23 +1905,23 @@ func EqualsRefOfCollateExpr(a, b *CollateExpr) bool {
return false
}
return a.Collation == b.Collation &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfColumnDefinition does deep equals between the two objects.
-func EqualsRefOfColumnDefinition(a, b *ColumnDefinition) bool {
+// RefOfColumnDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfColumnDefinition(a, b *ColumnDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsColumnType(a.Type, b.Type)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.RefOfColumnType(a.Type, b.Type)
}
-// EqualsRefOfColumnType does deep equals between the two objects.
-func EqualsRefOfColumnType(a, b *ColumnType) bool {
+// RefOfColumnType does deep equals between the two objects.
+func (cmp *Comparator) RefOfColumnType(a, b *ColumnType) bool {
if a == b {
return true
}
@@ -1934,39 +1931,39 @@ func EqualsRefOfColumnType(a, b *ColumnType) bool {
return a.Type == b.Type &&
a.Unsigned == b.Unsigned &&
a.Zerofill == b.Zerofill &&
- EqualsRefOfColumnTypeOptions(a.Options, b.Options) &&
- EqualsRefOfLiteral(a.Length, b.Length) &&
- EqualsRefOfLiteral(a.Scale, b.Scale) &&
- EqualsColumnCharset(a.Charset, b.Charset) &&
- EqualsSliceOfString(a.EnumValues, b.EnumValues)
+ cmp.RefOfColumnTypeOptions(a.Options, b.Options) &&
+ cmp.RefOfLiteral(a.Length, b.Length) &&
+ cmp.RefOfLiteral(a.Scale, b.Scale) &&
+ cmp.ColumnCharset(a.Charset, b.Charset) &&
+ cmp.SliceOfString(a.EnumValues, b.EnumValues)
}
-// EqualsColumns does deep equals between the two objects.
-func EqualsColumns(a, b Columns) bool {
+// Columns does deep equals between the two objects.
+func (cmp *Comparator) Columns(a, b Columns) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsIdentifierCI(a[i], b[i]) {
+ if !cmp.IdentifierCI(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfCommentOnly does deep equals between the two objects.
-func EqualsRefOfCommentOnly(a, b *CommentOnly) bool {
+// RefOfCommentOnly does deep equals between the two objects.
+func (cmp *Comparator) RefOfCommentOnly(a, b *CommentOnly) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSliceOfString(a.Comments, b.Comments)
+ return cmp.SliceOfString(a.Comments, b.Comments)
}
-// EqualsRefOfCommit does deep equals between the two objects.
-func EqualsRefOfCommit(a, b *Commit) bool {
+// RefOfCommit does deep equals between the two objects.
+func (cmp *Comparator) RefOfCommit(a, b *Commit) bool {
if a == b {
return true
}
@@ -1976,21 +1973,21 @@ func EqualsRefOfCommit(a, b *Commit) bool {
return true
}
-// EqualsRefOfCommonTableExpr does deep equals between the two objects.
-func EqualsRefOfCommonTableExpr(a, b *CommonTableExpr) bool {
+// RefOfCommonTableExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfCommonTableExpr(a, b *CommonTableExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCS(a.ID, b.ID) &&
- EqualsColumns(a.Columns, b.Columns) &&
- EqualsRefOfSubquery(a.Subquery, b.Subquery)
+ return cmp.IdentifierCS(a.ID, b.ID) &&
+ cmp.Columns(a.Columns, b.Columns) &&
+ cmp.RefOfSubquery(a.Subquery, b.Subquery)
}
-// EqualsRefOfComparisonExpr does deep equals between the two objects.
-func EqualsRefOfComparisonExpr(a, b *ComparisonExpr) bool {
+// RefOfComparisonExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfComparisonExpr(a, b *ComparisonExpr) bool {
if a == b {
return true
}
@@ -1998,37 +1995,37 @@ func EqualsRefOfComparisonExpr(a, b *ComparisonExpr) bool {
return false
}
return a.Operator == b.Operator &&
- EqualsExpr(a.Left, b.Left) &&
- EqualsExpr(a.Right, b.Right) &&
- EqualsExpr(a.Escape, b.Escape)
+ cmp.Expr(a.Left, b.Left) &&
+ cmp.Expr(a.Right, b.Right) &&
+ cmp.Expr(a.Escape, b.Escape)
}
-// EqualsRefOfConstraintDefinition does deep equals between the two objects.
-func EqualsRefOfConstraintDefinition(a, b *ConstraintDefinition) bool {
+// RefOfConstraintDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfConstraintDefinition(a, b *ConstraintDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsConstraintInfo(a.Details, b.Details)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.ConstraintInfo(a.Details, b.Details)
}
-// EqualsRefOfConvertExpr does deep equals between the two objects.
-func EqualsRefOfConvertExpr(a, b *ConvertExpr) bool {
+// RefOfConvertExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfConvertExpr(a, b *ConvertExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsRefOfConvertType(a.Type, b.Type)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.RefOfConvertType(a.Type, b.Type)
}
-// EqualsRefOfConvertType does deep equals between the two objects.
-func EqualsRefOfConvertType(a, b *ConvertType) bool {
+// RefOfConvertType does deep equals between the two objects.
+func (cmp *Comparator) RefOfConvertType(a, b *ConvertType) bool {
if a == b {
return true
}
@@ -2036,13 +2033,13 @@ func EqualsRefOfConvertType(a, b *ConvertType) bool {
return false
}
return a.Type == b.Type &&
- EqualsRefOfLiteral(a.Length, b.Length) &&
- EqualsRefOfLiteral(a.Scale, b.Scale) &&
- EqualsColumnCharset(a.Charset, b.Charset)
+ cmp.RefOfLiteral(a.Length, b.Length) &&
+ cmp.RefOfLiteral(a.Scale, b.Scale) &&
+ cmp.ColumnCharset(a.Charset, b.Charset)
}
-// EqualsRefOfConvertUsingExpr does deep equals between the two objects.
-func EqualsRefOfConvertUsingExpr(a, b *ConvertUsingExpr) bool {
+// RefOfConvertUsingExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfConvertUsingExpr(a, b *ConvertUsingExpr) bool {
if a == b {
return true
}
@@ -2050,11 +2047,11 @@ func EqualsRefOfConvertUsingExpr(a, b *ConvertUsingExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfCount does deep equals between the two objects.
-func EqualsRefOfCount(a, b *Count) bool {
+// RefOfCount does deep equals between the two objects.
+func (cmp *Comparator) RefOfCount(a, b *Count) bool {
if a == b {
return true
}
@@ -2062,11 +2059,11 @@ func EqualsRefOfCount(a, b *Count) bool {
return false
}
return a.Distinct == b.Distinct &&
- EqualsExprs(a.Args, b.Args)
+ cmp.Exprs(a.Args, b.Args)
}
-// EqualsRefOfCountStar does deep equals between the two objects.
-func EqualsRefOfCountStar(a, b *CountStar) bool {
+// RefOfCountStar does deep equals between the two objects.
+func (cmp *Comparator) RefOfCountStar(a, b *CountStar) bool {
if a == b {
return true
}
@@ -2076,8 +2073,8 @@ func EqualsRefOfCountStar(a, b *CountStar) bool {
return true
}
-// EqualsRefOfCreateDatabase does deep equals between the two objects.
-func EqualsRefOfCreateDatabase(a, b *CreateDatabase) bool {
+// RefOfCreateDatabase does deep equals between the two objects.
+func (cmp *Comparator) RefOfCreateDatabase(a, b *CreateDatabase) bool {
if a == b {
return true
}
@@ -2086,13 +2083,13 @@ func EqualsRefOfCreateDatabase(a, b *CreateDatabase) bool {
}
return a.IfNotExists == b.IfNotExists &&
a.FullyParsed == b.FullyParsed &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsIdentifierCS(a.DBName, b.DBName) &&
- EqualsSliceOfDatabaseOption(a.CreateOptions, b.CreateOptions)
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.IdentifierCS(a.DBName, b.DBName) &&
+ cmp.SliceOfDatabaseOption(a.CreateOptions, b.CreateOptions)
}
-// EqualsRefOfCreateTable does deep equals between the two objects.
-func EqualsRefOfCreateTable(a, b *CreateTable) bool {
+// RefOfCreateTable does deep equals between the two objects.
+func (cmp *Comparator) RefOfCreateTable(a, b *CreateTable) bool {
if a == b {
return true
}
@@ -2102,14 +2099,14 @@ func EqualsRefOfCreateTable(a, b *CreateTable) bool {
return a.Temp == b.Temp &&
a.IfNotExists == b.IfNotExists &&
a.FullyParsed == b.FullyParsed &&
- EqualsTableName(a.Table, b.Table) &&
- EqualsRefOfTableSpec(a.TableSpec, b.TableSpec) &&
- EqualsRefOfOptLike(a.OptLike, b.OptLike) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.TableName(a.Table, b.Table) &&
+ cmp.RefOfTableSpec(a.TableSpec, b.TableSpec) &&
+ cmp.RefOfOptLike(a.OptLike, b.OptLike) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfCreateView does deep equals between the two objects.
-func EqualsRefOfCreateView(a, b *CreateView) bool {
+// RefOfCreateView does deep equals between the two objects.
+func (cmp *Comparator) RefOfCreateView(a, b *CreateView) bool {
if a == b {
return true
}
@@ -2120,27 +2117,27 @@ func EqualsRefOfCreateView(a, b *CreateView) bool {
a.Security == b.Security &&
a.CheckOption == b.CheckOption &&
a.IsReplace == b.IsReplace &&
- EqualsTableName(a.ViewName, b.ViewName) &&
- EqualsRefOfDefiner(a.Definer, b.Definer) &&
- EqualsColumns(a.Columns, b.Columns) &&
- EqualsSelectStatement(a.Select, b.Select) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.TableName(a.ViewName, b.ViewName) &&
+ cmp.RefOfDefiner(a.Definer, b.Definer) &&
+ cmp.Columns(a.Columns, b.Columns) &&
+ cmp.SelectStatement(a.Select, b.Select) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfCurTimeFuncExpr does deep equals between the two objects.
-func EqualsRefOfCurTimeFuncExpr(a, b *CurTimeFuncExpr) bool {
+// RefOfCurTimeFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfCurTimeFuncExpr(a, b *CurTimeFuncExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsExpr(a.Fsp, b.Fsp)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.Expr(a.Fsp, b.Fsp)
}
-// EqualsRefOfDeallocateStmt does deep equals between the two objects.
-func EqualsRefOfDeallocateStmt(a, b *DeallocateStmt) bool {
+// RefOfDeallocateStmt does deep equals between the two objects.
+func (cmp *Comparator) RefOfDeallocateStmt(a, b *DeallocateStmt) bool {
if a == b {
return true
}
@@ -2148,12 +2145,12 @@ func EqualsRefOfDeallocateStmt(a, b *DeallocateStmt) bool {
return false
}
return a.Type == b.Type &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsIdentifierCI(a.Name, b.Name)
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfDefault does deep equals between the two objects.
-func EqualsRefOfDefault(a, b *Default) bool {
+// RefOfDefault does deep equals between the two objects.
+func (cmp *Comparator) RefOfDefault(a, b *Default) bool {
if a == b {
return true
}
@@ -2163,8 +2160,8 @@ func EqualsRefOfDefault(a, b *Default) bool {
return a.ColName == b.ColName
}
-// EqualsRefOfDefiner does deep equals between the two objects.
-func EqualsRefOfDefiner(a, b *Definer) bool {
+// RefOfDefiner does deep equals between the two objects.
+func (cmp *Comparator) RefOfDefiner(a, b *Definer) bool {
if a == b {
return true
}
@@ -2175,27 +2172,27 @@ func EqualsRefOfDefiner(a, b *Definer) bool {
a.Address == b.Address
}
-// EqualsRefOfDelete does deep equals between the two objects.
-func EqualsRefOfDelete(a, b *Delete) bool {
+// RefOfDelete does deep equals between the two objects.
+func (cmp *Comparator) RefOfDelete(a, b *Delete) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfWith(a.With, b.With) &&
+ return cmp.RefOfWith(a.With, b.With) &&
a.Ignore == b.Ignore &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsTableNames(a.Targets, b.Targets) &&
- EqualsTableExprs(a.TableExprs, b.TableExprs) &&
- EqualsPartitions(a.Partitions, b.Partitions) &&
- EqualsRefOfWhere(a.Where, b.Where) &&
- EqualsOrderBy(a.OrderBy, b.OrderBy) &&
- EqualsRefOfLimit(a.Limit, b.Limit)
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.TableNames(a.Targets, b.Targets) &&
+ cmp.TableExprs(a.TableExprs, b.TableExprs) &&
+ cmp.Partitions(a.Partitions, b.Partitions) &&
+ cmp.RefOfWhere(a.Where, b.Where) &&
+ cmp.OrderBy(a.OrderBy, b.OrderBy) &&
+ cmp.RefOfLimit(a.Limit, b.Limit)
}
-// EqualsRefOfDerivedTable does deep equals between the two objects.
-func EqualsRefOfDerivedTable(a, b *DerivedTable) bool {
+// RefOfDerivedTable does deep equals between the two objects.
+func (cmp *Comparator) RefOfDerivedTable(a, b *DerivedTable) bool {
if a == b {
return true
}
@@ -2203,22 +2200,22 @@ func EqualsRefOfDerivedTable(a, b *DerivedTable) bool {
return false
}
return a.Lateral == b.Lateral &&
- EqualsSelectStatement(a.Select, b.Select)
+ cmp.SelectStatement(a.Select, b.Select)
}
-// EqualsRefOfDropColumn does deep equals between the two objects.
-func EqualsRefOfDropColumn(a, b *DropColumn) bool {
+// RefOfDropColumn does deep equals between the two objects.
+func (cmp *Comparator) RefOfDropColumn(a, b *DropColumn) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfColName(a.Name, b.Name)
+ return cmp.RefOfColName(a.Name, b.Name)
}
-// EqualsRefOfDropDatabase does deep equals between the two objects.
-func EqualsRefOfDropDatabase(a, b *DropDatabase) bool {
+// RefOfDropDatabase does deep equals between the two objects.
+func (cmp *Comparator) RefOfDropDatabase(a, b *DropDatabase) bool {
if a == b {
return true
}
@@ -2226,12 +2223,12 @@ func EqualsRefOfDropDatabase(a, b *DropDatabase) bool {
return false
}
return a.IfExists == b.IfExists &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsIdentifierCS(a.DBName, b.DBName)
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.IdentifierCS(a.DBName, b.DBName)
}
-// EqualsRefOfDropKey does deep equals between the two objects.
-func EqualsRefOfDropKey(a, b *DropKey) bool {
+// RefOfDropKey does deep equals between the two objects.
+func (cmp *Comparator) RefOfDropKey(a, b *DropKey) bool {
if a == b {
return true
}
@@ -2239,11 +2236,11 @@ func EqualsRefOfDropKey(a, b *DropKey) bool {
return false
}
return a.Type == b.Type &&
- EqualsIdentifierCI(a.Name, b.Name)
+ cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfDropTable does deep equals between the two objects.
-func EqualsRefOfDropTable(a, b *DropTable) bool {
+// RefOfDropTable does deep equals between the two objects.
+func (cmp *Comparator) RefOfDropTable(a, b *DropTable) bool {
if a == b {
return true
}
@@ -2252,12 +2249,12 @@ func EqualsRefOfDropTable(a, b *DropTable) bool {
}
return a.Temp == b.Temp &&
a.IfExists == b.IfExists &&
- EqualsTableNames(a.FromTables, b.FromTables) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.TableNames(a.FromTables, b.FromTables) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfDropView does deep equals between the two objects.
-func EqualsRefOfDropView(a, b *DropView) bool {
+// RefOfDropView does deep equals between the two objects.
+func (cmp *Comparator) RefOfDropView(a, b *DropView) bool {
if a == b {
return true
}
@@ -2265,36 +2262,36 @@ func EqualsRefOfDropView(a, b *DropView) bool {
return false
}
return a.IfExists == b.IfExists &&
- EqualsTableNames(a.FromTables, b.FromTables) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.TableNames(a.FromTables, b.FromTables) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfExecuteStmt does deep equals between the two objects.
-func EqualsRefOfExecuteStmt(a, b *ExecuteStmt) bool {
+// RefOfExecuteStmt does deep equals between the two objects.
+func (cmp *Comparator) RefOfExecuteStmt(a, b *ExecuteStmt) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsSliceOfRefOfVariable(a.Arguments, b.Arguments)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.SliceOfRefOfVariable(a.Arguments, b.Arguments)
}
-// EqualsRefOfExistsExpr does deep equals between the two objects.
-func EqualsRefOfExistsExpr(a, b *ExistsExpr) bool {
+// RefOfExistsExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfExistsExpr(a, b *ExistsExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfSubquery(a.Subquery, b.Subquery)
+ return cmp.RefOfSubquery(a.Subquery, b.Subquery)
}
-// EqualsRefOfExplainStmt does deep equals between the two objects.
-func EqualsRefOfExplainStmt(a, b *ExplainStmt) bool {
+// RefOfExplainStmt does deep equals between the two objects.
+func (cmp *Comparator) RefOfExplainStmt(a, b *ExplainStmt) bool {
if a == b {
return true
}
@@ -2302,12 +2299,12 @@ func EqualsRefOfExplainStmt(a, b *ExplainStmt) bool {
return false
}
return a.Type == b.Type &&
- EqualsStatement(a.Statement, b.Statement) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.Statement(a.Statement, b.Statement) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfExplainTab does deep equals between the two objects.
-func EqualsRefOfExplainTab(a, b *ExplainTab) bool {
+// RefOfExplainTab does deep equals between the two objects.
+func (cmp *Comparator) RefOfExplainTab(a, b *ExplainTab) bool {
if a == b {
return true
}
@@ -2315,24 +2312,24 @@ func EqualsRefOfExplainTab(a, b *ExplainTab) bool {
return false
}
return a.Wild == b.Wild &&
- EqualsTableName(a.Table, b.Table)
+ cmp.TableName(a.Table, b.Table)
}
-// EqualsExprs does deep equals between the two objects.
-func EqualsExprs(a, b Exprs) bool {
+// Exprs does deep equals between the two objects.
+func (cmp *Comparator) Exprs(a, b Exprs) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsExpr(a[i], b[i]) {
+ if !cmp.Expr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfExtractFuncExpr does deep equals between the two objects.
-func EqualsRefOfExtractFuncExpr(a, b *ExtractFuncExpr) bool {
+// RefOfExtractFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfExtractFuncExpr(a, b *ExtractFuncExpr) bool {
if a == b {
return true
}
@@ -2340,23 +2337,23 @@ func EqualsRefOfExtractFuncExpr(a, b *ExtractFuncExpr) bool {
return false
}
return a.IntervalTypes == b.IntervalTypes &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfExtractValueExpr does deep equals between the two objects.
-func EqualsRefOfExtractValueExpr(a, b *ExtractValueExpr) bool {
+// RefOfExtractValueExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfExtractValueExpr(a, b *ExtractValueExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Fragment, b.Fragment) &&
- EqualsExpr(a.XPathExpr, b.XPathExpr)
+ return cmp.Expr(a.Fragment, b.Fragment) &&
+ cmp.Expr(a.XPathExpr, b.XPathExpr)
}
-// EqualsRefOfExtractedSubquery does deep equals between the two objects.
-func EqualsRefOfExtractedSubquery(a, b *ExtractedSubquery) bool {
+// RefOfExtractedSubquery does deep equals between the two objects.
+func (cmp *Comparator) RefOfExtractedSubquery(a, b *ExtractedSubquery) bool {
if a == b {
return true
}
@@ -2367,14 +2364,14 @@ func EqualsRefOfExtractedSubquery(a, b *ExtractedSubquery) bool {
a.NeedsRewrite == b.NeedsRewrite &&
a.hasValuesArg == b.hasValuesArg &&
a.argName == b.argName &&
- EqualsExpr(a.Original, b.Original) &&
- EqualsRefOfSubquery(a.Subquery, b.Subquery) &&
- EqualsExpr(a.OtherSide, b.OtherSide) &&
- EqualsExpr(a.alternative, b.alternative)
+ cmp.Expr(a.Original, b.Original) &&
+ cmp.RefOfSubquery(a.Subquery, b.Subquery) &&
+ cmp.Expr(a.OtherSide, b.OtherSide) &&
+ cmp.Expr(a.alternative, b.alternative)
}
-// EqualsRefOfFirstOrLastValueExpr does deep equals between the two objects.
-func EqualsRefOfFirstOrLastValueExpr(a, b *FirstOrLastValueExpr) bool {
+// RefOfFirstOrLastValueExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfFirstOrLastValueExpr(a, b *FirstOrLastValueExpr) bool {
if a == b {
return true
}
@@ -2382,13 +2379,13 @@ func EqualsRefOfFirstOrLastValueExpr(a, b *FirstOrLastValueExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Expr, b.Expr) &&
- EqualsRefOfNullTreatmentClause(a.NullTreatmentClause, b.NullTreatmentClause) &&
- EqualsRefOfOverClause(a.OverClause, b.OverClause)
+ cmp.Expr(a.Expr, b.Expr) &&
+ cmp.RefOfNullTreatmentClause(a.NullTreatmentClause, b.NullTreatmentClause) &&
+ cmp.RefOfOverClause(a.OverClause, b.OverClause)
}
-// EqualsRefOfFlush does deep equals between the two objects.
-func EqualsRefOfFlush(a, b *Flush) bool {
+// RefOfFlush does deep equals between the two objects.
+func (cmp *Comparator) RefOfFlush(a, b *Flush) bool {
if a == b {
return true
}
@@ -2398,12 +2395,12 @@ func EqualsRefOfFlush(a, b *Flush) bool {
return a.IsLocal == b.IsLocal &&
a.WithLock == b.WithLock &&
a.ForExport == b.ForExport &&
- EqualsSliceOfString(a.FlushOptions, b.FlushOptions) &&
- EqualsTableNames(a.TableNames, b.TableNames)
+ cmp.SliceOfString(a.FlushOptions, b.FlushOptions) &&
+ cmp.TableNames(a.TableNames, b.TableNames)
}
-// EqualsRefOfForce does deep equals between the two objects.
-func EqualsRefOfForce(a, b *Force) bool {
+// RefOfForce does deep equals between the two objects.
+func (cmp *Comparator) RefOfForce(a, b *Force) bool {
if a == b {
return true
}
@@ -2413,21 +2410,21 @@ func EqualsRefOfForce(a, b *Force) bool {
return true
}
-// EqualsRefOfForeignKeyDefinition does deep equals between the two objects.
-func EqualsRefOfForeignKeyDefinition(a, b *ForeignKeyDefinition) bool {
+// RefOfForeignKeyDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfForeignKeyDefinition(a, b *ForeignKeyDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsColumns(a.Source, b.Source) &&
- EqualsIdentifierCI(a.IndexName, b.IndexName) &&
- EqualsRefOfReferenceDefinition(a.ReferenceDefinition, b.ReferenceDefinition)
+ return cmp.Columns(a.Source, b.Source) &&
+ cmp.IdentifierCI(a.IndexName, b.IndexName) &&
+ cmp.RefOfReferenceDefinition(a.ReferenceDefinition, b.ReferenceDefinition)
}
-// EqualsRefOfFrameClause does deep equals between the two objects.
-func EqualsRefOfFrameClause(a, b *FrameClause) bool {
+// RefOfFrameClause does deep equals between the two objects.
+func (cmp *Comparator) RefOfFrameClause(a, b *FrameClause) bool {
if a == b {
return true
}
@@ -2435,12 +2432,12 @@ func EqualsRefOfFrameClause(a, b *FrameClause) bool {
return false
}
return a.Unit == b.Unit &&
- EqualsRefOfFramePoint(a.Start, b.Start) &&
- EqualsRefOfFramePoint(a.End, b.End)
+ cmp.RefOfFramePoint(a.Start, b.Start) &&
+ cmp.RefOfFramePoint(a.End, b.End)
}
-// EqualsRefOfFramePoint does deep equals between the two objects.
-func EqualsRefOfFramePoint(a, b *FramePoint) bool {
+// RefOfFramePoint does deep equals between the two objects.
+func (cmp *Comparator) RefOfFramePoint(a, b *FramePoint) bool {
if a == b {
return true
}
@@ -2448,11 +2445,11 @@ func EqualsRefOfFramePoint(a, b *FramePoint) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfFromFirstLastClause does deep equals between the two objects.
-func EqualsRefOfFromFirstLastClause(a, b *FromFirstLastClause) bool {
+// RefOfFromFirstLastClause does deep equals between the two objects.
+func (cmp *Comparator) RefOfFromFirstLastClause(a, b *FromFirstLastClause) bool {
if a == b {
return true
}
@@ -2462,21 +2459,21 @@ func EqualsRefOfFromFirstLastClause(a, b *FromFirstLastClause) bool {
return a.Type == b.Type
}
-// EqualsRefOfFuncExpr does deep equals between the two objects.
-func EqualsRefOfFuncExpr(a, b *FuncExpr) bool {
+// RefOfFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfFuncExpr(a, b *FuncExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCS(a.Qualifier, b.Qualifier) &&
- EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsSelectExprs(a.Exprs, b.Exprs)
+ return cmp.IdentifierCS(a.Qualifier, b.Qualifier) &&
+ cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.SelectExprs(a.Exprs, b.Exprs)
}
-// EqualsRefOfGTIDFuncExpr does deep equals between the two objects.
-func EqualsRefOfGTIDFuncExpr(a, b *GTIDFuncExpr) bool {
+// RefOfGTIDFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfGTIDFuncExpr(a, b *GTIDFuncExpr) bool {
if a == b {
return true
}
@@ -2484,27 +2481,27 @@ func EqualsRefOfGTIDFuncExpr(a, b *GTIDFuncExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Set1, b.Set1) &&
- EqualsExpr(a.Set2, b.Set2) &&
- EqualsExpr(a.Timeout, b.Timeout) &&
- EqualsExpr(a.Channel, b.Channel)
+ cmp.Expr(a.Set1, b.Set1) &&
+ cmp.Expr(a.Set2, b.Set2) &&
+ cmp.Expr(a.Timeout, b.Timeout) &&
+ cmp.Expr(a.Channel, b.Channel)
}
-// EqualsGroupBy does deep equals between the two objects.
-func EqualsGroupBy(a, b GroupBy) bool {
+// GroupBy does deep equals between the two objects.
+func (cmp *Comparator) GroupBy(a, b GroupBy) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsExpr(a[i], b[i]) {
+ if !cmp.Expr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfGroupConcatExpr does deep equals between the two objects.
-func EqualsRefOfGroupConcatExpr(a, b *GroupConcatExpr) bool {
+// RefOfGroupConcatExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfGroupConcatExpr(a, b *GroupConcatExpr) bool {
if a == b {
return true
}
@@ -2513,37 +2510,37 @@ func EqualsRefOfGroupConcatExpr(a, b *GroupConcatExpr) bool {
}
return a.Distinct == b.Distinct &&
a.Separator == b.Separator &&
- EqualsExprs(a.Exprs, b.Exprs) &&
- EqualsOrderBy(a.OrderBy, b.OrderBy) &&
- EqualsRefOfLimit(a.Limit, b.Limit)
+ cmp.Exprs(a.Exprs, b.Exprs) &&
+ cmp.OrderBy(a.OrderBy, b.OrderBy) &&
+ cmp.RefOfLimit(a.Limit, b.Limit)
}
-// EqualsIdentifierCI does deep equals between the two objects.
-func EqualsIdentifierCI(a, b IdentifierCI) bool {
+// IdentifierCI does deep equals between the two objects.
+func (cmp *Comparator) IdentifierCI(a, b IdentifierCI) bool {
return a.val == b.val &&
a.lowered == b.lowered
}
-// EqualsIdentifierCS does deep equals between the two objects.
-func EqualsIdentifierCS(a, b IdentifierCS) bool {
+// IdentifierCS does deep equals between the two objects.
+func (cmp *Comparator) IdentifierCS(a, b IdentifierCS) bool {
return a.v == b.v
}
-// EqualsRefOfIndexDefinition does deep equals between the two objects.
-func EqualsRefOfIndexDefinition(a, b *IndexDefinition) bool {
+// RefOfIndexDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfIndexDefinition(a, b *IndexDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfIndexInfo(a.Info, b.Info) &&
- EqualsSliceOfRefOfIndexColumn(a.Columns, b.Columns) &&
- EqualsSliceOfRefOfIndexOption(a.Options, b.Options)
+ return cmp.RefOfIndexInfo(a.Info, b.Info) &&
+ cmp.SliceOfRefOfIndexColumn(a.Columns, b.Columns) &&
+ cmp.SliceOfRefOfIndexOption(a.Options, b.Options)
}
-// EqualsRefOfIndexHint does deep equals between the two objects.
-func EqualsRefOfIndexHint(a, b *IndexHint) bool {
+// RefOfIndexHint does deep equals between the two objects.
+func (cmp *Comparator) RefOfIndexHint(a, b *IndexHint) bool {
if a == b {
return true
}
@@ -2552,24 +2549,24 @@ func EqualsRefOfIndexHint(a, b *IndexHint) bool {
}
return a.Type == b.Type &&
a.ForType == b.ForType &&
- EqualsSliceOfIdentifierCI(a.Indexes, b.Indexes)
+ cmp.SliceOfIdentifierCI(a.Indexes, b.Indexes)
}
-// EqualsIndexHints does deep equals between the two objects.
-func EqualsIndexHints(a, b IndexHints) bool {
+// IndexHints does deep equals between the two objects.
+func (cmp *Comparator) IndexHints(a, b IndexHints) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfIndexHint(a[i], b[i]) {
+ if !cmp.RefOfIndexHint(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfIndexInfo does deep equals between the two objects.
-func EqualsRefOfIndexInfo(a, b *IndexInfo) bool {
+// RefOfIndexInfo does deep equals between the two objects.
+func (cmp *Comparator) RefOfIndexInfo(a, b *IndexInfo) bool {
if a == b {
return true
}
@@ -2581,12 +2578,12 @@ func EqualsRefOfIndexInfo(a, b *IndexInfo) bool {
a.Spatial == b.Spatial &&
a.Fulltext == b.Fulltext &&
a.Unique == b.Unique &&
- EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsIdentifierCI(a.ConstraintName, b.ConstraintName)
+ cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.IdentifierCI(a.ConstraintName, b.ConstraintName)
}
-// EqualsRefOfInsert does deep equals between the two objects.
-func EqualsRefOfInsert(a, b *Insert) bool {
+// RefOfInsert does deep equals between the two objects.
+func (cmp *Comparator) RefOfInsert(a, b *Insert) bool {
if a == b {
return true
}
@@ -2594,31 +2591,31 @@ func EqualsRefOfInsert(a, b *Insert) bool {
return false
}
return a.Action == b.Action &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
a.Ignore == b.Ignore &&
- EqualsTableName(a.Table, b.Table) &&
- EqualsPartitions(a.Partitions, b.Partitions) &&
- EqualsColumns(a.Columns, b.Columns) &&
- EqualsInsertRows(a.Rows, b.Rows) &&
- EqualsOnDup(a.OnDup, b.OnDup)
+ cmp.TableName(a.Table, b.Table) &&
+ cmp.Partitions(a.Partitions, b.Partitions) &&
+ cmp.Columns(a.Columns, b.Columns) &&
+ cmp.InsertRows(a.Rows, b.Rows) &&
+ cmp.OnDup(a.OnDup, b.OnDup)
}
-// EqualsRefOfInsertExpr does deep equals between the two objects.
-func EqualsRefOfInsertExpr(a, b *InsertExpr) bool {
+// RefOfInsertExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfInsertExpr(a, b *InsertExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Str, b.Str) &&
- EqualsExpr(a.Pos, b.Pos) &&
- EqualsExpr(a.Len, b.Len) &&
- EqualsExpr(a.NewStr, b.NewStr)
+ return cmp.Expr(a.Str, b.Str) &&
+ cmp.Expr(a.Pos, b.Pos) &&
+ cmp.Expr(a.Len, b.Len) &&
+ cmp.Expr(a.NewStr, b.NewStr)
}
-// EqualsRefOfIntervalExpr does deep equals between the two objects.
-func EqualsRefOfIntervalExpr(a, b *IntervalExpr) bool {
+// RefOfIntervalExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfIntervalExpr(a, b *IntervalExpr) bool {
if a == b {
return true
}
@@ -2626,23 +2623,23 @@ func EqualsRefOfIntervalExpr(a, b *IntervalExpr) bool {
return false
}
return a.Unit == b.Unit &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfIntervalFuncExpr does deep equals between the two objects.
-func EqualsRefOfIntervalFuncExpr(a, b *IntervalFuncExpr) bool {
+// RefOfIntervalFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfIntervalFuncExpr(a, b *IntervalFuncExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsExprs(a.Exprs, b.Exprs)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.Exprs(a.Exprs, b.Exprs)
}
-// EqualsRefOfIntroducerExpr does deep equals between the two objects.
-func EqualsRefOfIntroducerExpr(a, b *IntroducerExpr) bool {
+// RefOfIntroducerExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfIntroducerExpr(a, b *IntroducerExpr) bool {
if a == b {
return true
}
@@ -2650,34 +2647,34 @@ func EqualsRefOfIntroducerExpr(a, b *IntroducerExpr) bool {
return false
}
return a.CharacterSet == b.CharacterSet &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfIsExpr does deep equals between the two objects.
-func EqualsRefOfIsExpr(a, b *IsExpr) bool {
+// RefOfIsExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfIsExpr(a, b *IsExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Left, b.Left) &&
+ return cmp.Expr(a.Left, b.Left) &&
a.Right == b.Right
}
-// EqualsRefOfJSONArrayExpr does deep equals between the two objects.
-func EqualsRefOfJSONArrayExpr(a, b *JSONArrayExpr) bool {
+// RefOfJSONArrayExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONArrayExpr(a, b *JSONArrayExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExprs(a.Params, b.Params)
+ return cmp.Exprs(a.Params, b.Params)
}
-// EqualsRefOfJSONAttributesExpr does deep equals between the two objects.
-func EqualsRefOfJSONAttributesExpr(a, b *JSONAttributesExpr) bool {
+// RefOfJSONAttributesExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONAttributesExpr(a, b *JSONAttributesExpr) bool {
if a == b {
return true
}
@@ -2685,226 +2682,232 @@ func EqualsRefOfJSONAttributesExpr(a, b *JSONAttributesExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsExpr(a.Path, b.Path)
+ cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.Expr(a.Path, b.Path)
}
-// EqualsRefOfJSONContainsExpr does deep equals between the two objects.
-func EqualsRefOfJSONContainsExpr(a, b *JSONContainsExpr) bool {
+// RefOfJSONContainsExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONContainsExpr(a, b *JSONContainsExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Target, b.Target) &&
- EqualsExpr(a.Candidate, b.Candidate) &&
- EqualsSliceOfExpr(a.PathList, b.PathList)
+ return cmp.Expr(a.Target, b.Target) &&
+ cmp.Expr(a.Candidate, b.Candidate) &&
+ cmp.SliceOfExpr(a.PathList, b.PathList)
}
-// EqualsRefOfJSONContainsPathExpr does deep equals between the two objects.
-func EqualsRefOfJSONContainsPathExpr(a, b *JSONContainsPathExpr) bool {
+// RefOfJSONContainsPathExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONContainsPathExpr(a, b *JSONContainsPathExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsExpr(a.OneOrAll, b.OneOrAll) &&
- EqualsSliceOfExpr(a.PathList, b.PathList)
+ return cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.Expr(a.OneOrAll, b.OneOrAll) &&
+ cmp.SliceOfExpr(a.PathList, b.PathList)
}
-// EqualsRefOfJSONExtractExpr does deep equals between the two objects.
-func EqualsRefOfJSONExtractExpr(a, b *JSONExtractExpr) bool {
+// RefOfJSONExtractExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONExtractExpr(a, b *JSONExtractExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsSliceOfExpr(a.PathList, b.PathList)
+ return cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.SliceOfExpr(a.PathList, b.PathList)
}
-// EqualsRefOfJSONKeysExpr does deep equals between the two objects.
-func EqualsRefOfJSONKeysExpr(a, b *JSONKeysExpr) bool {
+// RefOfJSONKeysExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONKeysExpr(a, b *JSONKeysExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsExpr(a.Path, b.Path)
+ return cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.Expr(a.Path, b.Path)
}
-// EqualsRefOfJSONObjectExpr does deep equals between the two objects.
-func EqualsRefOfJSONObjectExpr(a, b *JSONObjectExpr) bool {
+// RefOfJSONObjectExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONObjectExpr(a, b *JSONObjectExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSliceOfRefOfJSONObjectParam(a.Params, b.Params)
+ return cmp.SliceOfRefOfJSONObjectParam(a.Params, b.Params)
}
-// EqualsJSONObjectParam does deep equals between the two objects.
-func EqualsJSONObjectParam(a, b JSONObjectParam) bool {
- return EqualsExpr(a.Key, b.Key) &&
- EqualsExpr(a.Value, b.Value)
+// RefOfJSONObjectParam does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONObjectParam(a, b *JSONObjectParam) bool {
+ if a == b {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ return cmp.Expr(a.Key, b.Key) &&
+ cmp.Expr(a.Value, b.Value)
}
-// EqualsRefOfJSONOverlapsExpr does deep equals between the two objects.
-func EqualsRefOfJSONOverlapsExpr(a, b *JSONOverlapsExpr) bool {
+// RefOfJSONOverlapsExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONOverlapsExpr(a, b *JSONOverlapsExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONDoc1, b.JSONDoc1) &&
- EqualsExpr(a.JSONDoc2, b.JSONDoc2)
+ return cmp.Expr(a.JSONDoc1, b.JSONDoc1) &&
+ cmp.Expr(a.JSONDoc2, b.JSONDoc2)
}
-// EqualsRefOfJSONPrettyExpr does deep equals between the two objects.
-func EqualsRefOfJSONPrettyExpr(a, b *JSONPrettyExpr) bool {
+// RefOfJSONPrettyExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONPrettyExpr(a, b *JSONPrettyExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONVal, b.JSONVal)
+ return cmp.Expr(a.JSONVal, b.JSONVal)
}
-// EqualsRefOfJSONQuoteExpr does deep equals between the two objects.
-func EqualsRefOfJSONQuoteExpr(a, b *JSONQuoteExpr) bool {
+// RefOfJSONQuoteExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONQuoteExpr(a, b *JSONQuoteExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.StringArg, b.StringArg)
+ return cmp.Expr(a.StringArg, b.StringArg)
}
-// EqualsRefOfJSONRemoveExpr does deep equals between the two objects.
-func EqualsRefOfJSONRemoveExpr(a, b *JSONRemoveExpr) bool {
+// RefOfJSONRemoveExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONRemoveExpr(a, b *JSONRemoveExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsExprs(a.PathList, b.PathList)
+ return cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.Exprs(a.PathList, b.PathList)
}
-// EqualsRefOfJSONSchemaValidFuncExpr does deep equals between the two objects.
-func EqualsRefOfJSONSchemaValidFuncExpr(a, b *JSONSchemaValidFuncExpr) bool {
+// RefOfJSONSchemaValidFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONSchemaValidFuncExpr(a, b *JSONSchemaValidFuncExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Schema, b.Schema) &&
- EqualsExpr(a.Document, b.Document)
+ return cmp.Expr(a.Schema, b.Schema) &&
+ cmp.Expr(a.Document, b.Document)
}
-// EqualsRefOfJSONSchemaValidationReportFuncExpr does deep equals between the two objects.
-func EqualsRefOfJSONSchemaValidationReportFuncExpr(a, b *JSONSchemaValidationReportFuncExpr) bool {
+// RefOfJSONSchemaValidationReportFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONSchemaValidationReportFuncExpr(a, b *JSONSchemaValidationReportFuncExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Schema, b.Schema) &&
- EqualsExpr(a.Document, b.Document)
+ return cmp.Expr(a.Schema, b.Schema) &&
+ cmp.Expr(a.Document, b.Document)
}
-// EqualsRefOfJSONSearchExpr does deep equals between the two objects.
-func EqualsRefOfJSONSearchExpr(a, b *JSONSearchExpr) bool {
+// RefOfJSONSearchExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONSearchExpr(a, b *JSONSearchExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsExpr(a.OneOrAll, b.OneOrAll) &&
- EqualsExpr(a.SearchStr, b.SearchStr) &&
- EqualsExpr(a.EscapeChar, b.EscapeChar) &&
- EqualsSliceOfExpr(a.PathList, b.PathList)
+ return cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.Expr(a.OneOrAll, b.OneOrAll) &&
+ cmp.Expr(a.SearchStr, b.SearchStr) &&
+ cmp.Expr(a.EscapeChar, b.EscapeChar) &&
+ cmp.SliceOfExpr(a.PathList, b.PathList)
}
-// EqualsRefOfJSONStorageFreeExpr does deep equals between the two objects.
-func EqualsRefOfJSONStorageFreeExpr(a, b *JSONStorageFreeExpr) bool {
+// RefOfJSONStorageFreeExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONStorageFreeExpr(a, b *JSONStorageFreeExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONVal, b.JSONVal)
+ return cmp.Expr(a.JSONVal, b.JSONVal)
}
-// EqualsRefOfJSONStorageSizeExpr does deep equals between the two objects.
-func EqualsRefOfJSONStorageSizeExpr(a, b *JSONStorageSizeExpr) bool {
+// RefOfJSONStorageSizeExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONStorageSizeExpr(a, b *JSONStorageSizeExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONVal, b.JSONVal)
+ return cmp.Expr(a.JSONVal, b.JSONVal)
}
-// EqualsRefOfJSONTableExpr does deep equals between the two objects.
-func EqualsRefOfJSONTableExpr(a, b *JSONTableExpr) bool {
+// RefOfJSONTableExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONTableExpr(a, b *JSONTableExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsIdentifierCS(a.Alias, b.Alias) &&
- EqualsExpr(a.Filter, b.Filter) &&
- EqualsSliceOfRefOfJtColumnDefinition(a.Columns, b.Columns)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.IdentifierCS(a.Alias, b.Alias) &&
+ cmp.Expr(a.Filter, b.Filter) &&
+ cmp.SliceOfRefOfJtColumnDefinition(a.Columns, b.Columns)
}
-// EqualsRefOfJSONUnquoteExpr does deep equals between the two objects.
-func EqualsRefOfJSONUnquoteExpr(a, b *JSONUnquoteExpr) bool {
+// RefOfJSONUnquoteExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONUnquoteExpr(a, b *JSONUnquoteExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONValue, b.JSONValue)
+ return cmp.Expr(a.JSONValue, b.JSONValue)
}
-// EqualsRefOfJSONValueExpr does deep equals between the two objects.
-func EqualsRefOfJSONValueExpr(a, b *JSONValueExpr) bool {
+// RefOfJSONValueExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONValueExpr(a, b *JSONValueExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsExpr(a.Path, b.Path) &&
- EqualsRefOfConvertType(a.ReturningType, b.ReturningType) &&
- EqualsRefOfJtOnResponse(a.EmptyOnResponse, b.EmptyOnResponse) &&
- EqualsRefOfJtOnResponse(a.ErrorOnResponse, b.ErrorOnResponse)
+ return cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.Expr(a.Path, b.Path) &&
+ cmp.RefOfConvertType(a.ReturningType, b.ReturningType) &&
+ cmp.RefOfJtOnResponse(a.EmptyOnResponse, b.EmptyOnResponse) &&
+ cmp.RefOfJtOnResponse(a.ErrorOnResponse, b.ErrorOnResponse)
}
-// EqualsRefOfJSONValueMergeExpr does deep equals between the two objects.
-func EqualsRefOfJSONValueMergeExpr(a, b *JSONValueMergeExpr) bool {
+// RefOfJSONValueMergeExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONValueMergeExpr(a, b *JSONValueMergeExpr) bool {
if a == b {
return true
}
@@ -2912,12 +2915,12 @@ func EqualsRefOfJSONValueMergeExpr(a, b *JSONValueMergeExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsExprs(a.JSONDocList, b.JSONDocList)
+ cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.Exprs(a.JSONDocList, b.JSONDocList)
}
-// EqualsRefOfJSONValueModifierExpr does deep equals between the two objects.
-func EqualsRefOfJSONValueModifierExpr(a, b *JSONValueModifierExpr) bool {
+// RefOfJSONValueModifierExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJSONValueModifierExpr(a, b *JSONValueModifierExpr) bool {
if a == b {
return true
}
@@ -2925,51 +2928,51 @@ func EqualsRefOfJSONValueModifierExpr(a, b *JSONValueModifierExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.JSONDoc, b.JSONDoc) &&
- EqualsSliceOfRefOfJSONObjectParam(a.Params, b.Params)
+ cmp.Expr(a.JSONDoc, b.JSONDoc) &&
+ cmp.SliceOfRefOfJSONObjectParam(a.Params, b.Params)
}
-// EqualsRefOfJoinCondition does deep equals between the two objects.
-func EqualsRefOfJoinCondition(a, b *JoinCondition) bool {
+// RefOfJoinCondition does deep equals between the two objects.
+func (cmp *Comparator) RefOfJoinCondition(a, b *JoinCondition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.On, b.On) &&
- EqualsColumns(a.Using, b.Using)
+ return cmp.Expr(a.On, b.On) &&
+ cmp.Columns(a.Using, b.Using)
}
-// EqualsRefOfJoinTableExpr does deep equals between the two objects.
-func EqualsRefOfJoinTableExpr(a, b *JoinTableExpr) bool {
+// RefOfJoinTableExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfJoinTableExpr(a, b *JoinTableExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableExpr(a.LeftExpr, b.LeftExpr) &&
+ return cmp.TableExpr(a.LeftExpr, b.LeftExpr) &&
a.Join == b.Join &&
- EqualsTableExpr(a.RightExpr, b.RightExpr) &&
- EqualsRefOfJoinCondition(a.Condition, b.Condition)
+ cmp.TableExpr(a.RightExpr, b.RightExpr) &&
+ cmp.RefOfJoinCondition(a.Condition, b.Condition)
}
-// EqualsRefOfJtColumnDefinition does deep equals between the two objects.
-func EqualsRefOfJtColumnDefinition(a, b *JtColumnDefinition) bool {
+// RefOfJtColumnDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfJtColumnDefinition(a, b *JtColumnDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfJtOrdinalColDef(a.JtOrdinal, b.JtOrdinal) &&
- EqualsRefOfJtPathColDef(a.JtPath, b.JtPath) &&
- EqualsRefOfJtNestedPathColDef(a.JtNestedPath, b.JtNestedPath)
+ return cmp.RefOfJtOrdinalColDef(a.JtOrdinal, b.JtOrdinal) &&
+ cmp.RefOfJtPathColDef(a.JtPath, b.JtPath) &&
+ cmp.RefOfJtNestedPathColDef(a.JtNestedPath, b.JtNestedPath)
}
-// EqualsRefOfJtOnResponse does deep equals between the two objects.
-func EqualsRefOfJtOnResponse(a, b *JtOnResponse) bool {
+// RefOfJtOnResponse does deep equals between the two objects.
+func (cmp *Comparator) RefOfJtOnResponse(a, b *JtOnResponse) bool {
if a == b {
return true
}
@@ -2977,11 +2980,11 @@ func EqualsRefOfJtOnResponse(a, b *JtOnResponse) bool {
return false
}
return a.ResponseType == b.ResponseType &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfKeyState does deep equals between the two objects.
-func EqualsRefOfKeyState(a, b *KeyState) bool {
+// RefOfKeyState does deep equals between the two objects.
+func (cmp *Comparator) RefOfKeyState(a, b *KeyState) bool {
if a == b {
return true
}
@@ -2991,8 +2994,8 @@ func EqualsRefOfKeyState(a, b *KeyState) bool {
return a.Enable == b.Enable
}
-// EqualsRefOfLagLeadExpr does deep equals between the two objects.
-func EqualsRefOfLagLeadExpr(a, b *LagLeadExpr) bool {
+// RefOfLagLeadExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfLagLeadExpr(a, b *LagLeadExpr) bool {
if a == b {
return true
}
@@ -3000,27 +3003,27 @@ func EqualsRefOfLagLeadExpr(a, b *LagLeadExpr) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Expr, b.Expr) &&
- EqualsExpr(a.N, b.N) &&
- EqualsExpr(a.Default, b.Default) &&
- EqualsRefOfOverClause(a.OverClause, b.OverClause) &&
- EqualsRefOfNullTreatmentClause(a.NullTreatmentClause, b.NullTreatmentClause)
+ cmp.Expr(a.Expr, b.Expr) &&
+ cmp.Expr(a.N, b.N) &&
+ cmp.Expr(a.Default, b.Default) &&
+ cmp.RefOfOverClause(a.OverClause, b.OverClause) &&
+ cmp.RefOfNullTreatmentClause(a.NullTreatmentClause, b.NullTreatmentClause)
}
-// EqualsRefOfLimit does deep equals between the two objects.
-func EqualsRefOfLimit(a, b *Limit) bool {
+// RefOfLimit does deep equals between the two objects.
+func (cmp *Comparator) RefOfLimit(a, b *Limit) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Offset, b.Offset) &&
- EqualsExpr(a.Rowcount, b.Rowcount)
+ return cmp.Expr(a.Offset, b.Offset) &&
+ cmp.Expr(a.Rowcount, b.Rowcount)
}
-// EqualsRefOfLiteral does deep equals between the two objects.
-func EqualsRefOfLiteral(a, b *Literal) bool {
+// RefOfLiteral does deep equals between the two objects.
+func (cmp *Comparator) RefOfLiteral(a, b *Literal) bool {
if a == b {
return true
}
@@ -3031,8 +3034,8 @@ func EqualsRefOfLiteral(a, b *Literal) bool {
a.Type == b.Type
}
-// EqualsRefOfLoad does deep equals between the two objects.
-func EqualsRefOfLoad(a, b *Load) bool {
+// RefOfLoad does deep equals between the two objects.
+func (cmp *Comparator) RefOfLoad(a, b *Load) bool {
if a == b {
return true
}
@@ -3042,21 +3045,21 @@ func EqualsRefOfLoad(a, b *Load) bool {
return true
}
-// EqualsRefOfLocateExpr does deep equals between the two objects.
-func EqualsRefOfLocateExpr(a, b *LocateExpr) bool {
+// RefOfLocateExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfLocateExpr(a, b *LocateExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.SubStr, b.SubStr) &&
- EqualsExpr(a.Str, b.Str) &&
- EqualsExpr(a.Pos, b.Pos)
+ return cmp.Expr(a.SubStr, b.SubStr) &&
+ cmp.Expr(a.Str, b.Str) &&
+ cmp.Expr(a.Pos, b.Pos)
}
-// EqualsRefOfLockOption does deep equals between the two objects.
-func EqualsRefOfLockOption(a, b *LockOption) bool {
+// RefOfLockOption does deep equals between the two objects.
+func (cmp *Comparator) RefOfLockOption(a, b *LockOption) bool {
if a == b {
return true
}
@@ -3066,19 +3069,19 @@ func EqualsRefOfLockOption(a, b *LockOption) bool {
return a.Type == b.Type
}
-// EqualsRefOfLockTables does deep equals between the two objects.
-func EqualsRefOfLockTables(a, b *LockTables) bool {
+// RefOfLockTables does deep equals between the two objects.
+func (cmp *Comparator) RefOfLockTables(a, b *LockTables) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableAndLockTypes(a.Tables, b.Tables)
+ return cmp.TableAndLockTypes(a.Tables, b.Tables)
}
-// EqualsRefOfLockingFunc does deep equals between the two objects.
-func EqualsRefOfLockingFunc(a, b *LockingFunc) bool {
+// RefOfLockingFunc does deep equals between the two objects.
+func (cmp *Comparator) RefOfLockingFunc(a, b *LockingFunc) bool {
if a == b {
return true
}
@@ -3086,25 +3089,25 @@ func EqualsRefOfLockingFunc(a, b *LockingFunc) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Name, b.Name) &&
- EqualsExpr(a.Timeout, b.Timeout)
+ cmp.Expr(a.Name, b.Name) &&
+ cmp.Expr(a.Timeout, b.Timeout)
}
-// EqualsRefOfMatchExpr does deep equals between the two objects.
-func EqualsRefOfMatchExpr(a, b *MatchExpr) bool {
+// RefOfMatchExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfMatchExpr(a, b *MatchExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSliceOfRefOfColName(a.Columns, b.Columns) &&
- EqualsExpr(a.Expr, b.Expr) &&
+ return cmp.SliceOfRefOfColName(a.Columns, b.Columns) &&
+ cmp.Expr(a.Expr, b.Expr) &&
a.Option == b.Option
}
-// EqualsRefOfMax does deep equals between the two objects.
-func EqualsRefOfMax(a, b *Max) bool {
+// RefOfMax does deep equals between the two objects.
+func (cmp *Comparator) RefOfMax(a, b *Max) bool {
if a == b {
return true
}
@@ -3112,23 +3115,23 @@ func EqualsRefOfMax(a, b *Max) bool {
return false
}
return a.Distinct == b.Distinct &&
- EqualsExpr(a.Arg, b.Arg)
+ cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfMemberOfExpr does deep equals between the two objects.
-func EqualsRefOfMemberOfExpr(a, b *MemberOfExpr) bool {
+// RefOfMemberOfExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfMemberOfExpr(a, b *MemberOfExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Value, b.Value) &&
- EqualsExpr(a.JSONArr, b.JSONArr)
+ return cmp.Expr(a.Value, b.Value) &&
+ cmp.Expr(a.JSONArr, b.JSONArr)
}
-// EqualsRefOfMin does deep equals between the two objects.
-func EqualsRefOfMin(a, b *Min) bool {
+// RefOfMin does deep equals between the two objects.
+func (cmp *Comparator) RefOfMin(a, b *Min) bool {
if a == b {
return true
}
@@ -3136,11 +3139,11 @@ func EqualsRefOfMin(a, b *Min) bool {
return false
}
return a.Distinct == b.Distinct &&
- EqualsExpr(a.Arg, b.Arg)
+ cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfModifyColumn does deep equals between the two objects.
-func EqualsRefOfModifyColumn(a, b *ModifyColumn) bool {
+// RefOfModifyColumn does deep equals between the two objects.
+func (cmp *Comparator) RefOfModifyColumn(a, b *ModifyColumn) bool {
if a == b {
return true
}
@@ -3148,85 +3151,85 @@ func EqualsRefOfModifyColumn(a, b *ModifyColumn) bool {
return false
}
return a.First == b.First &&
- EqualsRefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) &&
- EqualsRefOfColName(a.After, b.After)
+ cmp.RefOfColumnDefinition(a.NewColDefinition, b.NewColDefinition) &&
+ cmp.RefOfColName(a.After, b.After)
}
-// EqualsRefOfNTHValueExpr does deep equals between the two objects.
-func EqualsRefOfNTHValueExpr(a, b *NTHValueExpr) bool {
+// RefOfNTHValueExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfNTHValueExpr(a, b *NTHValueExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsExpr(a.N, b.N) &&
- EqualsRefOfOverClause(a.OverClause, b.OverClause) &&
- EqualsRefOfFromFirstLastClause(a.FromFirstLastClause, b.FromFirstLastClause) &&
- EqualsRefOfNullTreatmentClause(a.NullTreatmentClause, b.NullTreatmentClause)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.Expr(a.N, b.N) &&
+ cmp.RefOfOverClause(a.OverClause, b.OverClause) &&
+ cmp.RefOfFromFirstLastClause(a.FromFirstLastClause, b.FromFirstLastClause) &&
+ cmp.RefOfNullTreatmentClause(a.NullTreatmentClause, b.NullTreatmentClause)
}
-// EqualsRefOfNamedWindow does deep equals between the two objects.
-func EqualsRefOfNamedWindow(a, b *NamedWindow) bool {
+// RefOfNamedWindow does deep equals between the two objects.
+func (cmp *Comparator) RefOfNamedWindow(a, b *NamedWindow) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsWindowDefinitions(a.Windows, b.Windows)
+ return cmp.WindowDefinitions(a.Windows, b.Windows)
}
-// EqualsNamedWindows does deep equals between the two objects.
-func EqualsNamedWindows(a, b NamedWindows) bool {
+// NamedWindows does deep equals between the two objects.
+func (cmp *Comparator) NamedWindows(a, b NamedWindows) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfNamedWindow(a[i], b[i]) {
+ if !cmp.RefOfNamedWindow(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfNextval does deep equals between the two objects.
-func EqualsRefOfNextval(a, b *Nextval) bool {
+// RefOfNextval does deep equals between the two objects.
+func (cmp *Comparator) RefOfNextval(a, b *Nextval) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr)
+ return cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfNotExpr does deep equals between the two objects.
-func EqualsRefOfNotExpr(a, b *NotExpr) bool {
+// RefOfNotExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfNotExpr(a, b *NotExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr)
+ return cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfNtileExpr does deep equals between the two objects.
-func EqualsRefOfNtileExpr(a, b *NtileExpr) bool {
+// RefOfNtileExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfNtileExpr(a, b *NtileExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.N, b.N) &&
- EqualsRefOfOverClause(a.OverClause, b.OverClause)
+ return cmp.Expr(a.N, b.N) &&
+ cmp.RefOfOverClause(a.OverClause, b.OverClause)
}
-// EqualsRefOfNullTreatmentClause does deep equals between the two objects.
-func EqualsRefOfNullTreatmentClause(a, b *NullTreatmentClause) bool {
+// RefOfNullTreatmentClause does deep equals between the two objects.
+func (cmp *Comparator) RefOfNullTreatmentClause(a, b *NullTreatmentClause) bool {
if a == b {
return true
}
@@ -3236,8 +3239,8 @@ func EqualsRefOfNullTreatmentClause(a, b *NullTreatmentClause) bool {
return a.Type == b.Type
}
-// EqualsRefOfNullVal does deep equals between the two objects.
-func EqualsRefOfNullVal(a, b *NullVal) bool {
+// RefOfNullVal does deep equals between the two objects.
+func (cmp *Comparator) RefOfNullVal(a, b *NullVal) bool {
if a == b {
return true
}
@@ -3247,8 +3250,8 @@ func EqualsRefOfNullVal(a, b *NullVal) bool {
return true
}
-// EqualsRefOfOffset does deep equals between the two objects.
-func EqualsRefOfOffset(a, b *Offset) bool {
+// RefOfOffset does deep equals between the two objects.
+func (cmp *Comparator) RefOfOffset(a, b *Offset) bool {
if a == b {
return true
}
@@ -3259,80 +3262,80 @@ func EqualsRefOfOffset(a, b *Offset) bool {
a.Original == b.Original
}
-// EqualsOnDup does deep equals between the two objects.
-func EqualsOnDup(a, b OnDup) bool {
+// OnDup does deep equals between the two objects.
+func (cmp *Comparator) OnDup(a, b OnDup) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfUpdateExpr(a[i], b[i]) {
+ if !cmp.RefOfUpdateExpr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfOptLike does deep equals between the two objects.
-func EqualsRefOfOptLike(a, b *OptLike) bool {
+// RefOfOptLike does deep equals between the two objects.
+func (cmp *Comparator) RefOfOptLike(a, b *OptLike) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableName(a.LikeTable, b.LikeTable)
+ return cmp.TableName(a.LikeTable, b.LikeTable)
}
-// EqualsRefOfOrExpr does deep equals between the two objects.
-func EqualsRefOfOrExpr(a, b *OrExpr) bool {
+// RefOfOrExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfOrExpr(a, b *OrExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Left, b.Left) &&
- EqualsExpr(a.Right, b.Right)
+ return cmp.Expr(a.Left, b.Left) &&
+ cmp.Expr(a.Right, b.Right)
}
-// EqualsRefOfOrder does deep equals between the two objects.
-func EqualsRefOfOrder(a, b *Order) bool {
+// RefOfOrder does deep equals between the two objects.
+func (cmp *Comparator) RefOfOrder(a, b *Order) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
+ return cmp.Expr(a.Expr, b.Expr) &&
a.Direction == b.Direction
}
-// EqualsOrderBy does deep equals between the two objects.
-func EqualsOrderBy(a, b OrderBy) bool {
+// OrderBy does deep equals between the two objects.
+func (cmp *Comparator) OrderBy(a, b OrderBy) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfOrder(a[i], b[i]) {
+ if !cmp.RefOfOrder(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfOrderByOption does deep equals between the two objects.
-func EqualsRefOfOrderByOption(a, b *OrderByOption) bool {
+// RefOfOrderByOption does deep equals between the two objects.
+func (cmp *Comparator) RefOfOrderByOption(a, b *OrderByOption) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsColumns(a.Cols, b.Cols)
+ return cmp.Columns(a.Cols, b.Cols)
}
-// EqualsRefOfOtherAdmin does deep equals between the two objects.
-func EqualsRefOfOtherAdmin(a, b *OtherAdmin) bool {
+// RefOfOtherAdmin does deep equals between the two objects.
+func (cmp *Comparator) RefOfOtherAdmin(a, b *OtherAdmin) bool {
if a == b {
return true
}
@@ -3342,8 +3345,8 @@ func EqualsRefOfOtherAdmin(a, b *OtherAdmin) bool {
return true
}
-// EqualsRefOfOtherRead does deep equals between the two objects.
-func EqualsRefOfOtherRead(a, b *OtherRead) bool {
+// RefOfOtherRead does deep equals between the two objects.
+func (cmp *Comparator) RefOfOtherRead(a, b *OtherRead) bool {
if a == b {
return true
}
@@ -3353,54 +3356,54 @@ func EqualsRefOfOtherRead(a, b *OtherRead) bool {
return true
}
-// EqualsRefOfOverClause does deep equals between the two objects.
-func EqualsRefOfOverClause(a, b *OverClause) bool {
+// RefOfOverClause does deep equals between the two objects.
+func (cmp *Comparator) RefOfOverClause(a, b *OverClause) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.WindowName, b.WindowName) &&
- EqualsRefOfWindowSpecification(a.WindowSpec, b.WindowSpec)
+ return cmp.IdentifierCI(a.WindowName, b.WindowName) &&
+ cmp.RefOfWindowSpecification(a.WindowSpec, b.WindowSpec)
}
-// EqualsRefOfParenTableExpr does deep equals between the two objects.
-func EqualsRefOfParenTableExpr(a, b *ParenTableExpr) bool {
+// RefOfParenTableExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfParenTableExpr(a, b *ParenTableExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableExprs(a.Exprs, b.Exprs)
+ return cmp.TableExprs(a.Exprs, b.Exprs)
}
-// EqualsRefOfParsedComments does deep equals between the two objects.
-func EqualsRefOfParsedComments(a, b *ParsedComments) bool {
+// RefOfParsedComments does deep equals between the two objects.
+func (cmp *Comparator) RefOfParsedComments(a, b *ParsedComments) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsComments(a.comments, b.comments)
+ return cmp.Comments(a.comments, b.comments)
}
-// EqualsRefOfPartitionDefinition does deep equals between the two objects.
-func EqualsRefOfPartitionDefinition(a, b *PartitionDefinition) bool {
+// RefOfPartitionDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfPartitionDefinition(a, b *PartitionDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsRefOfPartitionDefinitionOptions(a.Options, b.Options)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.RefOfPartitionDefinitionOptions(a.Options, b.Options)
}
-// EqualsRefOfPartitionDefinitionOptions does deep equals between the two objects.
-func EqualsRefOfPartitionDefinitionOptions(a, b *PartitionDefinitionOptions) bool {
+// RefOfPartitionDefinitionOptions does deep equals between the two objects.
+func (cmp *Comparator) RefOfPartitionDefinitionOptions(a, b *PartitionDefinitionOptions) bool {
if a == b {
return true
}
@@ -3408,18 +3411,18 @@ func EqualsRefOfPartitionDefinitionOptions(a, b *PartitionDefinitionOptions) boo
return false
}
return a.TableSpace == b.TableSpace &&
- EqualsRefOfPartitionValueRange(a.ValueRange, b.ValueRange) &&
- EqualsRefOfLiteral(a.Comment, b.Comment) &&
- EqualsRefOfPartitionEngine(a.Engine, b.Engine) &&
- EqualsRefOfLiteral(a.DataDirectory, b.DataDirectory) &&
- EqualsRefOfLiteral(a.IndexDirectory, b.IndexDirectory) &&
- EqualsRefOfInt(a.MaxRows, b.MaxRows) &&
- EqualsRefOfInt(a.MinRows, b.MinRows) &&
- EqualsSubPartitionDefinitions(a.SubPartitionDefinitions, b.SubPartitionDefinitions)
+ cmp.RefOfPartitionValueRange(a.ValueRange, b.ValueRange) &&
+ cmp.RefOfLiteral(a.Comment, b.Comment) &&
+ cmp.RefOfPartitionEngine(a.Engine, b.Engine) &&
+ cmp.RefOfLiteral(a.DataDirectory, b.DataDirectory) &&
+ cmp.RefOfLiteral(a.IndexDirectory, b.IndexDirectory) &&
+ cmp.RefOfInt(a.MaxRows, b.MaxRows) &&
+ cmp.RefOfInt(a.MinRows, b.MinRows) &&
+ cmp.SubPartitionDefinitions(a.SubPartitionDefinitions, b.SubPartitionDefinitions)
}
-// EqualsRefOfPartitionEngine does deep equals between the two objects.
-func EqualsRefOfPartitionEngine(a, b *PartitionEngine) bool {
+// RefOfPartitionEngine does deep equals between the two objects.
+func (cmp *Comparator) RefOfPartitionEngine(a, b *PartitionEngine) bool {
if a == b {
return true
}
@@ -3430,8 +3433,8 @@ func EqualsRefOfPartitionEngine(a, b *PartitionEngine) bool {
a.Name == b.Name
}
-// EqualsRefOfPartitionOption does deep equals between the two objects.
-func EqualsRefOfPartitionOption(a, b *PartitionOption) bool {
+// RefOfPartitionOption does deep equals between the two objects.
+func (cmp *Comparator) RefOfPartitionOption(a, b *PartitionOption) bool {
if a == b {
return true
}
@@ -3442,14 +3445,14 @@ func EqualsRefOfPartitionOption(a, b *PartitionOption) bool {
a.KeyAlgorithm == b.KeyAlgorithm &&
a.Partitions == b.Partitions &&
a.Type == b.Type &&
- EqualsColumns(a.ColList, b.ColList) &&
- EqualsExpr(a.Expr, b.Expr) &&
- EqualsRefOfSubPartition(a.SubPartition, b.SubPartition) &&
- EqualsSliceOfRefOfPartitionDefinition(a.Definitions, b.Definitions)
+ cmp.Columns(a.ColList, b.ColList) &&
+ cmp.Expr(a.Expr, b.Expr) &&
+ cmp.RefOfSubPartition(a.SubPartition, b.SubPartition) &&
+ cmp.SliceOfRefOfPartitionDefinition(a.Definitions, b.Definitions)
}
-// EqualsRefOfPartitionSpec does deep equals between the two objects.
-func EqualsRefOfPartitionSpec(a, b *PartitionSpec) bool {
+// RefOfPartitionSpec does deep equals between the two objects.
+func (cmp *Comparator) RefOfPartitionSpec(a, b *PartitionSpec) bool {
if a == b {
return true
}
@@ -3459,14 +3462,14 @@ func EqualsRefOfPartitionSpec(a, b *PartitionSpec) bool {
return a.IsAll == b.IsAll &&
a.WithoutValidation == b.WithoutValidation &&
a.Action == b.Action &&
- EqualsPartitions(a.Names, b.Names) &&
- EqualsRefOfLiteral(a.Number, b.Number) &&
- EqualsTableName(a.TableName, b.TableName) &&
- EqualsSliceOfRefOfPartitionDefinition(a.Definitions, b.Definitions)
+ cmp.Partitions(a.Names, b.Names) &&
+ cmp.RefOfLiteral(a.Number, b.Number) &&
+ cmp.TableName(a.TableName, b.TableName) &&
+ cmp.SliceOfRefOfPartitionDefinition(a.Definitions, b.Definitions)
}
-// EqualsRefOfPartitionValueRange does deep equals between the two objects.
-func EqualsRefOfPartitionValueRange(a, b *PartitionValueRange) bool {
+// RefOfPartitionValueRange does deep equals between the two objects.
+func (cmp *Comparator) RefOfPartitionValueRange(a, b *PartitionValueRange) bool {
if a == b {
return true
}
@@ -3475,24 +3478,24 @@ func EqualsRefOfPartitionValueRange(a, b *PartitionValueRange) bool {
}
return a.Maxvalue == b.Maxvalue &&
a.Type == b.Type &&
- EqualsValTuple(a.Range, b.Range)
+ cmp.ValTuple(a.Range, b.Range)
}
-// EqualsPartitions does deep equals between the two objects.
-func EqualsPartitions(a, b Partitions) bool {
+// Partitions does deep equals between the two objects.
+func (cmp *Comparator) Partitions(a, b Partitions) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsIdentifierCI(a[i], b[i]) {
+ if !cmp.IdentifierCI(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfPerformanceSchemaFuncExpr does deep equals between the two objects.
-func EqualsRefOfPerformanceSchemaFuncExpr(a, b *PerformanceSchemaFuncExpr) bool {
+// RefOfPerformanceSchemaFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfPerformanceSchemaFuncExpr(a, b *PerformanceSchemaFuncExpr) bool {
if a == b {
return true
}
@@ -3500,156 +3503,156 @@ func EqualsRefOfPerformanceSchemaFuncExpr(a, b *PerformanceSchemaFuncExpr) bool
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Argument, b.Argument)
+ cmp.Expr(a.Argument, b.Argument)
}
-// EqualsRefOfPrepareStmt does deep equals between the two objects.
-func EqualsRefOfPrepareStmt(a, b *PrepareStmt) bool {
+// RefOfPrepareStmt does deep equals between the two objects.
+func (cmp *Comparator) RefOfPrepareStmt(a, b *PrepareStmt) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsExpr(a.Statement, b.Statement) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.Expr(a.Statement, b.Statement) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfReferenceDefinition does deep equals between the two objects.
-func EqualsRefOfReferenceDefinition(a, b *ReferenceDefinition) bool {
+// RefOfReferenceDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfReferenceDefinition(a, b *ReferenceDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableName(a.ReferencedTable, b.ReferencedTable) &&
- EqualsColumns(a.ReferencedColumns, b.ReferencedColumns) &&
+ return cmp.TableName(a.ReferencedTable, b.ReferencedTable) &&
+ cmp.Columns(a.ReferencedColumns, b.ReferencedColumns) &&
a.Match == b.Match &&
a.OnDelete == b.OnDelete &&
a.OnUpdate == b.OnUpdate
}
-// EqualsRefOfRegexpInstrExpr does deep equals between the two objects.
-func EqualsRefOfRegexpInstrExpr(a, b *RegexpInstrExpr) bool {
+// RefOfRegexpInstrExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfRegexpInstrExpr(a, b *RegexpInstrExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsExpr(a.Pattern, b.Pattern) &&
- EqualsExpr(a.Position, b.Position) &&
- EqualsExpr(a.Occurrence, b.Occurrence) &&
- EqualsExpr(a.ReturnOption, b.ReturnOption) &&
- EqualsExpr(a.MatchType, b.MatchType)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.Expr(a.Pattern, b.Pattern) &&
+ cmp.Expr(a.Position, b.Position) &&
+ cmp.Expr(a.Occurrence, b.Occurrence) &&
+ cmp.Expr(a.ReturnOption, b.ReturnOption) &&
+ cmp.Expr(a.MatchType, b.MatchType)
}
-// EqualsRefOfRegexpLikeExpr does deep equals between the two objects.
-func EqualsRefOfRegexpLikeExpr(a, b *RegexpLikeExpr) bool {
+// RefOfRegexpLikeExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfRegexpLikeExpr(a, b *RegexpLikeExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsExpr(a.Pattern, b.Pattern) &&
- EqualsExpr(a.MatchType, b.MatchType)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.Expr(a.Pattern, b.Pattern) &&
+ cmp.Expr(a.MatchType, b.MatchType)
}
-// EqualsRefOfRegexpReplaceExpr does deep equals between the two objects.
-func EqualsRefOfRegexpReplaceExpr(a, b *RegexpReplaceExpr) bool {
+// RefOfRegexpReplaceExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfRegexpReplaceExpr(a, b *RegexpReplaceExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsExpr(a.Pattern, b.Pattern) &&
- EqualsExpr(a.Repl, b.Repl) &&
- EqualsExpr(a.Occurrence, b.Occurrence) &&
- EqualsExpr(a.Position, b.Position) &&
- EqualsExpr(a.MatchType, b.MatchType)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.Expr(a.Pattern, b.Pattern) &&
+ cmp.Expr(a.Repl, b.Repl) &&
+ cmp.Expr(a.Occurrence, b.Occurrence) &&
+ cmp.Expr(a.Position, b.Position) &&
+ cmp.Expr(a.MatchType, b.MatchType)
}
-// EqualsRefOfRegexpSubstrExpr does deep equals between the two objects.
-func EqualsRefOfRegexpSubstrExpr(a, b *RegexpSubstrExpr) bool {
+// RefOfRegexpSubstrExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfRegexpSubstrExpr(a, b *RegexpSubstrExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsExpr(a.Pattern, b.Pattern) &&
- EqualsExpr(a.Occurrence, b.Occurrence) &&
- EqualsExpr(a.Position, b.Position) &&
- EqualsExpr(a.MatchType, b.MatchType)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.Expr(a.Pattern, b.Pattern) &&
+ cmp.Expr(a.Occurrence, b.Occurrence) &&
+ cmp.Expr(a.Position, b.Position) &&
+ cmp.Expr(a.MatchType, b.MatchType)
}
-// EqualsRefOfRelease does deep equals between the two objects.
-func EqualsRefOfRelease(a, b *Release) bool {
+// RefOfRelease does deep equals between the two objects.
+func (cmp *Comparator) RefOfRelease(a, b *Release) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name)
+ return cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfRenameColumn does deep equals between the two objects.
-func EqualsRefOfRenameColumn(a, b *RenameColumn) bool {
+// RefOfRenameColumn does deep equals between the two objects.
+func (cmp *Comparator) RefOfRenameColumn(a, b *RenameColumn) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfColName(a.OldName, b.OldName) &&
- EqualsRefOfColName(a.NewName, b.NewName)
+ return cmp.RefOfColName(a.OldName, b.OldName) &&
+ cmp.RefOfColName(a.NewName, b.NewName)
}
-// EqualsRefOfRenameIndex does deep equals between the two objects.
-func EqualsRefOfRenameIndex(a, b *RenameIndex) bool {
+// RefOfRenameIndex does deep equals between the two objects.
+func (cmp *Comparator) RefOfRenameIndex(a, b *RenameIndex) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.OldName, b.OldName) &&
- EqualsIdentifierCI(a.NewName, b.NewName)
+ return cmp.IdentifierCI(a.OldName, b.OldName) &&
+ cmp.IdentifierCI(a.NewName, b.NewName)
}
-// EqualsRefOfRenameTable does deep equals between the two objects.
-func EqualsRefOfRenameTable(a, b *RenameTable) bool {
+// RefOfRenameTable does deep equals between the two objects.
+func (cmp *Comparator) RefOfRenameTable(a, b *RenameTable) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSliceOfRefOfRenameTablePair(a.TablePairs, b.TablePairs)
+ return cmp.SliceOfRefOfRenameTablePair(a.TablePairs, b.TablePairs)
}
-// EqualsRefOfRenameTableName does deep equals between the two objects.
-func EqualsRefOfRenameTableName(a, b *RenameTableName) bool {
+// RefOfRenameTableName does deep equals between the two objects.
+func (cmp *Comparator) RefOfRenameTableName(a, b *RenameTableName) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableName(a.Table, b.Table)
+ return cmp.TableName(a.Table, b.Table)
}
-// EqualsRefOfRevertMigration does deep equals between the two objects.
-func EqualsRefOfRevertMigration(a, b *RevertMigration) bool {
+// RefOfRevertMigration does deep equals between the two objects.
+func (cmp *Comparator) RefOfRevertMigration(a, b *RevertMigration) bool {
if a == b {
return true
}
@@ -3657,11 +3660,11 @@ func EqualsRefOfRevertMigration(a, b *RevertMigration) bool {
return false
}
return a.UUID == b.UUID &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfRollback does deep equals between the two objects.
-func EqualsRefOfRollback(a, b *Rollback) bool {
+// RefOfRollback does deep equals between the two objects.
+func (cmp *Comparator) RefOfRollback(a, b *Rollback) bool {
if a == b {
return true
}
@@ -3671,35 +3674,35 @@ func EqualsRefOfRollback(a, b *Rollback) bool {
return true
}
-// EqualsRootNode does deep equals between the two objects.
-func EqualsRootNode(a, b RootNode) bool {
- return EqualsSQLNode(a.SQLNode, b.SQLNode)
+// RootNode does deep equals between the two objects.
+func (cmp *Comparator) RootNode(a, b RootNode) bool {
+ return cmp.SQLNode(a.SQLNode, b.SQLNode)
}
-// EqualsRefOfSRollback does deep equals between the two objects.
-func EqualsRefOfSRollback(a, b *SRollback) bool {
+// RefOfSRollback does deep equals between the two objects.
+func (cmp *Comparator) RefOfSRollback(a, b *SRollback) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name)
+ return cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfSavepoint does deep equals between the two objects.
-func EqualsRefOfSavepoint(a, b *Savepoint) bool {
+// RefOfSavepoint does deep equals between the two objects.
+func (cmp *Comparator) RefOfSavepoint(a, b *Savepoint) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name)
+ return cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfSelect does deep equals between the two objects.
-func EqualsRefOfSelect(a, b *Select) bool {
+// RefOfSelect does deep equals between the two objects.
+func (cmp *Comparator) RefOfSelect(a, b *Select) bool {
if a == b {
return true
}
@@ -3709,36 +3712,36 @@ func EqualsRefOfSelect(a, b *Select) bool {
return a.Distinct == b.Distinct &&
a.StraightJoinHint == b.StraightJoinHint &&
a.SQLCalcFoundRows == b.SQLCalcFoundRows &&
- EqualsRefOfBool(a.Cache, b.Cache) &&
- EqualsSliceOfTableExpr(a.From, b.From) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsSelectExprs(a.SelectExprs, b.SelectExprs) &&
- EqualsRefOfWhere(a.Where, b.Where) &&
- EqualsRefOfWith(a.With, b.With) &&
- EqualsGroupBy(a.GroupBy, b.GroupBy) &&
- EqualsRefOfWhere(a.Having, b.Having) &&
- EqualsNamedWindows(a.Windows, b.Windows) &&
- EqualsOrderBy(a.OrderBy, b.OrderBy) &&
- EqualsRefOfLimit(a.Limit, b.Limit) &&
+ cmp.RefOfBool(a.Cache, b.Cache) &&
+ cmp.SliceOfTableExpr(a.From, b.From) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.SelectExprs(a.SelectExprs, b.SelectExprs) &&
+ cmp.RefOfWhere(a.Where, b.Where) &&
+ cmp.RefOfWith(a.With, b.With) &&
+ cmp.GroupBy(a.GroupBy, b.GroupBy) &&
+ cmp.RefOfWhere(a.Having, b.Having) &&
+ cmp.NamedWindows(a.Windows, b.Windows) &&
+ cmp.OrderBy(a.OrderBy, b.OrderBy) &&
+ cmp.RefOfLimit(a.Limit, b.Limit) &&
a.Lock == b.Lock &&
- EqualsRefOfSelectInto(a.Into, b.Into)
+ cmp.RefOfSelectInto(a.Into, b.Into)
}
-// EqualsSelectExprs does deep equals between the two objects.
-func EqualsSelectExprs(a, b SelectExprs) bool {
+// SelectExprs does deep equals between the two objects.
+func (cmp *Comparator) SelectExprs(a, b SelectExprs) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsSelectExpr(a[i], b[i]) {
+ if !cmp.SelectExpr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfSelectInto does deep equals between the two objects.
-func EqualsRefOfSelectInto(a, b *SelectInto) bool {
+// RefOfSelectInto does deep equals between the two objects.
+func (cmp *Comparator) RefOfSelectInto(a, b *SelectInto) bool {
if a == b {
return true
}
@@ -3751,213 +3754,211 @@ func EqualsRefOfSelectInto(a, b *SelectInto) bool {
a.Manifest == b.Manifest &&
a.Overwrite == b.Overwrite &&
a.Type == b.Type &&
- EqualsColumnCharset(a.Charset, b.Charset)
+ cmp.ColumnCharset(a.Charset, b.Charset)
}
-// EqualsRefOfSet does deep equals between the two objects.
-func EqualsRefOfSet(a, b *Set) bool {
+// RefOfSet does deep equals between the two objects.
+func (cmp *Comparator) RefOfSet(a, b *Set) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsSetExprs(a.Exprs, b.Exprs)
+ return cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.SetExprs(a.Exprs, b.Exprs)
}
-// EqualsRefOfSetExpr does deep equals between the two objects.
-func EqualsRefOfSetExpr(a, b *SetExpr) bool {
+// RefOfSetExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfSetExpr(a, b *SetExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfVariable(a.Var, b.Var) &&
- EqualsExpr(a.Expr, b.Expr)
+ return cmp.RefOfVariable(a.Var, b.Var) &&
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsSetExprs does deep equals between the two objects.
-func EqualsSetExprs(a, b SetExprs) bool {
+// SetExprs does deep equals between the two objects.
+func (cmp *Comparator) SetExprs(a, b SetExprs) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfSetExpr(a[i], b[i]) {
+ if !cmp.RefOfSetExpr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfSetTransaction does deep equals between the two objects.
-func EqualsRefOfSetTransaction(a, b *SetTransaction) bool {
+// RefOfShow does deep equals between the two objects.
+func (cmp *Comparator) RefOfShow(a, b *Show) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- a.Scope == b.Scope &&
- EqualsSliceOfCharacteristic(a.Characteristics, b.Characteristics)
+ return cmp.ShowInternal(a.Internal, b.Internal)
}
-// EqualsRefOfShow does deep equals between the two objects.
-func EqualsRefOfShow(a, b *Show) bool {
+// RefOfShowBasic does deep equals between the two objects.
+func (cmp *Comparator) RefOfShowBasic(a, b *ShowBasic) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsShowInternal(a.Internal, b.Internal)
+ return a.Full == b.Full &&
+ a.Command == b.Command &&
+ cmp.TableName(a.Tbl, b.Tbl) &&
+ cmp.IdentifierCS(a.DbName, b.DbName) &&
+ cmp.RefOfShowFilter(a.Filter, b.Filter)
}
-// EqualsRefOfShowBasic does deep equals between the two objects.
-func EqualsRefOfShowBasic(a, b *ShowBasic) bool {
+// RefOfShowCreate does deep equals between the two objects.
+func (cmp *Comparator) RefOfShowCreate(a, b *ShowCreate) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return a.Full == b.Full &&
- a.Command == b.Command &&
- EqualsTableName(a.Tbl, b.Tbl) &&
- EqualsIdentifierCS(a.DbName, b.DbName) &&
- EqualsRefOfShowFilter(a.Filter, b.Filter)
+ return a.Command == b.Command &&
+ cmp.TableName(a.Op, b.Op)
}
-// EqualsRefOfShowCreate does deep equals between the two objects.
-func EqualsRefOfShowCreate(a, b *ShowCreate) bool {
+// RefOfShowFilter does deep equals between the two objects.
+func (cmp *Comparator) RefOfShowFilter(a, b *ShowFilter) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return a.Command == b.Command &&
- EqualsTableName(a.Op, b.Op)
+ return a.Like == b.Like &&
+ cmp.Expr(a.Filter, b.Filter)
}
-// EqualsRefOfShowFilter does deep equals between the two objects.
-func EqualsRefOfShowFilter(a, b *ShowFilter) bool {
+// RefOfShowMigrationLogs does deep equals between the two objects.
+func (cmp *Comparator) RefOfShowMigrationLogs(a, b *ShowMigrationLogs) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return a.Like == b.Like &&
- EqualsExpr(a.Filter, b.Filter)
+ return a.UUID == b.UUID &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsRefOfShowMigrationLogs does deep equals between the two objects.
-func EqualsRefOfShowMigrationLogs(a, b *ShowMigrationLogs) bool {
+// RefOfShowOther does deep equals between the two objects.
+func (cmp *Comparator) RefOfShowOther(a, b *ShowOther) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return a.UUID == b.UUID &&
- EqualsRefOfParsedComments(a.Comments, b.Comments)
+ return a.Command == b.Command
}
-// EqualsRefOfShowOther does deep equals between the two objects.
-func EqualsRefOfShowOther(a, b *ShowOther) bool {
+// RefOfShowThrottledApps does deep equals between the two objects.
+func (cmp *Comparator) RefOfShowThrottledApps(a, b *ShowThrottledApps) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return a.Command == b.Command
+ return cmp.Comments(a.Comments, b.Comments)
}
-// EqualsRefOfShowThrottledApps does deep equals between the two objects.
-func EqualsRefOfShowThrottledApps(a, b *ShowThrottledApps) bool {
+// RefOfShowThrottlerStatus does deep equals between the two objects.
+func (cmp *Comparator) RefOfShowThrottlerStatus(a, b *ShowThrottlerStatus) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsComments(a.Comments, b.Comments)
+ return cmp.Comments(a.Comments, b.Comments)
}
-// EqualsRefOfStarExpr does deep equals between the two objects.
-func EqualsRefOfStarExpr(a, b *StarExpr) bool {
+// RefOfStarExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfStarExpr(a, b *StarExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableName(a.TableName, b.TableName)
+ return cmp.TableName(a.TableName, b.TableName)
}
-// EqualsRefOfStd does deep equals between the two objects.
-func EqualsRefOfStd(a, b *Std) bool {
+// RefOfStd does deep equals between the two objects.
+func (cmp *Comparator) RefOfStd(a, b *Std) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfStdDev does deep equals between the two objects.
-func EqualsRefOfStdDev(a, b *StdDev) bool {
+// RefOfStdDev does deep equals between the two objects.
+func (cmp *Comparator) RefOfStdDev(a, b *StdDev) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfStdPop does deep equals between the two objects.
-func EqualsRefOfStdPop(a, b *StdPop) bool {
+// RefOfStdPop does deep equals between the two objects.
+func (cmp *Comparator) RefOfStdPop(a, b *StdPop) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfStdSamp does deep equals between the two objects.
-func EqualsRefOfStdSamp(a, b *StdSamp) bool {
+// RefOfStdSamp does deep equals between the two objects.
+func (cmp *Comparator) RefOfStdSamp(a, b *StdSamp) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfStream does deep equals between the two objects.
-func EqualsRefOfStream(a, b *Stream) bool {
+// RefOfStream does deep equals between the two objects.
+func (cmp *Comparator) RefOfStream(a, b *Stream) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsSelectExpr(a.SelectExpr, b.SelectExpr) &&
- EqualsTableName(a.Table, b.Table)
+ return cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.SelectExpr(a.SelectExpr, b.SelectExpr) &&
+ cmp.TableName(a.Table, b.Table)
}
-// EqualsRefOfSubPartition does deep equals between the two objects.
-func EqualsRefOfSubPartition(a, b *SubPartition) bool {
+// RefOfSubPartition does deep equals between the two objects.
+func (cmp *Comparator) RefOfSubPartition(a, b *SubPartition) bool {
if a == b {
return true
}
@@ -3968,24 +3969,24 @@ func EqualsRefOfSubPartition(a, b *SubPartition) bool {
a.KeyAlgorithm == b.KeyAlgorithm &&
a.SubPartitions == b.SubPartitions &&
a.Type == b.Type &&
- EqualsColumns(a.ColList, b.ColList) &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Columns(a.ColList, b.ColList) &&
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfSubPartitionDefinition does deep equals between the two objects.
-func EqualsRefOfSubPartitionDefinition(a, b *SubPartitionDefinition) bool {
+// RefOfSubPartitionDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfSubPartitionDefinition(a, b *SubPartitionDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsRefOfSubPartitionDefinitionOptions(a.Options, b.Options)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.RefOfSubPartitionDefinitionOptions(a.Options, b.Options)
}
-// EqualsRefOfSubPartitionDefinitionOptions does deep equals between the two objects.
-func EqualsRefOfSubPartitionDefinitionOptions(a, b *SubPartitionDefinitionOptions) bool {
+// RefOfSubPartitionDefinitionOptions does deep equals between the two objects.
+func (cmp *Comparator) RefOfSubPartitionDefinitionOptions(a, b *SubPartitionDefinitionOptions) bool {
if a == b {
return true
}
@@ -3993,53 +3994,53 @@ func EqualsRefOfSubPartitionDefinitionOptions(a, b *SubPartitionDefinitionOption
return false
}
return a.TableSpace == b.TableSpace &&
- EqualsRefOfLiteral(a.Comment, b.Comment) &&
- EqualsRefOfPartitionEngine(a.Engine, b.Engine) &&
- EqualsRefOfLiteral(a.DataDirectory, b.DataDirectory) &&
- EqualsRefOfLiteral(a.IndexDirectory, b.IndexDirectory) &&
- EqualsRefOfInt(a.MaxRows, b.MaxRows) &&
- EqualsRefOfInt(a.MinRows, b.MinRows)
+ cmp.RefOfLiteral(a.Comment, b.Comment) &&
+ cmp.RefOfPartitionEngine(a.Engine, b.Engine) &&
+ cmp.RefOfLiteral(a.DataDirectory, b.DataDirectory) &&
+ cmp.RefOfLiteral(a.IndexDirectory, b.IndexDirectory) &&
+ cmp.RefOfInt(a.MaxRows, b.MaxRows) &&
+ cmp.RefOfInt(a.MinRows, b.MinRows)
}
-// EqualsSubPartitionDefinitions does deep equals between the two objects.
-func EqualsSubPartitionDefinitions(a, b SubPartitionDefinitions) bool {
+// SubPartitionDefinitions does deep equals between the two objects.
+func (cmp *Comparator) SubPartitionDefinitions(a, b SubPartitionDefinitions) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfSubPartitionDefinition(a[i], b[i]) {
+ if !cmp.RefOfSubPartitionDefinition(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfSubquery does deep equals between the two objects.
-func EqualsRefOfSubquery(a, b *Subquery) bool {
+// RefOfSubquery does deep equals between the two objects.
+func (cmp *Comparator) RefOfSubquery(a, b *Subquery) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSelectStatement(a.Select, b.Select)
+ return cmp.SelectStatement(a.Select, b.Select)
}
-// EqualsRefOfSubstrExpr does deep equals between the two objects.
-func EqualsRefOfSubstrExpr(a, b *SubstrExpr) bool {
+// RefOfSubstrExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfSubstrExpr(a, b *SubstrExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Name, b.Name) &&
- EqualsExpr(a.From, b.From) &&
- EqualsExpr(a.To, b.To)
+ return cmp.Expr(a.Name, b.Name) &&
+ cmp.Expr(a.From, b.From) &&
+ cmp.Expr(a.To, b.To)
}
-// EqualsRefOfSum does deep equals between the two objects.
-func EqualsRefOfSum(a, b *Sum) bool {
+// RefOfSum does deep equals between the two objects.
+func (cmp *Comparator) RefOfSum(a, b *Sum) bool {
if a == b {
return true
}
@@ -4047,71 +4048,71 @@ func EqualsRefOfSum(a, b *Sum) bool {
return false
}
return a.Distinct == b.Distinct &&
- EqualsExpr(a.Arg, b.Arg)
+ cmp.Expr(a.Arg, b.Arg)
}
-// EqualsTableExprs does deep equals between the two objects.
-func EqualsTableExprs(a, b TableExprs) bool {
+// TableExprs does deep equals between the two objects.
+func (cmp *Comparator) TableExprs(a, b TableExprs) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsTableExpr(a[i], b[i]) {
+ if !cmp.TableExpr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsTableName does deep equals between the two objects.
-func EqualsTableName(a, b TableName) bool {
- return EqualsIdentifierCS(a.Name, b.Name) &&
- EqualsIdentifierCS(a.Qualifier, b.Qualifier)
+// TableName does deep equals between the two objects.
+func (cmp *Comparator) TableName(a, b TableName) bool {
+ return cmp.IdentifierCS(a.Name, b.Name) &&
+ cmp.IdentifierCS(a.Qualifier, b.Qualifier)
}
-// EqualsTableNames does deep equals between the two objects.
-func EqualsTableNames(a, b TableNames) bool {
+// TableNames does deep equals between the two objects.
+func (cmp *Comparator) TableNames(a, b TableNames) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsTableName(a[i], b[i]) {
+ if !cmp.TableName(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsTableOptions does deep equals between the two objects.
-func EqualsTableOptions(a, b TableOptions) bool {
+// TableOptions does deep equals between the two objects.
+func (cmp *Comparator) TableOptions(a, b TableOptions) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfTableOption(a[i], b[i]) {
+ if !cmp.RefOfTableOption(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfTableSpec does deep equals between the two objects.
-func EqualsRefOfTableSpec(a, b *TableSpec) bool {
+// RefOfTableSpec does deep equals between the two objects.
+func (cmp *Comparator) RefOfTableSpec(a, b *TableSpec) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSliceOfRefOfColumnDefinition(a.Columns, b.Columns) &&
- EqualsSliceOfRefOfIndexDefinition(a.Indexes, b.Indexes) &&
- EqualsSliceOfRefOfConstraintDefinition(a.Constraints, b.Constraints) &&
- EqualsTableOptions(a.Options, b.Options) &&
- EqualsRefOfPartitionOption(a.PartitionOption, b.PartitionOption)
+ return cmp.SliceOfRefOfColumnDefinition(a.Columns, b.Columns) &&
+ cmp.SliceOfRefOfIndexDefinition(a.Indexes, b.Indexes) &&
+ cmp.SliceOfRefOfConstraintDefinition(a.Constraints, b.Constraints) &&
+ cmp.TableOptions(a.Options, b.Options) &&
+ cmp.RefOfPartitionOption(a.PartitionOption, b.PartitionOption)
}
-// EqualsRefOfTablespaceOperation does deep equals between the two objects.
-func EqualsRefOfTablespaceOperation(a, b *TablespaceOperation) bool {
+// RefOfTablespaceOperation does deep equals between the two objects.
+func (cmp *Comparator) RefOfTablespaceOperation(a, b *TablespaceOperation) bool {
if a == b {
return true
}
@@ -4121,8 +4122,8 @@ func EqualsRefOfTablespaceOperation(a, b *TablespaceOperation) bool {
return a.Import == b.Import
}
-// EqualsRefOfTimestampFuncExpr does deep equals between the two objects.
-func EqualsRefOfTimestampFuncExpr(a, b *TimestampFuncExpr) bool {
+// RefOfTimestampFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfTimestampFuncExpr(a, b *TimestampFuncExpr) bool {
if a == b {
return true
}
@@ -4131,12 +4132,12 @@ func EqualsRefOfTimestampFuncExpr(a, b *TimestampFuncExpr) bool {
}
return a.Name == b.Name &&
a.Unit == b.Unit &&
- EqualsExpr(a.Expr1, b.Expr1) &&
- EqualsExpr(a.Expr2, b.Expr2)
+ cmp.Expr(a.Expr1, b.Expr1) &&
+ cmp.Expr(a.Expr2, b.Expr2)
}
-// EqualsRefOfTrimFuncExpr does deep equals between the two objects.
-func EqualsRefOfTrimFuncExpr(a, b *TrimFuncExpr) bool {
+// RefOfTrimFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfTrimFuncExpr(a, b *TrimFuncExpr) bool {
if a == b {
return true
}
@@ -4145,23 +4146,23 @@ func EqualsRefOfTrimFuncExpr(a, b *TrimFuncExpr) bool {
}
return a.TrimFuncType == b.TrimFuncType &&
a.Type == b.Type &&
- EqualsExpr(a.TrimArg, b.TrimArg) &&
- EqualsExpr(a.StringArg, b.StringArg)
+ cmp.Expr(a.TrimArg, b.TrimArg) &&
+ cmp.Expr(a.StringArg, b.StringArg)
}
-// EqualsRefOfTruncateTable does deep equals between the two objects.
-func EqualsRefOfTruncateTable(a, b *TruncateTable) bool {
+// RefOfTruncateTable does deep equals between the two objects.
+func (cmp *Comparator) RefOfTruncateTable(a, b *TruncateTable) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableName(a.Table, b.Table)
+ return cmp.TableName(a.Table, b.Table)
}
-// EqualsRefOfUnaryExpr does deep equals between the two objects.
-func EqualsRefOfUnaryExpr(a, b *UnaryExpr) bool {
+// RefOfUnaryExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfUnaryExpr(a, b *UnaryExpr) bool {
if a == b {
return true
}
@@ -4169,11 +4170,11 @@ func EqualsRefOfUnaryExpr(a, b *UnaryExpr) bool {
return false
}
return a.Operator == b.Operator &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfUnion does deep equals between the two objects.
-func EqualsRefOfUnion(a, b *Union) bool {
+// RefOfUnion does deep equals between the two objects.
+func (cmp *Comparator) RefOfUnion(a, b *Union) bool {
if a == b {
return true
}
@@ -4181,17 +4182,17 @@ func EqualsRefOfUnion(a, b *Union) bool {
return false
}
return a.Distinct == b.Distinct &&
- EqualsSelectStatement(a.Left, b.Left) &&
- EqualsSelectStatement(a.Right, b.Right) &&
- EqualsOrderBy(a.OrderBy, b.OrderBy) &&
- EqualsRefOfWith(a.With, b.With) &&
- EqualsRefOfLimit(a.Limit, b.Limit) &&
+ cmp.SelectStatement(a.Left, b.Left) &&
+ cmp.SelectStatement(a.Right, b.Right) &&
+ cmp.OrderBy(a.OrderBy, b.OrderBy) &&
+ cmp.RefOfWith(a.With, b.With) &&
+ cmp.RefOfLimit(a.Limit, b.Limit) &&
a.Lock == b.Lock &&
- EqualsRefOfSelectInto(a.Into, b.Into)
+ cmp.RefOfSelectInto(a.Into, b.Into)
}
-// EqualsRefOfUnlockTables does deep equals between the two objects.
-func EqualsRefOfUnlockTables(a, b *UnlockTables) bool {
+// RefOfUnlockTables does deep equals between the two objects.
+func (cmp *Comparator) RefOfUnlockTables(a, b *UnlockTables) bool {
if a == b {
return true
}
@@ -4201,103 +4202,116 @@ func EqualsRefOfUnlockTables(a, b *UnlockTables) bool {
return true
}
-// EqualsRefOfUpdate does deep equals between the two objects.
-func EqualsRefOfUpdate(a, b *Update) bool {
+// RefOfUpdate does deep equals between the two objects.
+func (cmp *Comparator) RefOfUpdate(a, b *Update) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfWith(a.With, b.With) &&
- EqualsRefOfParsedComments(a.Comments, b.Comments) &&
+ return cmp.RefOfWith(a.With, b.With) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments) &&
a.Ignore == b.Ignore &&
- EqualsTableExprs(a.TableExprs, b.TableExprs) &&
- EqualsUpdateExprs(a.Exprs, b.Exprs) &&
- EqualsRefOfWhere(a.Where, b.Where) &&
- EqualsOrderBy(a.OrderBy, b.OrderBy) &&
- EqualsRefOfLimit(a.Limit, b.Limit)
+ cmp.TableExprs(a.TableExprs, b.TableExprs) &&
+ cmp.UpdateExprs(a.Exprs, b.Exprs) &&
+ cmp.RefOfWhere(a.Where, b.Where) &&
+ cmp.OrderBy(a.OrderBy, b.OrderBy) &&
+ cmp.RefOfLimit(a.Limit, b.Limit)
}
-// EqualsRefOfUpdateExpr does deep equals between the two objects.
-func EqualsRefOfUpdateExpr(a, b *UpdateExpr) bool {
+// RefOfUpdateExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfUpdateExpr(a, b *UpdateExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfColName(a.Name, b.Name) &&
- EqualsExpr(a.Expr, b.Expr)
+ return cmp.RefOfColName(a.Name, b.Name) &&
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsUpdateExprs does deep equals between the two objects.
-func EqualsUpdateExprs(a, b UpdateExprs) bool {
+// UpdateExprs does deep equals between the two objects.
+func (cmp *Comparator) UpdateExprs(a, b UpdateExprs) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfUpdateExpr(a[i], b[i]) {
+ if !cmp.RefOfUpdateExpr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfUpdateXMLExpr does deep equals between the two objects.
-func EqualsRefOfUpdateXMLExpr(a, b *UpdateXMLExpr) bool {
+// RefOfUpdateXMLExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfUpdateXMLExpr(a, b *UpdateXMLExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Target, b.Target) &&
- EqualsExpr(a.XPathExpr, b.XPathExpr) &&
- EqualsExpr(a.NewXML, b.NewXML)
+ return cmp.Expr(a.Target, b.Target) &&
+ cmp.Expr(a.XPathExpr, b.XPathExpr) &&
+ cmp.Expr(a.NewXML, b.NewXML)
}
-// EqualsRefOfUse does deep equals between the two objects.
-func EqualsRefOfUse(a, b *Use) bool {
+// RefOfUse does deep equals between the two objects.
+func (cmp *Comparator) RefOfUse(a, b *Use) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCS(a.DBName, b.DBName)
+ return cmp.IdentifierCS(a.DBName, b.DBName)
}
-// EqualsRefOfVStream does deep equals between the two objects.
-func EqualsRefOfVStream(a, b *VStream) bool {
+// RefOfVExplainStmt does deep equals between the two objects.
+func (cmp *Comparator) RefOfVExplainStmt(a, b *VExplainStmt) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfParsedComments(a.Comments, b.Comments) &&
- EqualsSelectExpr(a.SelectExpr, b.SelectExpr) &&
- EqualsTableName(a.Table, b.Table) &&
- EqualsRefOfWhere(a.Where, b.Where) &&
- EqualsRefOfLimit(a.Limit, b.Limit)
+ return a.Type == b.Type &&
+ cmp.Statement(a.Statement, b.Statement) &&
+ cmp.RefOfParsedComments(a.Comments, b.Comments)
}
-// EqualsValTuple does deep equals between the two objects.
-func EqualsValTuple(a, b ValTuple) bool {
+// RefOfVStream does deep equals between the two objects.
+func (cmp *Comparator) RefOfVStream(a, b *VStream) bool {
+ if a == b {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ return cmp.RefOfParsedComments(a.Comments, b.Comments) &&
+ cmp.SelectExpr(a.SelectExpr, b.SelectExpr) &&
+ cmp.TableName(a.Table, b.Table) &&
+ cmp.RefOfWhere(a.Where, b.Where) &&
+ cmp.RefOfLimit(a.Limit, b.Limit)
+}
+
+// ValTuple does deep equals between the two objects.
+func (cmp *Comparator) ValTuple(a, b ValTuple) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsExpr(a[i], b[i]) {
+ if !cmp.Expr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfValidation does deep equals between the two objects.
-func EqualsRefOfValidation(a, b *Validation) bool {
+// RefOfValidation does deep equals between the two objects.
+func (cmp *Comparator) RefOfValidation(a, b *Validation) bool {
if a == b {
return true
}
@@ -4307,54 +4321,54 @@ func EqualsRefOfValidation(a, b *Validation) bool {
return a.With == b.With
}
-// EqualsValues does deep equals between the two objects.
-func EqualsValues(a, b Values) bool {
+// Values does deep equals between the two objects.
+func (cmp *Comparator) Values(a, b Values) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsValTuple(a[i], b[i]) {
+ if !cmp.ValTuple(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfValuesFuncExpr does deep equals between the two objects.
-func EqualsRefOfValuesFuncExpr(a, b *ValuesFuncExpr) bool {
+// RefOfValuesFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfValuesFuncExpr(a, b *ValuesFuncExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsRefOfColName(a.Name, b.Name)
+ return cmp.RefOfColName(a.Name, b.Name)
}
-// EqualsRefOfVarPop does deep equals between the two objects.
-func EqualsRefOfVarPop(a, b *VarPop) bool {
+// RefOfVarPop does deep equals between the two objects.
+func (cmp *Comparator) RefOfVarPop(a, b *VarPop) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfVarSamp does deep equals between the two objects.
-func EqualsRefOfVarSamp(a, b *VarSamp) bool {
+// RefOfVarSamp does deep equals between the two objects.
+func (cmp *Comparator) RefOfVarSamp(a, b *VarSamp) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsRefOfVariable does deep equals between the two objects.
-func EqualsRefOfVariable(a, b *Variable) bool {
+// RefOfVariable does deep equals between the two objects.
+func (cmp *Comparator) RefOfVariable(a, b *Variable) bool {
if a == b {
return true
}
@@ -4362,65 +4376,65 @@ func EqualsRefOfVariable(a, b *Variable) bool {
return false
}
return a.Scope == b.Scope &&
- EqualsIdentifierCI(a.Name, b.Name)
+ cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfVariance does deep equals between the two objects.
-func EqualsRefOfVariance(a, b *Variance) bool {
+// RefOfVariance does deep equals between the two objects.
+func (cmp *Comparator) RefOfVariance(a, b *Variance) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Arg, b.Arg)
+ return cmp.Expr(a.Arg, b.Arg)
}
-// EqualsVindexParam does deep equals between the two objects.
-func EqualsVindexParam(a, b VindexParam) bool {
+// VindexParam does deep equals between the two objects.
+func (cmp *Comparator) VindexParam(a, b VindexParam) bool {
return a.Val == b.Val &&
- EqualsIdentifierCI(a.Key, b.Key)
+ cmp.IdentifierCI(a.Key, b.Key)
}
-// EqualsRefOfVindexSpec does deep equals between the two objects.
-func EqualsRefOfVindexSpec(a, b *VindexSpec) bool {
+// RefOfVindexSpec does deep equals between the two objects.
+func (cmp *Comparator) RefOfVindexSpec(a, b *VindexSpec) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsIdentifierCI(a.Type, b.Type) &&
- EqualsSliceOfVindexParam(a.Params, b.Params)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.IdentifierCI(a.Type, b.Type) &&
+ cmp.SliceOfVindexParam(a.Params, b.Params)
}
-// EqualsRefOfWeightStringFuncExpr does deep equals between the two objects.
-func EqualsRefOfWeightStringFuncExpr(a, b *WeightStringFuncExpr) bool {
+// RefOfWeightStringFuncExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfWeightStringFuncExpr(a, b *WeightStringFuncExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Expr, b.Expr) &&
- EqualsRefOfConvertType(a.As, b.As)
+ return cmp.Expr(a.Expr, b.Expr) &&
+ cmp.RefOfConvertType(a.As, b.As)
}
-// EqualsRefOfWhen does deep equals between the two objects.
-func EqualsRefOfWhen(a, b *When) bool {
+// RefOfWhen does deep equals between the two objects.
+func (cmp *Comparator) RefOfWhen(a, b *When) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Cond, b.Cond) &&
- EqualsExpr(a.Val, b.Val)
+ return cmp.Expr(a.Cond, b.Cond) &&
+ cmp.Expr(a.Val, b.Val)
}
-// EqualsRefOfWhere does deep equals between the two objects.
-func EqualsRefOfWhere(a, b *Where) bool {
+// RefOfWhere does deep equals between the two objects.
+func (cmp *Comparator) RefOfWhere(a, b *Where) bool {
if a == b {
return true
}
@@ -4428,50 +4442,50 @@ func EqualsRefOfWhere(a, b *Where) bool {
return false
}
return a.Type == b.Type &&
- EqualsExpr(a.Expr, b.Expr)
+ cmp.Expr(a.Expr, b.Expr)
}
-// EqualsRefOfWindowDefinition does deep equals between the two objects.
-func EqualsRefOfWindowDefinition(a, b *WindowDefinition) bool {
+// RefOfWindowDefinition does deep equals between the two objects.
+func (cmp *Comparator) RefOfWindowDefinition(a, b *WindowDefinition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsRefOfWindowSpecification(a.WindowSpec, b.WindowSpec)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.RefOfWindowSpecification(a.WindowSpec, b.WindowSpec)
}
-// EqualsWindowDefinitions does deep equals between the two objects.
-func EqualsWindowDefinitions(a, b WindowDefinitions) bool {
+// WindowDefinitions does deep equals between the two objects.
+func (cmp *Comparator) WindowDefinitions(a, b WindowDefinitions) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfWindowDefinition(a[i], b[i]) {
+ if !cmp.RefOfWindowDefinition(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfWindowSpecification does deep equals between the two objects.
-func EqualsRefOfWindowSpecification(a, b *WindowSpecification) bool {
+// RefOfWindowSpecification does deep equals between the two objects.
+func (cmp *Comparator) RefOfWindowSpecification(a, b *WindowSpecification) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsExprs(a.PartitionClause, b.PartitionClause) &&
- EqualsOrderBy(a.OrderClause, b.OrderClause) &&
- EqualsRefOfFrameClause(a.FrameClause, b.FrameClause)
+ return cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.Exprs(a.PartitionClause, b.PartitionClause) &&
+ cmp.OrderBy(a.OrderClause, b.OrderClause) &&
+ cmp.RefOfFrameClause(a.FrameClause, b.FrameClause)
}
-// EqualsRefOfWith does deep equals between the two objects.
-func EqualsRefOfWith(a, b *With) bool {
+// RefOfWith does deep equals between the two objects.
+func (cmp *Comparator) RefOfWith(a, b *With) bool {
if a == b {
return true
}
@@ -4479,23 +4493,23 @@ func EqualsRefOfWith(a, b *With) bool {
return false
}
return a.Recursive == b.Recursive &&
- EqualsSliceOfRefOfCommonTableExpr(a.ctes, b.ctes)
+ cmp.SliceOfRefOfCommonTableExpr(a.ctes, b.ctes)
}
-// EqualsRefOfXorExpr does deep equals between the two objects.
-func EqualsRefOfXorExpr(a, b *XorExpr) bool {
+// RefOfXorExpr does deep equals between the two objects.
+func (cmp *Comparator) RefOfXorExpr(a, b *XorExpr) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Left, b.Left) &&
- EqualsExpr(a.Right, b.Right)
+ return cmp.Expr(a.Left, b.Left) &&
+ cmp.Expr(a.Right, b.Right)
}
-// EqualsAggrFunc does deep equals between the two objects.
-func EqualsAggrFunc(inA, inB AggrFunc) bool {
+// AggrFunc does deep equals between the two objects.
+func (cmp *Comparator) AggrFunc(inA, inB AggrFunc) bool {
if inA == nil && inB == nil {
return true
}
@@ -4508,111 +4522,111 @@ func EqualsAggrFunc(inA, inB AggrFunc) bool {
if !ok {
return false
}
- return EqualsRefOfAvg(a, b)
+ return cmp.RefOfAvg(a, b)
case *BitAnd:
b, ok := inB.(*BitAnd)
if !ok {
return false
}
- return EqualsRefOfBitAnd(a, b)
+ return cmp.RefOfBitAnd(a, b)
case *BitOr:
b, ok := inB.(*BitOr)
if !ok {
return false
}
- return EqualsRefOfBitOr(a, b)
+ return cmp.RefOfBitOr(a, b)
case *BitXor:
b, ok := inB.(*BitXor)
if !ok {
return false
}
- return EqualsRefOfBitXor(a, b)
+ return cmp.RefOfBitXor(a, b)
case *Count:
b, ok := inB.(*Count)
if !ok {
return false
}
- return EqualsRefOfCount(a, b)
+ return cmp.RefOfCount(a, b)
case *CountStar:
b, ok := inB.(*CountStar)
if !ok {
return false
}
- return EqualsRefOfCountStar(a, b)
+ return cmp.RefOfCountStar(a, b)
case *GroupConcatExpr:
b, ok := inB.(*GroupConcatExpr)
if !ok {
return false
}
- return EqualsRefOfGroupConcatExpr(a, b)
+ return cmp.RefOfGroupConcatExpr(a, b)
case *Max:
b, ok := inB.(*Max)
if !ok {
return false
}
- return EqualsRefOfMax(a, b)
+ return cmp.RefOfMax(a, b)
case *Min:
b, ok := inB.(*Min)
if !ok {
return false
}
- return EqualsRefOfMin(a, b)
+ return cmp.RefOfMin(a, b)
case *Std:
b, ok := inB.(*Std)
if !ok {
return false
}
- return EqualsRefOfStd(a, b)
+ return cmp.RefOfStd(a, b)
case *StdDev:
b, ok := inB.(*StdDev)
if !ok {
return false
}
- return EqualsRefOfStdDev(a, b)
+ return cmp.RefOfStdDev(a, b)
case *StdPop:
b, ok := inB.(*StdPop)
if !ok {
return false
}
- return EqualsRefOfStdPop(a, b)
+ return cmp.RefOfStdPop(a, b)
case *StdSamp:
b, ok := inB.(*StdSamp)
if !ok {
return false
}
- return EqualsRefOfStdSamp(a, b)
+ return cmp.RefOfStdSamp(a, b)
case *Sum:
b, ok := inB.(*Sum)
if !ok {
return false
}
- return EqualsRefOfSum(a, b)
+ return cmp.RefOfSum(a, b)
case *VarPop:
b, ok := inB.(*VarPop)
if !ok {
return false
}
- return EqualsRefOfVarPop(a, b)
+ return cmp.RefOfVarPop(a, b)
case *VarSamp:
b, ok := inB.(*VarSamp)
if !ok {
return false
}
- return EqualsRefOfVarSamp(a, b)
+ return cmp.RefOfVarSamp(a, b)
case *Variance:
b, ok := inB.(*Variance)
if !ok {
return false
}
- return EqualsRefOfVariance(a, b)
+ return cmp.RefOfVariance(a, b)
default:
// this should never happen
return false
}
}
-// EqualsAlterOption does deep equals between the two objects.
-func EqualsAlterOption(inA, inB AlterOption) bool {
+// AlterOption does deep equals between the two objects.
+func (cmp *Comparator) AlterOption(inA, inB AlterOption) bool {
if inA == nil && inB == nil {
return true
}
@@ -4625,19 +4639,19 @@ func EqualsAlterOption(inA, inB AlterOption) bool {
if !ok {
return false
}
- return EqualsRefOfAddColumns(a, b)
+ return cmp.RefOfAddColumns(a, b)
case *AddConstraintDefinition:
b, ok := inB.(*AddConstraintDefinition)
if !ok {
return false
}
- return EqualsRefOfAddConstraintDefinition(a, b)
+ return cmp.RefOfAddConstraintDefinition(a, b)
case *AddIndexDefinition:
b, ok := inB.(*AddIndexDefinition)
if !ok {
return false
}
- return EqualsRefOfAddIndexDefinition(a, b)
+ return cmp.RefOfAddIndexDefinition(a, b)
case AlgorithmValue:
b, ok := inB.(AlgorithmValue)
if !ok {
@@ -4649,117 +4663,117 @@ func EqualsAlterOption(inA, inB AlterOption) bool {
if !ok {
return false
}
- return EqualsRefOfAlterCharset(a, b)
+ return cmp.RefOfAlterCharset(a, b)
case *AlterCheck:
b, ok := inB.(*AlterCheck)
if !ok {
return false
}
- return EqualsRefOfAlterCheck(a, b)
+ return cmp.RefOfAlterCheck(a, b)
case *AlterColumn:
b, ok := inB.(*AlterColumn)
if !ok {
return false
}
- return EqualsRefOfAlterColumn(a, b)
+ return cmp.RefOfAlterColumn(a, b)
case *AlterIndex:
b, ok := inB.(*AlterIndex)
if !ok {
return false
}
- return EqualsRefOfAlterIndex(a, b)
+ return cmp.RefOfAlterIndex(a, b)
case *ChangeColumn:
b, ok := inB.(*ChangeColumn)
if !ok {
return false
}
- return EqualsRefOfChangeColumn(a, b)
+ return cmp.RefOfChangeColumn(a, b)
case *DropColumn:
b, ok := inB.(*DropColumn)
if !ok {
return false
}
- return EqualsRefOfDropColumn(a, b)
+ return cmp.RefOfDropColumn(a, b)
case *DropKey:
b, ok := inB.(*DropKey)
if !ok {
return false
}
- return EqualsRefOfDropKey(a, b)
+ return cmp.RefOfDropKey(a, b)
case *Force:
b, ok := inB.(*Force)
if !ok {
return false
}
- return EqualsRefOfForce(a, b)
+ return cmp.RefOfForce(a, b)
case *KeyState:
b, ok := inB.(*KeyState)
if !ok {
return false
}
- return EqualsRefOfKeyState(a, b)
+ return cmp.RefOfKeyState(a, b)
case *LockOption:
b, ok := inB.(*LockOption)
if !ok {
return false
}
- return EqualsRefOfLockOption(a, b)
+ return cmp.RefOfLockOption(a, b)
case *ModifyColumn:
b, ok := inB.(*ModifyColumn)
if !ok {
return false
}
- return EqualsRefOfModifyColumn(a, b)
+ return cmp.RefOfModifyColumn(a, b)
case *OrderByOption:
b, ok := inB.(*OrderByOption)
if !ok {
return false
}
- return EqualsRefOfOrderByOption(a, b)
+ return cmp.RefOfOrderByOption(a, b)
case *RenameColumn:
b, ok := inB.(*RenameColumn)
if !ok {
return false
}
- return EqualsRefOfRenameColumn(a, b)
+ return cmp.RefOfRenameColumn(a, b)
case *RenameIndex:
b, ok := inB.(*RenameIndex)
if !ok {
return false
}
- return EqualsRefOfRenameIndex(a, b)
+ return cmp.RefOfRenameIndex(a, b)
case *RenameTableName:
b, ok := inB.(*RenameTableName)
if !ok {
return false
}
- return EqualsRefOfRenameTableName(a, b)
+ return cmp.RefOfRenameTableName(a, b)
case TableOptions:
b, ok := inB.(TableOptions)
if !ok {
return false
}
- return EqualsTableOptions(a, b)
+ return cmp.TableOptions(a, b)
case *TablespaceOperation:
b, ok := inB.(*TablespaceOperation)
if !ok {
return false
}
- return EqualsRefOfTablespaceOperation(a, b)
+ return cmp.RefOfTablespaceOperation(a, b)
case *Validation:
b, ok := inB.(*Validation)
if !ok {
return false
}
- return EqualsRefOfValidation(a, b)
+ return cmp.RefOfValidation(a, b)
default:
// this should never happen
return false
}
}
-// EqualsCallable does deep equals between the two objects.
-func EqualsCallable(inA, inB Callable) bool {
+// Callable does deep equals between the two objects.
+func (cmp *Comparator) Callable(inA, inB Callable) bool {
if inA == nil && inB == nil {
return true
}
@@ -4772,378 +4786,351 @@ func EqualsCallable(inA, inB Callable) bool {
if !ok {
return false
}
- return EqualsRefOfArgumentLessWindowExpr(a, b)
+ return cmp.RefOfArgumentLessWindowExpr(a, b)
case *Avg:
b, ok := inB.(*Avg)
if !ok {
return false
}
- return EqualsRefOfAvg(a, b)
+ return cmp.RefOfAvg(a, b)
case *CharExpr:
b, ok := inB.(*CharExpr)
if !ok {
return false
}
- return EqualsRefOfCharExpr(a, b)
+ return cmp.RefOfCharExpr(a, b)
case *ConvertExpr:
b, ok := inB.(*ConvertExpr)
if !ok {
return false
}
- return EqualsRefOfConvertExpr(a, b)
+ return cmp.RefOfConvertExpr(a, b)
case *ConvertUsingExpr:
b, ok := inB.(*ConvertUsingExpr)
if !ok {
return false
}
- return EqualsRefOfConvertUsingExpr(a, b)
+ return cmp.RefOfConvertUsingExpr(a, b)
case *Count:
b, ok := inB.(*Count)
if !ok {
return false
}
- return EqualsRefOfCount(a, b)
+ return cmp.RefOfCount(a, b)
case *CountStar:
b, ok := inB.(*CountStar)
if !ok {
return false
}
- return EqualsRefOfCountStar(a, b)
+ return cmp.RefOfCountStar(a, b)
case *CurTimeFuncExpr:
b, ok := inB.(*CurTimeFuncExpr)
if !ok {
return false
}
- return EqualsRefOfCurTimeFuncExpr(a, b)
+ return cmp.RefOfCurTimeFuncExpr(a, b)
case *ExtractFuncExpr:
b, ok := inB.(*ExtractFuncExpr)
if !ok {
return false
}
- return EqualsRefOfExtractFuncExpr(a, b)
+ return cmp.RefOfExtractFuncExpr(a, b)
case *ExtractValueExpr:
b, ok := inB.(*ExtractValueExpr)
if !ok {
return false
}
- return EqualsRefOfExtractValueExpr(a, b)
+ return cmp.RefOfExtractValueExpr(a, b)
case *FirstOrLastValueExpr:
b, ok := inB.(*FirstOrLastValueExpr)
if !ok {
return false
}
- return EqualsRefOfFirstOrLastValueExpr(a, b)
+ return cmp.RefOfFirstOrLastValueExpr(a, b)
case *FuncExpr:
b, ok := inB.(*FuncExpr)
if !ok {
return false
}
- return EqualsRefOfFuncExpr(a, b)
+ return cmp.RefOfFuncExpr(a, b)
case *GTIDFuncExpr:
b, ok := inB.(*GTIDFuncExpr)
if !ok {
return false
}
- return EqualsRefOfGTIDFuncExpr(a, b)
+ return cmp.RefOfGTIDFuncExpr(a, b)
case *GroupConcatExpr:
b, ok := inB.(*GroupConcatExpr)
if !ok {
return false
}
- return EqualsRefOfGroupConcatExpr(a, b)
+ return cmp.RefOfGroupConcatExpr(a, b)
case *InsertExpr:
b, ok := inB.(*InsertExpr)
if !ok {
return false
}
- return EqualsRefOfInsertExpr(a, b)
+ return cmp.RefOfInsertExpr(a, b)
case *IntervalFuncExpr:
b, ok := inB.(*IntervalFuncExpr)
if !ok {
return false
}
- return EqualsRefOfIntervalFuncExpr(a, b)
+ return cmp.RefOfIntervalFuncExpr(a, b)
case *JSONArrayExpr:
b, ok := inB.(*JSONArrayExpr)
if !ok {
return false
}
- return EqualsRefOfJSONArrayExpr(a, b)
+ return cmp.RefOfJSONArrayExpr(a, b)
case *JSONAttributesExpr:
b, ok := inB.(*JSONAttributesExpr)
if !ok {
return false
}
- return EqualsRefOfJSONAttributesExpr(a, b)
+ return cmp.RefOfJSONAttributesExpr(a, b)
case *JSONContainsExpr:
b, ok := inB.(*JSONContainsExpr)
if !ok {
return false
}
- return EqualsRefOfJSONContainsExpr(a, b)
+ return cmp.RefOfJSONContainsExpr(a, b)
case *JSONContainsPathExpr:
b, ok := inB.(*JSONContainsPathExpr)
if !ok {
return false
}
- return EqualsRefOfJSONContainsPathExpr(a, b)
+ return cmp.RefOfJSONContainsPathExpr(a, b)
case *JSONExtractExpr:
b, ok := inB.(*JSONExtractExpr)
if !ok {
return false
}
- return EqualsRefOfJSONExtractExpr(a, b)
+ return cmp.RefOfJSONExtractExpr(a, b)
case *JSONKeysExpr:
b, ok := inB.(*JSONKeysExpr)
if !ok {
return false
}
- return EqualsRefOfJSONKeysExpr(a, b)
+ return cmp.RefOfJSONKeysExpr(a, b)
case *JSONObjectExpr:
b, ok := inB.(*JSONObjectExpr)
if !ok {
return false
}
- return EqualsRefOfJSONObjectExpr(a, b)
+ return cmp.RefOfJSONObjectExpr(a, b)
case *JSONOverlapsExpr:
b, ok := inB.(*JSONOverlapsExpr)
if !ok {
return false
}
- return EqualsRefOfJSONOverlapsExpr(a, b)
+ return cmp.RefOfJSONOverlapsExpr(a, b)
case *JSONPrettyExpr:
b, ok := inB.(*JSONPrettyExpr)
if !ok {
return false
}
- return EqualsRefOfJSONPrettyExpr(a, b)
+ return cmp.RefOfJSONPrettyExpr(a, b)
case *JSONQuoteExpr:
b, ok := inB.(*JSONQuoteExpr)
if !ok {
return false
}
- return EqualsRefOfJSONQuoteExpr(a, b)
+ return cmp.RefOfJSONQuoteExpr(a, b)
case *JSONRemoveExpr:
b, ok := inB.(*JSONRemoveExpr)
if !ok {
return false
}
- return EqualsRefOfJSONRemoveExpr(a, b)
+ return cmp.RefOfJSONRemoveExpr(a, b)
case *JSONSchemaValidFuncExpr:
b, ok := inB.(*JSONSchemaValidFuncExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSchemaValidFuncExpr(a, b)
+ return cmp.RefOfJSONSchemaValidFuncExpr(a, b)
case *JSONSchemaValidationReportFuncExpr:
b, ok := inB.(*JSONSchemaValidationReportFuncExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSchemaValidationReportFuncExpr(a, b)
+ return cmp.RefOfJSONSchemaValidationReportFuncExpr(a, b)
case *JSONSearchExpr:
b, ok := inB.(*JSONSearchExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSearchExpr(a, b)
+ return cmp.RefOfJSONSearchExpr(a, b)
case *JSONStorageFreeExpr:
b, ok := inB.(*JSONStorageFreeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONStorageFreeExpr(a, b)
+ return cmp.RefOfJSONStorageFreeExpr(a, b)
case *JSONStorageSizeExpr:
b, ok := inB.(*JSONStorageSizeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONStorageSizeExpr(a, b)
+ return cmp.RefOfJSONStorageSizeExpr(a, b)
case *JSONUnquoteExpr:
b, ok := inB.(*JSONUnquoteExpr)
if !ok {
return false
}
- return EqualsRefOfJSONUnquoteExpr(a, b)
+ return cmp.RefOfJSONUnquoteExpr(a, b)
case *JSONValueExpr:
b, ok := inB.(*JSONValueExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueExpr(a, b)
+ return cmp.RefOfJSONValueExpr(a, b)
case *JSONValueMergeExpr:
b, ok := inB.(*JSONValueMergeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueMergeExpr(a, b)
+ return cmp.RefOfJSONValueMergeExpr(a, b)
case *JSONValueModifierExpr:
b, ok := inB.(*JSONValueModifierExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueModifierExpr(a, b)
+ return cmp.RefOfJSONValueModifierExpr(a, b)
case *LagLeadExpr:
b, ok := inB.(*LagLeadExpr)
if !ok {
return false
}
- return EqualsRefOfLagLeadExpr(a, b)
+ return cmp.RefOfLagLeadExpr(a, b)
case *LocateExpr:
b, ok := inB.(*LocateExpr)
if !ok {
return false
}
- return EqualsRefOfLocateExpr(a, b)
+ return cmp.RefOfLocateExpr(a, b)
case *MatchExpr:
b, ok := inB.(*MatchExpr)
if !ok {
return false
}
- return EqualsRefOfMatchExpr(a, b)
+ return cmp.RefOfMatchExpr(a, b)
case *Max:
b, ok := inB.(*Max)
if !ok {
return false
}
- return EqualsRefOfMax(a, b)
+ return cmp.RefOfMax(a, b)
case *MemberOfExpr:
b, ok := inB.(*MemberOfExpr)
if !ok {
return false
}
- return EqualsRefOfMemberOfExpr(a, b)
+ return cmp.RefOfMemberOfExpr(a, b)
case *Min:
b, ok := inB.(*Min)
if !ok {
return false
}
- return EqualsRefOfMin(a, b)
+ return cmp.RefOfMin(a, b)
case *NTHValueExpr:
b, ok := inB.(*NTHValueExpr)
if !ok {
return false
}
- return EqualsRefOfNTHValueExpr(a, b)
+ return cmp.RefOfNTHValueExpr(a, b)
case *NamedWindow:
b, ok := inB.(*NamedWindow)
if !ok {
return false
}
- return EqualsRefOfNamedWindow(a, b)
+ return cmp.RefOfNamedWindow(a, b)
case *NtileExpr:
b, ok := inB.(*NtileExpr)
if !ok {
return false
}
- return EqualsRefOfNtileExpr(a, b)
+ return cmp.RefOfNtileExpr(a, b)
case *PerformanceSchemaFuncExpr:
b, ok := inB.(*PerformanceSchemaFuncExpr)
if !ok {
return false
}
- return EqualsRefOfPerformanceSchemaFuncExpr(a, b)
+ return cmp.RefOfPerformanceSchemaFuncExpr(a, b)
case *RegexpInstrExpr:
b, ok := inB.(*RegexpInstrExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpInstrExpr(a, b)
+ return cmp.RefOfRegexpInstrExpr(a, b)
case *RegexpLikeExpr:
b, ok := inB.(*RegexpLikeExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpLikeExpr(a, b)
+ return cmp.RefOfRegexpLikeExpr(a, b)
case *RegexpReplaceExpr:
b, ok := inB.(*RegexpReplaceExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpReplaceExpr(a, b)
+ return cmp.RefOfRegexpReplaceExpr(a, b)
case *RegexpSubstrExpr:
b, ok := inB.(*RegexpSubstrExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpSubstrExpr(a, b)
+ return cmp.RefOfRegexpSubstrExpr(a, b)
case *SubstrExpr:
b, ok := inB.(*SubstrExpr)
if !ok {
return false
}
- return EqualsRefOfSubstrExpr(a, b)
+ return cmp.RefOfSubstrExpr(a, b)
case *Sum:
b, ok := inB.(*Sum)
if !ok {
return false
}
- return EqualsRefOfSum(a, b)
+ return cmp.RefOfSum(a, b)
case *TimestampFuncExpr:
b, ok := inB.(*TimestampFuncExpr)
if !ok {
return false
}
- return EqualsRefOfTimestampFuncExpr(a, b)
+ return cmp.RefOfTimestampFuncExpr(a, b)
case *TrimFuncExpr:
b, ok := inB.(*TrimFuncExpr)
if !ok {
return false
}
- return EqualsRefOfTrimFuncExpr(a, b)
+ return cmp.RefOfTrimFuncExpr(a, b)
case *UpdateXMLExpr:
b, ok := inB.(*UpdateXMLExpr)
if !ok {
return false
}
- return EqualsRefOfUpdateXMLExpr(a, b)
+ return cmp.RefOfUpdateXMLExpr(a, b)
case *ValuesFuncExpr:
b, ok := inB.(*ValuesFuncExpr)
if !ok {
return false
}
- return EqualsRefOfValuesFuncExpr(a, b)
+ return cmp.RefOfValuesFuncExpr(a, b)
case *WeightStringFuncExpr:
b, ok := inB.(*WeightStringFuncExpr)
if !ok {
return false
}
- return EqualsRefOfWeightStringFuncExpr(a, b)
- default:
- // this should never happen
- return false
- }
-}
-
-// EqualsCharacteristic does deep equals between the two objects.
-func EqualsCharacteristic(inA, inB Characteristic) bool {
- if inA == nil && inB == nil {
- return true
- }
- if inA == nil || inB == nil {
- return false
- }
- switch a := inA.(type) {
- case AccessMode:
- b, ok := inB.(AccessMode)
- if !ok {
- return false
- }
- return a == b
- case IsolationLevel:
- b, ok := inB.(IsolationLevel)
- if !ok {
- return false
- }
- return a == b
+ return cmp.RefOfWeightStringFuncExpr(a, b)
default:
// this should never happen
return false
}
}
-// EqualsColTuple does deep equals between the two objects.
-func EqualsColTuple(inA, inB ColTuple) bool {
+// ColTuple does deep equals between the two objects.
+func (cmp *Comparator) ColTuple(inA, inB ColTuple) bool {
if inA == nil && inB == nil {
return true
}
@@ -5162,21 +5149,21 @@ func EqualsColTuple(inA, inB ColTuple) bool {
if !ok {
return false
}
- return EqualsRefOfSubquery(a, b)
+ return cmp.RefOfSubquery(a, b)
case ValTuple:
b, ok := inB.(ValTuple)
if !ok {
return false
}
- return EqualsValTuple(a, b)
+ return cmp.ValTuple(a, b)
default:
// this should never happen
return false
}
}
-// EqualsConstraintInfo does deep equals between the two objects.
-func EqualsConstraintInfo(inA, inB ConstraintInfo) bool {
+// ConstraintInfo does deep equals between the two objects.
+func (cmp *Comparator) ConstraintInfo(inA, inB ConstraintInfo) bool {
if inA == nil && inB == nil {
return true
}
@@ -5189,21 +5176,21 @@ func EqualsConstraintInfo(inA, inB ConstraintInfo) bool {
if !ok {
return false
}
- return EqualsRefOfCheckConstraintDefinition(a, b)
+ return cmp.RefOfCheckConstraintDefinition(a, b)
case *ForeignKeyDefinition:
b, ok := inB.(*ForeignKeyDefinition)
if !ok {
return false
}
- return EqualsRefOfForeignKeyDefinition(a, b)
+ return cmp.RefOfForeignKeyDefinition(a, b)
default:
// this should never happen
return false
}
}
-// EqualsDBDDLStatement does deep equals between the two objects.
-func EqualsDBDDLStatement(inA, inB DBDDLStatement) bool {
+// DBDDLStatement does deep equals between the two objects.
+func (cmp *Comparator) DBDDLStatement(inA, inB DBDDLStatement) bool {
if inA == nil && inB == nil {
return true
}
@@ -5216,27 +5203,27 @@ func EqualsDBDDLStatement(inA, inB DBDDLStatement) bool {
if !ok {
return false
}
- return EqualsRefOfAlterDatabase(a, b)
+ return cmp.RefOfAlterDatabase(a, b)
case *CreateDatabase:
b, ok := inB.(*CreateDatabase)
if !ok {
return false
}
- return EqualsRefOfCreateDatabase(a, b)
+ return cmp.RefOfCreateDatabase(a, b)
case *DropDatabase:
b, ok := inB.(*DropDatabase)
if !ok {
return false
}
- return EqualsRefOfDropDatabase(a, b)
+ return cmp.RefOfDropDatabase(a, b)
default:
// this should never happen
return false
}
}
-// EqualsDDLStatement does deep equals between the two objects.
-func EqualsDDLStatement(inA, inB DDLStatement) bool {
+// DDLStatement does deep equals between the two objects.
+func (cmp *Comparator) DDLStatement(inA, inB DDLStatement) bool {
if inA == nil && inB == nil {
return true
}
@@ -5249,57 +5236,57 @@ func EqualsDDLStatement(inA, inB DDLStatement) bool {
if !ok {
return false
}
- return EqualsRefOfAlterTable(a, b)
+ return cmp.RefOfAlterTable(a, b)
case *AlterView:
b, ok := inB.(*AlterView)
if !ok {
return false
}
- return EqualsRefOfAlterView(a, b)
+ return cmp.RefOfAlterView(a, b)
case *CreateTable:
b, ok := inB.(*CreateTable)
if !ok {
return false
}
- return EqualsRefOfCreateTable(a, b)
+ return cmp.RefOfCreateTable(a, b)
case *CreateView:
b, ok := inB.(*CreateView)
if !ok {
return false
}
- return EqualsRefOfCreateView(a, b)
+ return cmp.RefOfCreateView(a, b)
case *DropTable:
b, ok := inB.(*DropTable)
if !ok {
return false
}
- return EqualsRefOfDropTable(a, b)
+ return cmp.RefOfDropTable(a, b)
case *DropView:
b, ok := inB.(*DropView)
if !ok {
return false
}
- return EqualsRefOfDropView(a, b)
+ return cmp.RefOfDropView(a, b)
case *RenameTable:
b, ok := inB.(*RenameTable)
if !ok {
return false
}
- return EqualsRefOfRenameTable(a, b)
+ return cmp.RefOfRenameTable(a, b)
case *TruncateTable:
b, ok := inB.(*TruncateTable)
if !ok {
return false
}
- return EqualsRefOfTruncateTable(a, b)
+ return cmp.RefOfTruncateTable(a, b)
default:
// this should never happen
return false
}
}
-// EqualsExplain does deep equals between the two objects.
-func EqualsExplain(inA, inB Explain) bool {
+// Explain does deep equals between the two objects.
+func (cmp *Comparator) Explain(inA, inB Explain) bool {
if inA == nil && inB == nil {
return true
}
@@ -5312,21 +5299,21 @@ func EqualsExplain(inA, inB Explain) bool {
if !ok {
return false
}
- return EqualsRefOfExplainStmt(a, b)
+ return cmp.RefOfExplainStmt(a, b)
case *ExplainTab:
b, ok := inB.(*ExplainTab)
if !ok {
return false
}
- return EqualsRefOfExplainTab(a, b)
+ return cmp.RefOfExplainTab(a, b)
default:
// this should never happen
return false
}
}
-// EqualsExpr does deep equals between the two objects.
-func EqualsExpr(inA, inB Expr) bool {
+// Expr does deep equals between the two objects.
+func (cmp *Comparator) Expr(inA, inB Expr) bool {
if inA == nil && inB == nil {
return true
}
@@ -5339,7 +5326,7 @@ func EqualsExpr(inA, inB Expr) bool {
if !ok {
return false
}
- return EqualsRefOfAndExpr(a, b)
+ return cmp.RefOfAndExpr(a, b)
case Argument:
b, ok := inB.(Argument)
if !ok {
@@ -5351,43 +5338,43 @@ func EqualsExpr(inA, inB Expr) bool {
if !ok {
return false
}
- return EqualsRefOfArgumentLessWindowExpr(a, b)
+ return cmp.RefOfArgumentLessWindowExpr(a, b)
case *Avg:
b, ok := inB.(*Avg)
if !ok {
return false
}
- return EqualsRefOfAvg(a, b)
+ return cmp.RefOfAvg(a, b)
case *BetweenExpr:
b, ok := inB.(*BetweenExpr)
if !ok {
return false
}
- return EqualsRefOfBetweenExpr(a, b)
+ return cmp.RefOfBetweenExpr(a, b)
case *BinaryExpr:
b, ok := inB.(*BinaryExpr)
if !ok {
return false
}
- return EqualsRefOfBinaryExpr(a, b)
+ return cmp.RefOfBinaryExpr(a, b)
case *BitAnd:
b, ok := inB.(*BitAnd)
if !ok {
return false
}
- return EqualsRefOfBitAnd(a, b)
+ return cmp.RefOfBitAnd(a, b)
case *BitOr:
b, ok := inB.(*BitOr)
if !ok {
return false
}
- return EqualsRefOfBitOr(a, b)
+ return cmp.RefOfBitOr(a, b)
case *BitXor:
b, ok := inB.(*BitXor)
if !ok {
return false
}
- return EqualsRefOfBitXor(a, b)
+ return cmp.RefOfBitXor(a, b)
case BoolVal:
b, ok := inB.(BoolVal)
if !ok {
@@ -5399,277 +5386,277 @@ func EqualsExpr(inA, inB Expr) bool {
if !ok {
return false
}
- return EqualsRefOfCaseExpr(a, b)
+ return cmp.RefOfCaseExpr(a, b)
case *CastExpr:
b, ok := inB.(*CastExpr)
if !ok {
return false
}
- return EqualsRefOfCastExpr(a, b)
+ return cmp.RefOfCastExpr(a, b)
case *CharExpr:
b, ok := inB.(*CharExpr)
if !ok {
return false
}
- return EqualsRefOfCharExpr(a, b)
+ return cmp.RefOfCharExpr(a, b)
case *ColName:
b, ok := inB.(*ColName)
if !ok {
return false
}
- return EqualsRefOfColName(a, b)
+ return cmp.RefOfColName(a, b)
case *CollateExpr:
b, ok := inB.(*CollateExpr)
if !ok {
return false
}
- return EqualsRefOfCollateExpr(a, b)
+ return cmp.RefOfCollateExpr(a, b)
case *ComparisonExpr:
b, ok := inB.(*ComparisonExpr)
if !ok {
return false
}
- return EqualsRefOfComparisonExpr(a, b)
+ return cmp.RefOfComparisonExpr(a, b)
case *ConvertExpr:
b, ok := inB.(*ConvertExpr)
if !ok {
return false
}
- return EqualsRefOfConvertExpr(a, b)
+ return cmp.RefOfConvertExpr(a, b)
case *ConvertUsingExpr:
b, ok := inB.(*ConvertUsingExpr)
if !ok {
return false
}
- return EqualsRefOfConvertUsingExpr(a, b)
+ return cmp.RefOfConvertUsingExpr(a, b)
case *Count:
b, ok := inB.(*Count)
if !ok {
return false
}
- return EqualsRefOfCount(a, b)
+ return cmp.RefOfCount(a, b)
case *CountStar:
b, ok := inB.(*CountStar)
if !ok {
return false
}
- return EqualsRefOfCountStar(a, b)
+ return cmp.RefOfCountStar(a, b)
case *CurTimeFuncExpr:
b, ok := inB.(*CurTimeFuncExpr)
if !ok {
return false
}
- return EqualsRefOfCurTimeFuncExpr(a, b)
+ return cmp.RefOfCurTimeFuncExpr(a, b)
case *Default:
b, ok := inB.(*Default)
if !ok {
return false
}
- return EqualsRefOfDefault(a, b)
+ return cmp.RefOfDefault(a, b)
case *ExistsExpr:
b, ok := inB.(*ExistsExpr)
if !ok {
return false
}
- return EqualsRefOfExistsExpr(a, b)
+ return cmp.RefOfExistsExpr(a, b)
case *ExtractFuncExpr:
b, ok := inB.(*ExtractFuncExpr)
if !ok {
return false
}
- return EqualsRefOfExtractFuncExpr(a, b)
+ return cmp.RefOfExtractFuncExpr(a, b)
case *ExtractValueExpr:
b, ok := inB.(*ExtractValueExpr)
if !ok {
return false
}
- return EqualsRefOfExtractValueExpr(a, b)
+ return cmp.RefOfExtractValueExpr(a, b)
case *ExtractedSubquery:
b, ok := inB.(*ExtractedSubquery)
if !ok {
return false
}
- return EqualsRefOfExtractedSubquery(a, b)
+ return cmp.RefOfExtractedSubquery(a, b)
case *FirstOrLastValueExpr:
b, ok := inB.(*FirstOrLastValueExpr)
if !ok {
return false
}
- return EqualsRefOfFirstOrLastValueExpr(a, b)
+ return cmp.RefOfFirstOrLastValueExpr(a, b)
case *FuncExpr:
b, ok := inB.(*FuncExpr)
if !ok {
return false
}
- return EqualsRefOfFuncExpr(a, b)
+ return cmp.RefOfFuncExpr(a, b)
case *GTIDFuncExpr:
b, ok := inB.(*GTIDFuncExpr)
if !ok {
return false
}
- return EqualsRefOfGTIDFuncExpr(a, b)
+ return cmp.RefOfGTIDFuncExpr(a, b)
case *GroupConcatExpr:
b, ok := inB.(*GroupConcatExpr)
if !ok {
return false
}
- return EqualsRefOfGroupConcatExpr(a, b)
+ return cmp.RefOfGroupConcatExpr(a, b)
case *InsertExpr:
b, ok := inB.(*InsertExpr)
if !ok {
return false
}
- return EqualsRefOfInsertExpr(a, b)
+ return cmp.RefOfInsertExpr(a, b)
case *IntervalExpr:
b, ok := inB.(*IntervalExpr)
if !ok {
return false
}
- return EqualsRefOfIntervalExpr(a, b)
+ return cmp.RefOfIntervalExpr(a, b)
case *IntervalFuncExpr:
b, ok := inB.(*IntervalFuncExpr)
if !ok {
return false
}
- return EqualsRefOfIntervalFuncExpr(a, b)
+ return cmp.RefOfIntervalFuncExpr(a, b)
case *IntroducerExpr:
b, ok := inB.(*IntroducerExpr)
if !ok {
return false
}
- return EqualsRefOfIntroducerExpr(a, b)
+ return cmp.RefOfIntroducerExpr(a, b)
case *IsExpr:
b, ok := inB.(*IsExpr)
if !ok {
return false
}
- return EqualsRefOfIsExpr(a, b)
+ return cmp.RefOfIsExpr(a, b)
case *JSONArrayExpr:
b, ok := inB.(*JSONArrayExpr)
if !ok {
return false
}
- return EqualsRefOfJSONArrayExpr(a, b)
+ return cmp.RefOfJSONArrayExpr(a, b)
case *JSONAttributesExpr:
b, ok := inB.(*JSONAttributesExpr)
if !ok {
return false
}
- return EqualsRefOfJSONAttributesExpr(a, b)
+ return cmp.RefOfJSONAttributesExpr(a, b)
case *JSONContainsExpr:
b, ok := inB.(*JSONContainsExpr)
if !ok {
return false
}
- return EqualsRefOfJSONContainsExpr(a, b)
+ return cmp.RefOfJSONContainsExpr(a, b)
case *JSONContainsPathExpr:
b, ok := inB.(*JSONContainsPathExpr)
if !ok {
return false
}
- return EqualsRefOfJSONContainsPathExpr(a, b)
+ return cmp.RefOfJSONContainsPathExpr(a, b)
case *JSONExtractExpr:
b, ok := inB.(*JSONExtractExpr)
if !ok {
return false
}
- return EqualsRefOfJSONExtractExpr(a, b)
+ return cmp.RefOfJSONExtractExpr(a, b)
case *JSONKeysExpr:
b, ok := inB.(*JSONKeysExpr)
if !ok {
return false
}
- return EqualsRefOfJSONKeysExpr(a, b)
+ return cmp.RefOfJSONKeysExpr(a, b)
case *JSONObjectExpr:
b, ok := inB.(*JSONObjectExpr)
if !ok {
return false
}
- return EqualsRefOfJSONObjectExpr(a, b)
+ return cmp.RefOfJSONObjectExpr(a, b)
case *JSONOverlapsExpr:
b, ok := inB.(*JSONOverlapsExpr)
if !ok {
return false
}
- return EqualsRefOfJSONOverlapsExpr(a, b)
+ return cmp.RefOfJSONOverlapsExpr(a, b)
case *JSONPrettyExpr:
b, ok := inB.(*JSONPrettyExpr)
if !ok {
return false
}
- return EqualsRefOfJSONPrettyExpr(a, b)
+ return cmp.RefOfJSONPrettyExpr(a, b)
case *JSONQuoteExpr:
b, ok := inB.(*JSONQuoteExpr)
if !ok {
return false
}
- return EqualsRefOfJSONQuoteExpr(a, b)
+ return cmp.RefOfJSONQuoteExpr(a, b)
case *JSONRemoveExpr:
b, ok := inB.(*JSONRemoveExpr)
if !ok {
return false
}
- return EqualsRefOfJSONRemoveExpr(a, b)
+ return cmp.RefOfJSONRemoveExpr(a, b)
case *JSONSchemaValidFuncExpr:
b, ok := inB.(*JSONSchemaValidFuncExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSchemaValidFuncExpr(a, b)
+ return cmp.RefOfJSONSchemaValidFuncExpr(a, b)
case *JSONSchemaValidationReportFuncExpr:
b, ok := inB.(*JSONSchemaValidationReportFuncExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSchemaValidationReportFuncExpr(a, b)
+ return cmp.RefOfJSONSchemaValidationReportFuncExpr(a, b)
case *JSONSearchExpr:
b, ok := inB.(*JSONSearchExpr)
if !ok {
return false
}
- return EqualsRefOfJSONSearchExpr(a, b)
+ return cmp.RefOfJSONSearchExpr(a, b)
case *JSONStorageFreeExpr:
b, ok := inB.(*JSONStorageFreeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONStorageFreeExpr(a, b)
+ return cmp.RefOfJSONStorageFreeExpr(a, b)
case *JSONStorageSizeExpr:
b, ok := inB.(*JSONStorageSizeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONStorageSizeExpr(a, b)
+ return cmp.RefOfJSONStorageSizeExpr(a, b)
case *JSONUnquoteExpr:
b, ok := inB.(*JSONUnquoteExpr)
if !ok {
return false
}
- return EqualsRefOfJSONUnquoteExpr(a, b)
+ return cmp.RefOfJSONUnquoteExpr(a, b)
case *JSONValueExpr:
b, ok := inB.(*JSONValueExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueExpr(a, b)
+ return cmp.RefOfJSONValueExpr(a, b)
case *JSONValueMergeExpr:
b, ok := inB.(*JSONValueMergeExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueMergeExpr(a, b)
+ return cmp.RefOfJSONValueMergeExpr(a, b)
case *JSONValueModifierExpr:
b, ok := inB.(*JSONValueModifierExpr)
if !ok {
return false
}
- return EqualsRefOfJSONValueModifierExpr(a, b)
+ return cmp.RefOfJSONValueModifierExpr(a, b)
case *LagLeadExpr:
b, ok := inB.(*LagLeadExpr)
if !ok {
return false
}
- return EqualsRefOfLagLeadExpr(a, b)
+ return cmp.RefOfLagLeadExpr(a, b)
case ListArg:
b, ok := inB.(ListArg)
if !ok {
@@ -5681,237 +5668,237 @@ func EqualsExpr(inA, inB Expr) bool {
if !ok {
return false
}
- return EqualsRefOfLiteral(a, b)
+ return cmp.RefOfLiteral(a, b)
case *LocateExpr:
b, ok := inB.(*LocateExpr)
if !ok {
return false
}
- return EqualsRefOfLocateExpr(a, b)
+ return cmp.RefOfLocateExpr(a, b)
case *LockingFunc:
b, ok := inB.(*LockingFunc)
if !ok {
return false
}
- return EqualsRefOfLockingFunc(a, b)
+ return cmp.RefOfLockingFunc(a, b)
case *MatchExpr:
b, ok := inB.(*MatchExpr)
if !ok {
return false
}
- return EqualsRefOfMatchExpr(a, b)
+ return cmp.RefOfMatchExpr(a, b)
case *Max:
b, ok := inB.(*Max)
if !ok {
return false
}
- return EqualsRefOfMax(a, b)
+ return cmp.RefOfMax(a, b)
case *MemberOfExpr:
b, ok := inB.(*MemberOfExpr)
if !ok {
return false
}
- return EqualsRefOfMemberOfExpr(a, b)
+ return cmp.RefOfMemberOfExpr(a, b)
case *Min:
b, ok := inB.(*Min)
if !ok {
return false
}
- return EqualsRefOfMin(a, b)
+ return cmp.RefOfMin(a, b)
case *NTHValueExpr:
b, ok := inB.(*NTHValueExpr)
if !ok {
return false
}
- return EqualsRefOfNTHValueExpr(a, b)
+ return cmp.RefOfNTHValueExpr(a, b)
case *NamedWindow:
b, ok := inB.(*NamedWindow)
if !ok {
return false
}
- return EqualsRefOfNamedWindow(a, b)
+ return cmp.RefOfNamedWindow(a, b)
case *NotExpr:
b, ok := inB.(*NotExpr)
if !ok {
return false
}
- return EqualsRefOfNotExpr(a, b)
+ return cmp.RefOfNotExpr(a, b)
case *NtileExpr:
b, ok := inB.(*NtileExpr)
if !ok {
return false
}
- return EqualsRefOfNtileExpr(a, b)
+ return cmp.RefOfNtileExpr(a, b)
case *NullVal:
b, ok := inB.(*NullVal)
if !ok {
return false
}
- return EqualsRefOfNullVal(a, b)
+ return cmp.RefOfNullVal(a, b)
case *Offset:
b, ok := inB.(*Offset)
if !ok {
return false
}
- return EqualsRefOfOffset(a, b)
+ return cmp.RefOfOffset(a, b)
case *OrExpr:
b, ok := inB.(*OrExpr)
if !ok {
return false
}
- return EqualsRefOfOrExpr(a, b)
+ return cmp.RefOfOrExpr(a, b)
case *PerformanceSchemaFuncExpr:
b, ok := inB.(*PerformanceSchemaFuncExpr)
if !ok {
return false
}
- return EqualsRefOfPerformanceSchemaFuncExpr(a, b)
+ return cmp.RefOfPerformanceSchemaFuncExpr(a, b)
case *RegexpInstrExpr:
b, ok := inB.(*RegexpInstrExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpInstrExpr(a, b)
+ return cmp.RefOfRegexpInstrExpr(a, b)
case *RegexpLikeExpr:
b, ok := inB.(*RegexpLikeExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpLikeExpr(a, b)
+ return cmp.RefOfRegexpLikeExpr(a, b)
case *RegexpReplaceExpr:
b, ok := inB.(*RegexpReplaceExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpReplaceExpr(a, b)
+ return cmp.RefOfRegexpReplaceExpr(a, b)
case *RegexpSubstrExpr:
b, ok := inB.(*RegexpSubstrExpr)
if !ok {
return false
}
- return EqualsRefOfRegexpSubstrExpr(a, b)
+ return cmp.RefOfRegexpSubstrExpr(a, b)
case *Std:
b, ok := inB.(*Std)
if !ok {
return false
}
- return EqualsRefOfStd(a, b)
+ return cmp.RefOfStd(a, b)
case *StdDev:
b, ok := inB.(*StdDev)
if !ok {
return false
}
- return EqualsRefOfStdDev(a, b)
+ return cmp.RefOfStdDev(a, b)
case *StdPop:
b, ok := inB.(*StdPop)
if !ok {
return false
}
- return EqualsRefOfStdPop(a, b)
+ return cmp.RefOfStdPop(a, b)
case *StdSamp:
b, ok := inB.(*StdSamp)
if !ok {
return false
}
- return EqualsRefOfStdSamp(a, b)
+ return cmp.RefOfStdSamp(a, b)
case *Subquery:
b, ok := inB.(*Subquery)
if !ok {
return false
}
- return EqualsRefOfSubquery(a, b)
+ return cmp.RefOfSubquery(a, b)
case *SubstrExpr:
b, ok := inB.(*SubstrExpr)
if !ok {
return false
}
- return EqualsRefOfSubstrExpr(a, b)
+ return cmp.RefOfSubstrExpr(a, b)
case *Sum:
b, ok := inB.(*Sum)
if !ok {
return false
}
- return EqualsRefOfSum(a, b)
+ return cmp.RefOfSum(a, b)
case *TimestampFuncExpr:
b, ok := inB.(*TimestampFuncExpr)
if !ok {
return false
}
- return EqualsRefOfTimestampFuncExpr(a, b)
+ return cmp.RefOfTimestampFuncExpr(a, b)
case *TrimFuncExpr:
b, ok := inB.(*TrimFuncExpr)
if !ok {
return false
}
- return EqualsRefOfTrimFuncExpr(a, b)
+ return cmp.RefOfTrimFuncExpr(a, b)
case *UnaryExpr:
b, ok := inB.(*UnaryExpr)
if !ok {
return false
}
- return EqualsRefOfUnaryExpr(a, b)
+ return cmp.RefOfUnaryExpr(a, b)
case *UpdateXMLExpr:
b, ok := inB.(*UpdateXMLExpr)
if !ok {
return false
}
- return EqualsRefOfUpdateXMLExpr(a, b)
+ return cmp.RefOfUpdateXMLExpr(a, b)
case ValTuple:
b, ok := inB.(ValTuple)
if !ok {
return false
}
- return EqualsValTuple(a, b)
+ return cmp.ValTuple(a, b)
case *ValuesFuncExpr:
b, ok := inB.(*ValuesFuncExpr)
if !ok {
return false
}
- return EqualsRefOfValuesFuncExpr(a, b)
+ return cmp.RefOfValuesFuncExpr(a, b)
case *VarPop:
b, ok := inB.(*VarPop)
if !ok {
return false
}
- return EqualsRefOfVarPop(a, b)
+ return cmp.RefOfVarPop(a, b)
case *VarSamp:
b, ok := inB.(*VarSamp)
if !ok {
return false
}
- return EqualsRefOfVarSamp(a, b)
+ return cmp.RefOfVarSamp(a, b)
case *Variable:
b, ok := inB.(*Variable)
if !ok {
return false
}
- return EqualsRefOfVariable(a, b)
+ return cmp.RefOfVariable(a, b)
case *Variance:
b, ok := inB.(*Variance)
if !ok {
return false
}
- return EqualsRefOfVariance(a, b)
+ return cmp.RefOfVariance(a, b)
case *WeightStringFuncExpr:
b, ok := inB.(*WeightStringFuncExpr)
if !ok {
return false
}
- return EqualsRefOfWeightStringFuncExpr(a, b)
+ return cmp.RefOfWeightStringFuncExpr(a, b)
case *XorExpr:
b, ok := inB.(*XorExpr)
if !ok {
return false
}
- return EqualsRefOfXorExpr(a, b)
+ return cmp.RefOfXorExpr(a, b)
default:
// this should never happen
return false
}
}
-// EqualsInsertRows does deep equals between the two objects.
-func EqualsInsertRows(inA, inB InsertRows) bool {
+// InsertRows does deep equals between the two objects.
+func (cmp *Comparator) InsertRows(inA, inB InsertRows) bool {
if inA == nil && inB == nil {
return true
}
@@ -5924,27 +5911,27 @@ func EqualsInsertRows(inA, inB InsertRows) bool {
if !ok {
return false
}
- return EqualsRefOfSelect(a, b)
+ return cmp.RefOfSelect(a, b)
case *Union:
b, ok := inB.(*Union)
if !ok {
return false
}
- return EqualsRefOfUnion(a, b)
+ return cmp.RefOfUnion(a, b)
case Values:
b, ok := inB.(Values)
if !ok {
return false
}
- return EqualsValues(a, b)
+ return cmp.Values(a, b)
default:
// this should never happen
return false
}
}
-// EqualsSelectExpr does deep equals between the two objects.
-func EqualsSelectExpr(inA, inB SelectExpr) bool {
+// SelectExpr does deep equals between the two objects.
+func (cmp *Comparator) SelectExpr(inA, inB SelectExpr) bool {
if inA == nil && inB == nil {
return true
}
@@ -5957,27 +5944,27 @@ func EqualsSelectExpr(inA, inB SelectExpr) bool {
if !ok {
return false
}
- return EqualsRefOfAliasedExpr(a, b)
+ return cmp.RefOfAliasedExpr(a, b)
case *Nextval:
b, ok := inB.(*Nextval)
if !ok {
return false
}
- return EqualsRefOfNextval(a, b)
+ return cmp.RefOfNextval(a, b)
case *StarExpr:
b, ok := inB.(*StarExpr)
if !ok {
return false
}
- return EqualsRefOfStarExpr(a, b)
+ return cmp.RefOfStarExpr(a, b)
default:
// this should never happen
return false
}
}
-// EqualsSelectStatement does deep equals between the two objects.
-func EqualsSelectStatement(inA, inB SelectStatement) bool {
+// SelectStatement does deep equals between the two objects.
+func (cmp *Comparator) SelectStatement(inA, inB SelectStatement) bool {
if inA == nil && inB == nil {
return true
}
@@ -5990,21 +5977,21 @@ func EqualsSelectStatement(inA, inB SelectStatement) bool {
if !ok {
return false
}
- return EqualsRefOfSelect(a, b)
+ return cmp.RefOfSelect(a, b)
case *Union:
b, ok := inB.(*Union)
if !ok {
return false
}
- return EqualsRefOfUnion(a, b)
+ return cmp.RefOfUnion(a, b)
default:
// this should never happen
return false
}
}
-// EqualsShowInternal does deep equals between the two objects.
-func EqualsShowInternal(inA, inB ShowInternal) bool {
+// ShowInternal does deep equals between the two objects.
+func (cmp *Comparator) ShowInternal(inA, inB ShowInternal) bool {
if inA == nil && inB == nil {
return true
}
@@ -6017,27 +6004,27 @@ func EqualsShowInternal(inA, inB ShowInternal) bool {
if !ok {
return false
}
- return EqualsRefOfShowBasic(a, b)
+ return cmp.RefOfShowBasic(a, b)
case *ShowCreate:
b, ok := inB.(*ShowCreate)
if !ok {
return false
}
- return EqualsRefOfShowCreate(a, b)
+ return cmp.RefOfShowCreate(a, b)
case *ShowOther:
b, ok := inB.(*ShowOther)
if !ok {
return false
}
- return EqualsRefOfShowOther(a, b)
+ return cmp.RefOfShowOther(a, b)
default:
// this should never happen
return false
}
}
-// EqualsSimpleTableExpr does deep equals between the two objects.
-func EqualsSimpleTableExpr(inA, inB SimpleTableExpr) bool {
+// SimpleTableExpr does deep equals between the two objects.
+func (cmp *Comparator) SimpleTableExpr(inA, inB SimpleTableExpr) bool {
if inA == nil && inB == nil {
return true
}
@@ -6050,21 +6037,21 @@ func EqualsSimpleTableExpr(inA, inB SimpleTableExpr) bool {
if !ok {
return false
}
- return EqualsRefOfDerivedTable(a, b)
+ return cmp.RefOfDerivedTable(a, b)
case TableName:
b, ok := inB.(TableName)
if !ok {
return false
}
- return EqualsTableName(a, b)
+ return cmp.TableName(a, b)
default:
// this should never happen
return false
}
}
-// EqualsStatement does deep equals between the two objects.
-func EqualsStatement(inA, inB Statement) bool {
+// Statement does deep equals between the two objects.
+func (cmp *Comparator) Statement(inA, inB Statement) bool {
if inA == nil && inB == nil {
return true
}
@@ -6077,285 +6064,291 @@ func EqualsStatement(inA, inB Statement) bool {
if !ok {
return false
}
- return EqualsRefOfAlterDatabase(a, b)
+ return cmp.RefOfAlterDatabase(a, b)
case *AlterMigration:
b, ok := inB.(*AlterMigration)
if !ok {
return false
}
- return EqualsRefOfAlterMigration(a, b)
+ return cmp.RefOfAlterMigration(a, b)
case *AlterTable:
b, ok := inB.(*AlterTable)
if !ok {
return false
}
- return EqualsRefOfAlterTable(a, b)
+ return cmp.RefOfAlterTable(a, b)
case *AlterView:
b, ok := inB.(*AlterView)
if !ok {
return false
}
- return EqualsRefOfAlterView(a, b)
+ return cmp.RefOfAlterView(a, b)
case *AlterVschema:
b, ok := inB.(*AlterVschema)
if !ok {
return false
}
- return EqualsRefOfAlterVschema(a, b)
+ return cmp.RefOfAlterVschema(a, b)
case *Begin:
b, ok := inB.(*Begin)
if !ok {
return false
}
- return EqualsRefOfBegin(a, b)
+ return cmp.RefOfBegin(a, b)
case *CallProc:
b, ok := inB.(*CallProc)
if !ok {
return false
}
- return EqualsRefOfCallProc(a, b)
+ return cmp.RefOfCallProc(a, b)
case *CommentOnly:
b, ok := inB.(*CommentOnly)
if !ok {
return false
}
- return EqualsRefOfCommentOnly(a, b)
+ return cmp.RefOfCommentOnly(a, b)
case *Commit:
b, ok := inB.(*Commit)
if !ok {
return false
}
- return EqualsRefOfCommit(a, b)
+ return cmp.RefOfCommit(a, b)
case *CreateDatabase:
b, ok := inB.(*CreateDatabase)
if !ok {
return false
}
- return EqualsRefOfCreateDatabase(a, b)
+ return cmp.RefOfCreateDatabase(a, b)
case *CreateTable:
b, ok := inB.(*CreateTable)
if !ok {
return false
}
- return EqualsRefOfCreateTable(a, b)
+ return cmp.RefOfCreateTable(a, b)
case *CreateView:
b, ok := inB.(*CreateView)
if !ok {
return false
}
- return EqualsRefOfCreateView(a, b)
+ return cmp.RefOfCreateView(a, b)
case *DeallocateStmt:
b, ok := inB.(*DeallocateStmt)
if !ok {
return false
}
- return EqualsRefOfDeallocateStmt(a, b)
+ return cmp.RefOfDeallocateStmt(a, b)
case *Delete:
b, ok := inB.(*Delete)
if !ok {
return false
}
- return EqualsRefOfDelete(a, b)
+ return cmp.RefOfDelete(a, b)
case *DropDatabase:
b, ok := inB.(*DropDatabase)
if !ok {
return false
}
- return EqualsRefOfDropDatabase(a, b)
+ return cmp.RefOfDropDatabase(a, b)
case *DropTable:
b, ok := inB.(*DropTable)
if !ok {
return false
}
- return EqualsRefOfDropTable(a, b)
+ return cmp.RefOfDropTable(a, b)
case *DropView:
b, ok := inB.(*DropView)
if !ok {
return false
}
- return EqualsRefOfDropView(a, b)
+ return cmp.RefOfDropView(a, b)
case *ExecuteStmt:
b, ok := inB.(*ExecuteStmt)
if !ok {
return false
}
- return EqualsRefOfExecuteStmt(a, b)
+ return cmp.RefOfExecuteStmt(a, b)
case *ExplainStmt:
b, ok := inB.(*ExplainStmt)
if !ok {
return false
}
- return EqualsRefOfExplainStmt(a, b)
+ return cmp.RefOfExplainStmt(a, b)
case *ExplainTab:
b, ok := inB.(*ExplainTab)
if !ok {
return false
}
- return EqualsRefOfExplainTab(a, b)
+ return cmp.RefOfExplainTab(a, b)
case *Flush:
b, ok := inB.(*Flush)
if !ok {
return false
}
- return EqualsRefOfFlush(a, b)
+ return cmp.RefOfFlush(a, b)
case *Insert:
b, ok := inB.(*Insert)
if !ok {
return false
}
- return EqualsRefOfInsert(a, b)
+ return cmp.RefOfInsert(a, b)
case *Load:
b, ok := inB.(*Load)
if !ok {
return false
}
- return EqualsRefOfLoad(a, b)
+ return cmp.RefOfLoad(a, b)
case *LockTables:
b, ok := inB.(*LockTables)
if !ok {
return false
}
- return EqualsRefOfLockTables(a, b)
+ return cmp.RefOfLockTables(a, b)
case *OtherAdmin:
b, ok := inB.(*OtherAdmin)
if !ok {
return false
}
- return EqualsRefOfOtherAdmin(a, b)
+ return cmp.RefOfOtherAdmin(a, b)
case *OtherRead:
b, ok := inB.(*OtherRead)
if !ok {
return false
}
- return EqualsRefOfOtherRead(a, b)
+ return cmp.RefOfOtherRead(a, b)
case *PrepareStmt:
b, ok := inB.(*PrepareStmt)
if !ok {
return false
}
- return EqualsRefOfPrepareStmt(a, b)
+ return cmp.RefOfPrepareStmt(a, b)
case *Release:
b, ok := inB.(*Release)
if !ok {
return false
}
- return EqualsRefOfRelease(a, b)
+ return cmp.RefOfRelease(a, b)
case *RenameTable:
b, ok := inB.(*RenameTable)
if !ok {
return false
}
- return EqualsRefOfRenameTable(a, b)
+ return cmp.RefOfRenameTable(a, b)
case *RevertMigration:
b, ok := inB.(*RevertMigration)
if !ok {
return false
}
- return EqualsRefOfRevertMigration(a, b)
+ return cmp.RefOfRevertMigration(a, b)
case *Rollback:
b, ok := inB.(*Rollback)
if !ok {
return false
}
- return EqualsRefOfRollback(a, b)
+ return cmp.RefOfRollback(a, b)
case *SRollback:
b, ok := inB.(*SRollback)
if !ok {
return false
}
- return EqualsRefOfSRollback(a, b)
+ return cmp.RefOfSRollback(a, b)
case *Savepoint:
b, ok := inB.(*Savepoint)
if !ok {
return false
}
- return EqualsRefOfSavepoint(a, b)
+ return cmp.RefOfSavepoint(a, b)
case *Select:
b, ok := inB.(*Select)
if !ok {
return false
}
- return EqualsRefOfSelect(a, b)
+ return cmp.RefOfSelect(a, b)
case *Set:
b, ok := inB.(*Set)
if !ok {
return false
}
- return EqualsRefOfSet(a, b)
- case *SetTransaction:
- b, ok := inB.(*SetTransaction)
- if !ok {
- return false
- }
- return EqualsRefOfSetTransaction(a, b)
+ return cmp.RefOfSet(a, b)
case *Show:
b, ok := inB.(*Show)
if !ok {
return false
}
- return EqualsRefOfShow(a, b)
+ return cmp.RefOfShow(a, b)
case *ShowMigrationLogs:
b, ok := inB.(*ShowMigrationLogs)
if !ok {
return false
}
- return EqualsRefOfShowMigrationLogs(a, b)
+ return cmp.RefOfShowMigrationLogs(a, b)
case *ShowThrottledApps:
b, ok := inB.(*ShowThrottledApps)
if !ok {
return false
}
- return EqualsRefOfShowThrottledApps(a, b)
+ return cmp.RefOfShowThrottledApps(a, b)
+ case *ShowThrottlerStatus:
+ b, ok := inB.(*ShowThrottlerStatus)
+ if !ok {
+ return false
+ }
+ return cmp.RefOfShowThrottlerStatus(a, b)
case *Stream:
b, ok := inB.(*Stream)
if !ok {
return false
}
- return EqualsRefOfStream(a, b)
+ return cmp.RefOfStream(a, b)
case *TruncateTable:
b, ok := inB.(*TruncateTable)
if !ok {
return false
}
- return EqualsRefOfTruncateTable(a, b)
+ return cmp.RefOfTruncateTable(a, b)
case *Union:
b, ok := inB.(*Union)
if !ok {
return false
}
- return EqualsRefOfUnion(a, b)
+ return cmp.RefOfUnion(a, b)
case *UnlockTables:
b, ok := inB.(*UnlockTables)
if !ok {
return false
}
- return EqualsRefOfUnlockTables(a, b)
+ return cmp.RefOfUnlockTables(a, b)
case *Update:
b, ok := inB.(*Update)
if !ok {
return false
}
- return EqualsRefOfUpdate(a, b)
+ return cmp.RefOfUpdate(a, b)
case *Use:
b, ok := inB.(*Use)
if !ok {
return false
}
- return EqualsRefOfUse(a, b)
+ return cmp.RefOfUse(a, b)
+ case *VExplainStmt:
+ b, ok := inB.(*VExplainStmt)
+ if !ok {
+ return false
+ }
+ return cmp.RefOfVExplainStmt(a, b)
case *VStream:
b, ok := inB.(*VStream)
if !ok {
return false
}
- return EqualsRefOfVStream(a, b)
+ return cmp.RefOfVStream(a, b)
default:
// this should never happen
return false
}
}
-// EqualsTableExpr does deep equals between the two objects.
-func EqualsTableExpr(inA, inB TableExpr) bool {
+// TableExpr does deep equals between the two objects.
+func (cmp *Comparator) TableExpr(inA, inB TableExpr) bool {
if inA == nil && inB == nil {
return true
}
@@ -6368,46 +6361,46 @@ func EqualsTableExpr(inA, inB TableExpr) bool {
if !ok {
return false
}
- return EqualsRefOfAliasedTableExpr(a, b)
+ return cmp.RefOfAliasedTableExpr(a, b)
case *JSONTableExpr:
b, ok := inB.(*JSONTableExpr)
if !ok {
return false
}
- return EqualsRefOfJSONTableExpr(a, b)
+ return cmp.RefOfJSONTableExpr(a, b)
case *JoinTableExpr:
b, ok := inB.(*JoinTableExpr)
if !ok {
return false
}
- return EqualsRefOfJoinTableExpr(a, b)
+ return cmp.RefOfJoinTableExpr(a, b)
case *ParenTableExpr:
b, ok := inB.(*ParenTableExpr)
if !ok {
return false
}
- return EqualsRefOfParenTableExpr(a, b)
+ return cmp.RefOfParenTableExpr(a, b)
default:
// this should never happen
return false
}
}
-// EqualsSliceOfRefOfColumnDefinition does deep equals between the two objects.
-func EqualsSliceOfRefOfColumnDefinition(a, b []*ColumnDefinition) bool {
+// SliceOfRefOfColumnDefinition does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfColumnDefinition(a, b []*ColumnDefinition) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfColumnDefinition(a[i], b[i]) {
+ if !cmp.RefOfColumnDefinition(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfBool does deep equals between the two objects.
-func EqualsRefOfBool(a, b *bool) bool {
+// RefOfBool does deep equals between the two objects.
+func (cmp *Comparator) RefOfBool(a, b *bool) bool {
if a == b {
return true
}
@@ -6417,72 +6410,73 @@ func EqualsRefOfBool(a, b *bool) bool {
return *a == *b
}
-// EqualsSliceOfDatabaseOption does deep equals between the two objects.
-func EqualsSliceOfDatabaseOption(a, b []DatabaseOption) bool {
+// SliceOfDatabaseOption does deep equals between the two objects.
+func (cmp *Comparator) SliceOfDatabaseOption(a, b []DatabaseOption) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsDatabaseOption(a[i], b[i]) {
+ if !cmp.DatabaseOption(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfAlterOption does deep equals between the two objects.
-func EqualsSliceOfAlterOption(a, b []AlterOption) bool {
+// SliceOfAlterOption does deep equals between the two objects.
+func (cmp *Comparator) SliceOfAlterOption(a, b []AlterOption) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsAlterOption(a[i], b[i]) {
+ if !cmp.AlterOption(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfIdentifierCI does deep equals between the two objects.
-func EqualsSliceOfIdentifierCI(a, b []IdentifierCI) bool {
+// SliceOfIdentifierCI does deep equals between the two objects.
+func (cmp *Comparator) SliceOfIdentifierCI(a, b []IdentifierCI) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsIdentifierCI(a[i], b[i]) {
+ if !cmp.IdentifierCI(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfRefOfWhen does deep equals between the two objects.
-func EqualsSliceOfRefOfWhen(a, b []*When) bool {
+// SliceOfTxAccessMode does deep equals between the two objects.
+func (cmp *Comparator) SliceOfTxAccessMode(a, b []TxAccessMode) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfWhen(a[i], b[i]) {
+ if a[i] != b[i] {
return false
}
}
return true
}
-// EqualsColumnType does deep equals between the two objects.
-func EqualsColumnType(a, b ColumnType) bool {
- return a.Type == b.Type &&
- a.Unsigned == b.Unsigned &&
- a.Zerofill == b.Zerofill &&
- EqualsRefOfColumnTypeOptions(a.Options, b.Options) &&
- EqualsRefOfLiteral(a.Length, b.Length) &&
- EqualsRefOfLiteral(a.Scale, b.Scale) &&
- EqualsColumnCharset(a.Charset, b.Charset) &&
- EqualsSliceOfString(a.EnumValues, b.EnumValues)
+// SliceOfRefOfWhen does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfWhen(a, b []*When) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if !cmp.RefOfWhen(a[i], b[i]) {
+ return false
+ }
+ }
+ return true
}
-// EqualsRefOfColumnTypeOptions does deep equals between the two objects.
-func EqualsRefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool {
+// RefOfColumnTypeOptions does deep equals between the two objects.
+func (cmp *Comparator) RefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool {
if a == b {
return true
}
@@ -6491,29 +6485,29 @@ func EqualsRefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool {
}
return a.Autoincrement == b.Autoincrement &&
a.Collate == b.Collate &&
- EqualsRefOfBool(a.Null, b.Null) &&
- EqualsExpr(a.Default, b.Default) &&
- EqualsExpr(a.OnUpdate, b.OnUpdate) &&
- EqualsExpr(a.As, b.As) &&
- EqualsRefOfLiteral(a.Comment, b.Comment) &&
+ cmp.RefOfBool(a.Null, b.Null) &&
+ cmp.Expr(a.Default, b.Default) &&
+ cmp.Expr(a.OnUpdate, b.OnUpdate) &&
+ cmp.Expr(a.As, b.As) &&
+ cmp.RefOfLiteral(a.Comment, b.Comment) &&
a.Storage == b.Storage &&
- EqualsRefOfReferenceDefinition(a.Reference, b.Reference) &&
+ cmp.RefOfReferenceDefinition(a.Reference, b.Reference) &&
a.KeyOpt == b.KeyOpt &&
- EqualsRefOfBool(a.Invisible, b.Invisible) &&
+ cmp.RefOfBool(a.Invisible, b.Invisible) &&
a.Format == b.Format &&
- EqualsRefOfLiteral(a.EngineAttribute, b.EngineAttribute) &&
- EqualsRefOfLiteral(a.SecondaryEngineAttribute, b.SecondaryEngineAttribute) &&
- EqualsRefOfLiteral(a.SRID, b.SRID)
+ cmp.RefOfLiteral(a.EngineAttribute, b.EngineAttribute) &&
+ cmp.RefOfLiteral(a.SecondaryEngineAttribute, b.SecondaryEngineAttribute) &&
+ cmp.RefOfLiteral(a.SRID, b.SRID)
}
-// EqualsColumnCharset does deep equals between the two objects.
-func EqualsColumnCharset(a, b ColumnCharset) bool {
+// ColumnCharset does deep equals between the two objects.
+func (cmp *Comparator) ColumnCharset(a, b ColumnCharset) bool {
return a.Name == b.Name &&
a.Binary == b.Binary
}
-// EqualsSliceOfString does deep equals between the two objects.
-func EqualsSliceOfString(a, b []string) bool {
+// SliceOfString does deep equals between the two objects.
+func (cmp *Comparator) SliceOfString(a, b []string) bool {
if len(a) != len(b) {
return false
}
@@ -6525,21 +6519,21 @@ func EqualsSliceOfString(a, b []string) bool {
return true
}
-// EqualsSliceOfRefOfVariable does deep equals between the two objects.
-func EqualsSliceOfRefOfVariable(a, b []*Variable) bool {
+// SliceOfRefOfVariable does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfVariable(a, b []*Variable) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfVariable(a[i], b[i]) {
+ if !cmp.RefOfVariable(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfIdentifierCI does deep equals between the two objects.
-func EqualsRefOfIdentifierCI(a, b *IdentifierCI) bool {
+// RefOfIdentifierCI does deep equals between the two objects.
+func (cmp *Comparator) RefOfIdentifierCI(a, b *IdentifierCI) bool {
if a == b {
return true
}
@@ -6550,8 +6544,8 @@ func EqualsRefOfIdentifierCI(a, b *IdentifierCI) bool {
a.lowered == b.lowered
}
-// EqualsRefOfIdentifierCS does deep equals between the two objects.
-func EqualsRefOfIdentifierCS(a, b *IdentifierCS) bool {
+// RefOfIdentifierCS does deep equals between the two objects.
+func (cmp *Comparator) RefOfIdentifierCS(a, b *IdentifierCS) bool {
if a == b {
return true
}
@@ -6561,96 +6555,84 @@ func EqualsRefOfIdentifierCS(a, b *IdentifierCS) bool {
return a.v == b.v
}
-// EqualsSliceOfRefOfIndexColumn does deep equals between the two objects.
-func EqualsSliceOfRefOfIndexColumn(a, b []*IndexColumn) bool {
+// SliceOfRefOfIndexColumn does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfIndexColumn(a, b []*IndexColumn) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfIndexColumn(a[i], b[i]) {
+ if !cmp.RefOfIndexColumn(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfRefOfIndexOption does deep equals between the two objects.
-func EqualsSliceOfRefOfIndexOption(a, b []*IndexOption) bool {
+// SliceOfRefOfIndexOption does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfIndexOption(a, b []*IndexOption) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfIndexOption(a[i], b[i]) {
+ if !cmp.RefOfIndexOption(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfExpr does deep equals between the two objects.
-func EqualsSliceOfExpr(a, b []Expr) bool {
+// SliceOfExpr does deep equals between the two objects.
+func (cmp *Comparator) SliceOfExpr(a, b []Expr) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsExpr(a[i], b[i]) {
+ if !cmp.Expr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfRefOfJSONObjectParam does deep equals between the two objects.
-func EqualsSliceOfRefOfJSONObjectParam(a, b []*JSONObjectParam) bool {
+// SliceOfRefOfJSONObjectParam does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfJSONObjectParam(a, b []*JSONObjectParam) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfJSONObjectParam(a[i], b[i]) {
+ if !cmp.RefOfJSONObjectParam(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfJSONObjectParam does deep equals between the two objects.
-func EqualsRefOfJSONObjectParam(a, b *JSONObjectParam) bool {
- if a == b {
- return true
- }
- if a == nil || b == nil {
- return false
- }
- return EqualsExpr(a.Key, b.Key) &&
- EqualsExpr(a.Value, b.Value)
-}
-
-// EqualsSliceOfRefOfJtColumnDefinition does deep equals between the two objects.
-func EqualsSliceOfRefOfJtColumnDefinition(a, b []*JtColumnDefinition) bool {
+// SliceOfRefOfJtColumnDefinition does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfJtColumnDefinition(a, b []*JtColumnDefinition) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfJtColumnDefinition(a[i], b[i]) {
+ if !cmp.RefOfJtColumnDefinition(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfJtOrdinalColDef does deep equals between the two objects.
-func EqualsRefOfJtOrdinalColDef(a, b *JtOrdinalColDef) bool {
+// RefOfJtOrdinalColDef does deep equals between the two objects.
+func (cmp *Comparator) RefOfJtOrdinalColDef(a, b *JtOrdinalColDef) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Name, b.Name)
+ return cmp.IdentifierCI(a.Name, b.Name)
}
-// EqualsRefOfJtPathColDef does deep equals between the two objects.
-func EqualsRefOfJtPathColDef(a, b *JtPathColDef) bool {
+// RefOfJtPathColDef does deep equals between the two objects.
+func (cmp *Comparator) RefOfJtPathColDef(a, b *JtPathColDef) bool {
if a == b {
return true
}
@@ -6658,53 +6640,53 @@ func EqualsRefOfJtPathColDef(a, b *JtPathColDef) bool {
return false
}
return a.JtColExists == b.JtColExists &&
- EqualsIdentifierCI(a.Name, b.Name) &&
- EqualsColumnType(a.Type, b.Type) &&
- EqualsExpr(a.Path, b.Path) &&
- EqualsRefOfJtOnResponse(a.EmptyOnResponse, b.EmptyOnResponse) &&
- EqualsRefOfJtOnResponse(a.ErrorOnResponse, b.ErrorOnResponse)
+ cmp.IdentifierCI(a.Name, b.Name) &&
+ cmp.RefOfColumnType(a.Type, b.Type) &&
+ cmp.Expr(a.Path, b.Path) &&
+ cmp.RefOfJtOnResponse(a.EmptyOnResponse, b.EmptyOnResponse) &&
+ cmp.RefOfJtOnResponse(a.ErrorOnResponse, b.ErrorOnResponse)
}
-// EqualsRefOfJtNestedPathColDef does deep equals between the two objects.
-func EqualsRefOfJtNestedPathColDef(a, b *JtNestedPathColDef) bool {
+// RefOfJtNestedPathColDef does deep equals between the two objects.
+func (cmp *Comparator) RefOfJtNestedPathColDef(a, b *JtNestedPathColDef) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsExpr(a.Path, b.Path) &&
- EqualsSliceOfRefOfJtColumnDefinition(a.Columns, b.Columns)
+ return cmp.Expr(a.Path, b.Path) &&
+ cmp.SliceOfRefOfJtColumnDefinition(a.Columns, b.Columns)
}
-// EqualsTableAndLockTypes does deep equals between the two objects.
-func EqualsTableAndLockTypes(a, b TableAndLockTypes) bool {
+// TableAndLockTypes does deep equals between the two objects.
+func (cmp *Comparator) TableAndLockTypes(a, b TableAndLockTypes) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfTableAndLockType(a[i], b[i]) {
+ if !cmp.RefOfTableAndLockType(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfRefOfColName does deep equals between the two objects.
-func EqualsSliceOfRefOfColName(a, b []*ColName) bool {
+// SliceOfRefOfColName does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfColName(a, b []*ColName) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfColName(a[i], b[i]) {
+ if !cmp.RefOfColName(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsComments does deep equals between the two objects.
-func EqualsComments(a, b Comments) bool {
+// Comments does deep equals between the two objects.
+func (cmp *Comparator) Comments(a, b Comments) bool {
if len(a) != len(b) {
return false
}
@@ -6716,8 +6698,8 @@ func EqualsComments(a, b Comments) bool {
return true
}
-// EqualsRefOfInt does deep equals between the two objects.
-func EqualsRefOfInt(a, b *int) bool {
+// RefOfInt does deep equals between the two objects.
+func (cmp *Comparator) RefOfInt(a, b *int) bool {
if a == b {
return true
}
@@ -6727,83 +6709,70 @@ func EqualsRefOfInt(a, b *int) bool {
return *a == *b
}
-// EqualsSliceOfRefOfPartitionDefinition does deep equals between the two objects.
-func EqualsSliceOfRefOfPartitionDefinition(a, b []*PartitionDefinition) bool {
+// SliceOfRefOfPartitionDefinition does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfPartitionDefinition(a, b []*PartitionDefinition) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfPartitionDefinition(a[i], b[i]) {
+ if !cmp.RefOfPartitionDefinition(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfRefOfRenameTablePair does deep equals between the two objects.
-func EqualsSliceOfRefOfRenameTablePair(a, b []*RenameTablePair) bool {
+// SliceOfRefOfRenameTablePair does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfRenameTablePair(a, b []*RenameTablePair) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfRenameTablePair(a[i], b[i]) {
+ if !cmp.RefOfRenameTablePair(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfRootNode does deep equals between the two objects.
-func EqualsRefOfRootNode(a, b *RootNode) bool {
+// RefOfRootNode does deep equals between the two objects.
+func (cmp *Comparator) RefOfRootNode(a, b *RootNode) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsSQLNode(a.SQLNode, b.SQLNode)
+ return cmp.SQLNode(a.SQLNode, b.SQLNode)
}
-// EqualsSliceOfTableExpr does deep equals between the two objects.
-func EqualsSliceOfTableExpr(a, b []TableExpr) bool {
+// SliceOfTableExpr does deep equals between the two objects.
+func (cmp *Comparator) SliceOfTableExpr(a, b []TableExpr) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsTableExpr(a[i], b[i]) {
+ if !cmp.TableExpr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfCharacteristic does deep equals between the two objects.
-func EqualsSliceOfCharacteristic(a, b []Characteristic) bool {
- if len(a) != len(b) {
- return false
- }
- for i := 0; i < len(a); i++ {
- if !EqualsCharacteristic(a[i], b[i]) {
- return false
- }
- }
- return true
-}
-
-// EqualsRefOfTableName does deep equals between the two objects.
-func EqualsRefOfTableName(a, b *TableName) bool {
+// RefOfTableName does deep equals between the two objects.
+func (cmp *Comparator) RefOfTableName(a, b *TableName) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCS(a.Name, b.Name) &&
- EqualsIdentifierCS(a.Qualifier, b.Qualifier)
+ return cmp.IdentifierCS(a.Name, b.Name) &&
+ cmp.IdentifierCS(a.Qualifier, b.Qualifier)
}
-// EqualsRefOfTableOption does deep equals between the two objects.
-func EqualsRefOfTableOption(a, b *TableOption) bool {
+// RefOfTableOption does deep equals between the two objects.
+func (cmp *Comparator) RefOfTableOption(a, b *TableOption) bool {
if a == b {
return true
}
@@ -6813,38 +6782,38 @@ func EqualsRefOfTableOption(a, b *TableOption) bool {
return a.Name == b.Name &&
a.String == b.String &&
a.CaseSensitive == b.CaseSensitive &&
- EqualsRefOfLiteral(a.Value, b.Value) &&
- EqualsTableNames(a.Tables, b.Tables)
+ cmp.RefOfLiteral(a.Value, b.Value) &&
+ cmp.TableNames(a.Tables, b.Tables)
}
-// EqualsSliceOfRefOfIndexDefinition does deep equals between the two objects.
-func EqualsSliceOfRefOfIndexDefinition(a, b []*IndexDefinition) bool {
+// SliceOfRefOfIndexDefinition does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfIndexDefinition(a, b []*IndexDefinition) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfIndexDefinition(a[i], b[i]) {
+ if !cmp.RefOfIndexDefinition(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfRefOfConstraintDefinition does deep equals between the two objects.
-func EqualsSliceOfRefOfConstraintDefinition(a, b []*ConstraintDefinition) bool {
+// SliceOfRefOfConstraintDefinition does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfConstraintDefinition(a, b []*ConstraintDefinition) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfConstraintDefinition(a[i], b[i]) {
+ if !cmp.RefOfConstraintDefinition(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsRefOfVindexParam does deep equals between the two objects.
-func EqualsRefOfVindexParam(a, b *VindexParam) bool {
+// RefOfVindexParam does deep equals between the two objects.
+func (cmp *Comparator) RefOfVindexParam(a, b *VindexParam) bool {
if a == b {
return true
}
@@ -6852,44 +6821,44 @@ func EqualsRefOfVindexParam(a, b *VindexParam) bool {
return false
}
return a.Val == b.Val &&
- EqualsIdentifierCI(a.Key, b.Key)
+ cmp.IdentifierCI(a.Key, b.Key)
}
-// EqualsSliceOfVindexParam does deep equals between the two objects.
-func EqualsSliceOfVindexParam(a, b []VindexParam) bool {
+// SliceOfVindexParam does deep equals between the two objects.
+func (cmp *Comparator) SliceOfVindexParam(a, b []VindexParam) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsVindexParam(a[i], b[i]) {
+ if !cmp.VindexParam(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsSliceOfRefOfCommonTableExpr does deep equals between the two objects.
-func EqualsSliceOfRefOfCommonTableExpr(a, b []*CommonTableExpr) bool {
+// SliceOfRefOfCommonTableExpr does deep equals between the two objects.
+func (cmp *Comparator) SliceOfRefOfCommonTableExpr(a, b []*CommonTableExpr) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
- if !EqualsRefOfCommonTableExpr(a[i], b[i]) {
+ if !cmp.RefOfCommonTableExpr(a[i], b[i]) {
return false
}
}
return true
}
-// EqualsDatabaseOption does deep equals between the two objects.
-func EqualsDatabaseOption(a, b DatabaseOption) bool {
+// DatabaseOption does deep equals between the two objects.
+func (cmp *Comparator) DatabaseOption(a, b DatabaseOption) bool {
return a.IsDefault == b.IsDefault &&
a.Value == b.Value &&
a.Type == b.Type
}
-// EqualsRefOfColumnCharset does deep equals between the two objects.
-func EqualsRefOfColumnCharset(a, b *ColumnCharset) bool {
+// RefOfColumnCharset does deep equals between the two objects.
+func (cmp *Comparator) RefOfColumnCharset(a, b *ColumnCharset) bool {
if a == b {
return true
}
@@ -6900,22 +6869,22 @@ func EqualsRefOfColumnCharset(a, b *ColumnCharset) bool {
a.Binary == b.Binary
}
-// EqualsRefOfIndexColumn does deep equals between the two objects.
-func EqualsRefOfIndexColumn(a, b *IndexColumn) bool {
+// RefOfIndexColumn does deep equals between the two objects.
+func (cmp *Comparator) RefOfIndexColumn(a, b *IndexColumn) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsIdentifierCI(a.Column, b.Column) &&
- EqualsRefOfLiteral(a.Length, b.Length) &&
- EqualsExpr(a.Expression, b.Expression) &&
+ return cmp.IdentifierCI(a.Column, b.Column) &&
+ cmp.RefOfLiteral(a.Length, b.Length) &&
+ cmp.Expr(a.Expression, b.Expression) &&
a.Direction == b.Direction
}
-// EqualsRefOfIndexOption does deep equals between the two objects.
-func EqualsRefOfIndexOption(a, b *IndexOption) bool {
+// RefOfIndexOption does deep equals between the two objects.
+func (cmp *Comparator) RefOfIndexOption(a, b *IndexOption) bool {
if a == b {
return true
}
@@ -6924,35 +6893,35 @@ func EqualsRefOfIndexOption(a, b *IndexOption) bool {
}
return a.Name == b.Name &&
a.String == b.String &&
- EqualsRefOfLiteral(a.Value, b.Value)
+ cmp.RefOfLiteral(a.Value, b.Value)
}
-// EqualsRefOfTableAndLockType does deep equals between the two objects.
-func EqualsRefOfTableAndLockType(a, b *TableAndLockType) bool {
+// RefOfTableAndLockType does deep equals between the two objects.
+func (cmp *Comparator) RefOfTableAndLockType(a, b *TableAndLockType) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableExpr(a.Table, b.Table) &&
+ return cmp.TableExpr(a.Table, b.Table) &&
a.Lock == b.Lock
}
-// EqualsRefOfRenameTablePair does deep equals between the two objects.
-func EqualsRefOfRenameTablePair(a, b *RenameTablePair) bool {
+// RefOfRenameTablePair does deep equals between the two objects.
+func (cmp *Comparator) RefOfRenameTablePair(a, b *RenameTablePair) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
- return EqualsTableName(a.FromTable, b.FromTable) &&
- EqualsTableName(a.ToTable, b.ToTable)
+ return cmp.TableName(a.FromTable, b.FromTable) &&
+ cmp.TableName(a.ToTable, b.ToTable)
}
-// EqualsRefOfDatabaseOption does deep equals between the two objects.
-func EqualsRefOfDatabaseOption(a, b *DatabaseOption) bool {
+// RefOfDatabaseOption does deep equals between the two objects.
+func (cmp *Comparator) RefOfDatabaseOption(a, b *DatabaseOption) bool {
if a == b {
return true
}
@@ -6963,3 +6932,7 @@ func EqualsRefOfDatabaseOption(a, b *DatabaseOption) bool {
a.Value == b.Value &&
a.Type == b.Type
}
+
+type Comparator struct {
+ RefOfColName_ func(a, b *ColName) bool
+}
diff --git a/go/vt/sqlparser/ast_format.go b/go/vt/sqlparser/ast_format.go
index e94a1b24ab4..d8bb1146eb7 100644
--- a/go/vt/sqlparser/ast_format.go
+++ b/go/vt/sqlparser/ast_format.go
@@ -180,22 +180,6 @@ func (node *Set) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "set %v%v", node.Comments, node.Exprs)
}
-// Format formats the node.
-func (node *SetTransaction) Format(buf *TrackedBuffer) {
- if node.Scope == NoScope {
- buf.astPrintf(node, "set %vtransaction ", node.Comments)
- } else {
- buf.astPrintf(node, "set %v%s transaction ", node.Comments, node.Scope.ToString())
- }
-
- for i, char := range node.Characteristics {
- if i > 0 {
- buf.literal(", ")
- }
- buf.astPrintf(node, "%v", char)
- }
-}
-
// Format formats the node.
func (node *DropDatabase) Format(buf *TrackedBuffer) {
exists := ""
@@ -326,6 +310,11 @@ func (node *ShowThrottledApps) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "show vitess_throttled_apps")
}
+// Format formats the node.
+func (node *ShowThrottlerStatus) Format(buf *TrackedBuffer) {
+ buf.astPrintf(node, "show vitess_throttler status")
+}
+
// Format formats the node.
func (node *OptLike) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "like %v", node.LikeTable)
@@ -678,7 +667,7 @@ func (ts *TableSpec) Format(buf *TrackedBuffer) {
// Format formats the node.
func (col *ColumnDefinition) Format(buf *TrackedBuffer) {
- buf.astPrintf(col, "%v %v", col.Name, &col.Type)
+ buf.astPrintf(col, "%v %v", col.Name, col.Type)
}
// Format returns a canonical string representation of the type and all relevant options
@@ -775,22 +764,22 @@ func (ct *ColumnType) Format(buf *TrackedBuffer) {
if ct.Options.SecondaryEngineAttribute != nil {
buf.astPrintf(ct, " %s %v", keywordStrings[SECONDARY_ENGINE_ATTRIBUTE], ct.Options.SecondaryEngineAttribute)
}
- if ct.Options.KeyOpt == colKeyPrimary {
+ if ct.Options.KeyOpt == ColKeyPrimary {
buf.astPrintf(ct, " %s %s", keywordStrings[PRIMARY], keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKeyUnique {
+ if ct.Options.KeyOpt == ColKeyUnique {
buf.astPrintf(ct, " %s", keywordStrings[UNIQUE])
}
- if ct.Options.KeyOpt == colKeyUniqueKey {
+ if ct.Options.KeyOpt == ColKeyUniqueKey {
buf.astPrintf(ct, " %s %s", keywordStrings[UNIQUE], keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKeySpatialKey {
+ if ct.Options.KeyOpt == ColKeySpatialKey {
buf.astPrintf(ct, " %s %s", keywordStrings[SPATIAL], keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKeyFulltextKey {
+ if ct.Options.KeyOpt == ColKeyFulltextKey {
buf.astPrintf(ct, " %s %s", keywordStrings[FULLTEXT], keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKey {
+ if ct.Options.KeyOpt == ColKey {
buf.astPrintf(ct, " %s", keywordStrings[KEY])
}
if ct.Options.Reference != nil {
@@ -826,7 +815,7 @@ func (idx *IndexDefinition) Format(buf *TrackedBuffer) {
for _, opt := range idx.Options {
buf.astPrintf(idx, " %s", opt.Name)
if opt.String != "" {
- buf.astPrintf(idx, " %s", opt.String)
+ buf.astPrintf(idx, " %#s", opt.String)
} else if opt.Value != nil {
buf.astPrintf(idx, " %v", opt.Value)
}
@@ -974,7 +963,19 @@ func (node *Commit) Format(buf *TrackedBuffer) {
// Format formats the node.
func (node *Begin) Format(buf *TrackedBuffer) {
- buf.literal("begin")
+ if node.TxAccessModes == nil {
+ buf.literal("begin")
+ return
+ }
+ buf.literal("start transaction")
+ for idx, accessMode := range node.TxAccessModes {
+ if idx == 0 {
+ buf.astPrintf(node, " %s", accessMode.ToString())
+ continue
+ }
+ buf.astPrintf(node, ", %s", accessMode.ToString())
+ }
+
}
// Format formats the node.
@@ -1010,6 +1011,11 @@ func (node *ExplainStmt) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "explain %v%s%v", node.Comments, format, node.Statement)
}
+// Format formats the node.
+func (node *VExplainStmt) Format(buf *TrackedBuffer) {
+ buf.astPrintf(node, "vexplain %v%s %v", node.Comments, node.Type.ToString(), node.Statement)
+}
+
// Format formats the node.
func (node *ExplainTab) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "explain %v", node.Table)
@@ -1767,7 +1773,7 @@ func (node *ConvertExpr) Format(buf *TrackedBuffer) {
// Format formats the node.
func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) {
- buf.astPrintf(node, "convert(%v using %s)", node.Expr, node.Type)
+ buf.astPrintf(node, "convert(%v using %#s)", node.Expr, node.Type)
}
// Format formats the node.
@@ -1941,32 +1947,6 @@ func (node IdentifierCS) Format(buf *TrackedBuffer) {
formatID(buf, node.v, NoAt)
}
-// Format formats the node.
-func (node IsolationLevel) Format(buf *TrackedBuffer) {
- buf.literal("isolation level ")
- switch node {
- case ReadUncommitted:
- buf.literal(ReadUncommittedStr)
- case ReadCommitted:
- buf.literal(ReadCommittedStr)
- case RepeatableRead:
- buf.literal(RepeatableReadStr)
- case Serializable:
- buf.literal(SerializableStr)
- default:
- buf.literal("Unknown Isolation level value")
- }
-}
-
-// Format formats the node.
-func (node AccessMode) Format(buf *TrackedBuffer) {
- if node == ReadOnly {
- buf.literal(TxReadOnly)
- } else {
- buf.literal(TxReadWrite)
- }
-}
-
// Format formats the node.
func (node *Load) Format(buf *TrackedBuffer) {
buf.literal("AST node missing for Load type")
@@ -2220,7 +2200,7 @@ func (node *AddColumns) Format(buf *TrackedBuffer) {
// Format formats the node.
func (node AlgorithmValue) Format(buf *TrackedBuffer) {
- buf.astPrintf(node, "algorithm = %s", string(node))
+ buf.astPrintf(node, "algorithm = %#s", string(node))
}
// Format formats the node
@@ -2497,7 +2477,7 @@ func (node *JSONObjectExpr) Format(buf *TrackedBuffer) {
}
// Format formats the node.
-func (node JSONObjectParam) Format(buf *TrackedBuffer) {
+func (node *JSONObjectParam) Format(buf *TrackedBuffer) {
buf.astPrintf(node, "%v, %v", node.Key, node.Value)
}
@@ -2760,9 +2740,17 @@ func (node *Variable) Format(buf *TrackedBuffer) {
case VariableScope:
buf.literal("@")
case SessionScope:
+ if node.Name.EqualString(TransactionIsolationStr) || node.Name.EqualString(TransactionReadOnlyStr) {
+ // @@ without session have `next transaction` scope for these system variables.
+ // so if they are in session scope it has to be printed explicitly.
+ buf.astPrintf(node, "@@%s.", node.Scope.ToString())
+ break
+ }
buf.literal("@@")
case GlobalScope, PersistSysScope, PersistOnlySysScope:
buf.astPrintf(node, "@@%s.", node.Scope.ToString())
+ case NextTxScope:
+ buf.literal("@@")
}
buf.astPrintf(node, "%v", node.Name)
}
diff --git a/go/vt/sqlparser/ast_format_fast.go b/go/vt/sqlparser/ast_format_fast.go
index f8483506e9f..d74f34febd7 100644
--- a/go/vt/sqlparser/ast_format_fast.go
+++ b/go/vt/sqlparser/ast_format_fast.go
@@ -273,27 +273,6 @@ func (node *Set) formatFast(buf *TrackedBuffer) {
node.Exprs.formatFast(buf)
}
-// formatFast formats the node.
-func (node *SetTransaction) formatFast(buf *TrackedBuffer) {
- if node.Scope == NoScope {
- buf.WriteString("set ")
- node.Comments.formatFast(buf)
- buf.WriteString("transaction ")
- } else {
- buf.WriteString("set ")
- node.Comments.formatFast(buf)
- buf.WriteString(node.Scope.ToString())
- buf.WriteString(" transaction ")
- }
-
- for i, char := range node.Characteristics {
- if i > 0 {
- buf.WriteString(", ")
- }
- char.formatFast(buf)
- }
-}
-
// formatFast formats the node.
func (node *DropDatabase) formatFast(buf *TrackedBuffer) {
exists := ""
@@ -466,6 +445,11 @@ func (node *ShowThrottledApps) formatFast(buf *TrackedBuffer) {
buf.WriteString("show vitess_throttled_apps")
}
+// formatFast formats the node.
+func (node *ShowThrottlerStatus) formatFast(buf *TrackedBuffer) {
+ buf.WriteString("show vitess_throttler status")
+}
+
// formatFast formats the node.
func (node *OptLike) formatFast(buf *TrackedBuffer) {
buf.WriteString("like ")
@@ -895,7 +879,7 @@ func (ts *TableSpec) formatFast(buf *TrackedBuffer) {
func (col *ColumnDefinition) formatFast(buf *TrackedBuffer) {
col.Name.formatFast(buf)
buf.WriteByte(' ')
- (&col.Type).formatFast(buf)
+ col.Type.formatFast(buf)
}
// formatFast returns a canonical string representation of the type and all relevant options
@@ -1047,35 +1031,35 @@ func (ct *ColumnType) formatFast(buf *TrackedBuffer) {
buf.WriteByte(' ')
ct.Options.SecondaryEngineAttribute.formatFast(buf)
}
- if ct.Options.KeyOpt == colKeyPrimary {
+ if ct.Options.KeyOpt == ColKeyPrimary {
buf.WriteByte(' ')
buf.WriteString(keywordStrings[PRIMARY])
buf.WriteByte(' ')
buf.WriteString(keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKeyUnique {
+ if ct.Options.KeyOpt == ColKeyUnique {
buf.WriteByte(' ')
buf.WriteString(keywordStrings[UNIQUE])
}
- if ct.Options.KeyOpt == colKeyUniqueKey {
+ if ct.Options.KeyOpt == ColKeyUniqueKey {
buf.WriteByte(' ')
buf.WriteString(keywordStrings[UNIQUE])
buf.WriteByte(' ')
buf.WriteString(keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKeySpatialKey {
+ if ct.Options.KeyOpt == ColKeySpatialKey {
buf.WriteByte(' ')
buf.WriteString(keywordStrings[SPATIAL])
buf.WriteByte(' ')
buf.WriteString(keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKeyFulltextKey {
+ if ct.Options.KeyOpt == ColKeyFulltextKey {
buf.WriteByte(' ')
buf.WriteString(keywordStrings[FULLTEXT])
buf.WriteByte(' ')
buf.WriteString(keywordStrings[KEY])
}
- if ct.Options.KeyOpt == colKey {
+ if ct.Options.KeyOpt == ColKey {
buf.WriteByte(' ')
buf.WriteString(keywordStrings[KEY])
}
@@ -1296,7 +1280,21 @@ func (node *Commit) formatFast(buf *TrackedBuffer) {
// formatFast formats the node.
func (node *Begin) formatFast(buf *TrackedBuffer) {
- buf.WriteString("begin")
+ if node.TxAccessModes == nil {
+ buf.WriteString("begin")
+ return
+ }
+ buf.WriteString("start transaction")
+ for idx, accessMode := range node.TxAccessModes {
+ if idx == 0 {
+ buf.WriteByte(' ')
+ buf.WriteString(accessMode.ToString())
+ continue
+ }
+ buf.WriteString(", ")
+ buf.WriteString(accessMode.ToString())
+ }
+
}
// formatFast formats the node.
@@ -1338,6 +1336,15 @@ func (node *ExplainStmt) formatFast(buf *TrackedBuffer) {
node.Statement.formatFast(buf)
}
+// formatFast formats the node.
+func (node *VExplainStmt) formatFast(buf *TrackedBuffer) {
+ buf.WriteString("vexplain ")
+ node.Comments.formatFast(buf)
+ buf.WriteString(node.Type.ToString())
+ buf.WriteByte(' ')
+ node.Statement.formatFast(buf)
+}
+
// formatFast formats the node.
func (node *ExplainTab) formatFast(buf *TrackedBuffer) {
buf.WriteString("explain ")
@@ -2567,32 +2574,6 @@ func (node IdentifierCS) formatFast(buf *TrackedBuffer) {
formatID(buf, node.v, NoAt)
}
-// formatFast formats the node.
-func (node IsolationLevel) formatFast(buf *TrackedBuffer) {
- buf.WriteString("isolation level ")
- switch node {
- case ReadUncommitted:
- buf.WriteString(ReadUncommittedStr)
- case ReadCommitted:
- buf.WriteString(ReadCommittedStr)
- case RepeatableRead:
- buf.WriteString(RepeatableReadStr)
- case Serializable:
- buf.WriteString(SerializableStr)
- default:
- buf.WriteString("Unknown Isolation level value")
- }
-}
-
-// formatFast formats the node.
-func (node AccessMode) formatFast(buf *TrackedBuffer) {
- if node == ReadOnly {
- buf.WriteString(TxReadOnly)
- } else {
- buf.WriteString(TxReadWrite)
- }
-}
-
// formatFast formats the node.
func (node *Load) formatFast(buf *TrackedBuffer) {
buf.WriteString("AST node missing for Load type")
@@ -3266,7 +3247,7 @@ func (node *JSONObjectExpr) formatFast(buf *TrackedBuffer) {
}
// formatFast formats the node.
-func (node JSONObjectParam) formatFast(buf *TrackedBuffer) {
+func (node *JSONObjectParam) formatFast(buf *TrackedBuffer) {
node.Key.formatFast(buf)
buf.WriteString(", ")
node.Value.formatFast(buf)
@@ -3616,11 +3597,21 @@ func (node *Variable) formatFast(buf *TrackedBuffer) {
case VariableScope:
buf.WriteString("@")
case SessionScope:
+ if node.Name.EqualString(TransactionIsolationStr) || node.Name.EqualString(TransactionReadOnlyStr) {
+ // @@ without session have `next transaction` scope for these system variables.
+ // so if they are in session scope it has to be printed explicitly.
+ buf.WriteString("@@")
+ buf.WriteString(node.Scope.ToString())
+ buf.WriteByte('.')
+ break
+ }
buf.WriteString("@@")
case GlobalScope, PersistSysScope, PersistOnlySysScope:
buf.WriteString("@@")
buf.WriteString(node.Scope.ToString())
buf.WriteByte('.')
+ case NextTxScope:
+ buf.WriteString("@@")
}
node.Name.formatFast(buf)
}
diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go
index 8ece2749788..1278b0379e7 100644
--- a/go/vt/sqlparser/ast_funcs.go
+++ b/go/vt/sqlparser/ast_funcs.go
@@ -34,8 +34,13 @@ import (
querypb "vitess.io/vitess/go/vt/proto/query"
)
-// Walk calls visit on every node.
-// If visit returns true, the underlying nodes
+// Generate all the AST helpers using the tooling in `go/tools`
+
+//go:generate go run ../../tools/asthelpergen/main --in . --iface vitess.io/vitess/go/vt/sqlparser.SQLNode --clone_exclude "*ColName" --equals_custom "*ColName"
+//go:generate go run ../../tools/astfmtgen vitess.io/vitess/go/vt/sqlparser/...
+
+// Walk calls postVisit on every node.
+// If postVisit returns true, the underlying nodes
// are also visited. If it returns an error, walking
// is interrupted, and the error is returned.
func Walk(visit Visit, nodes ...SQLNode) error {
@@ -49,7 +54,7 @@ func Walk(visit Visit, nodes ...SQLNode) error {
}
// Visit defines the signature of a function that
-// can be used to visit all nodes of a parse tree.
+// can be used to postVisit all nodes of a parse tree.
// returning false on kontinue means that children will not be visited
// returning an error will abort the visitation and return the error
type Visit func(node SQLNode) (kontinue bool, err error)
@@ -101,13 +106,13 @@ type TableOption struct {
type ColumnKeyOption int
const (
- colKeyNone ColumnKeyOption = iota
- colKeyPrimary
- colKeySpatialKey
- colKeyFulltextKey
- colKeyUnique
- colKeyUniqueKey
- colKey
+ ColKeyNone ColumnKeyOption = iota
+ ColKeyPrimary
+ ColKeySpatialKey
+ ColKeyFulltextKey
+ ColKeyUnique
+ ColKeyUniqueKey
+ ColKey
)
// ReferenceAction indicates the action takes by a referential constraint e.g.
@@ -210,97 +215,101 @@ func (ct *ColumnType) DescribeType() string {
// SQLType returns the sqltypes type code for the given column
func (ct *ColumnType) SQLType() querypb.Type {
- switch strings.ToLower(ct.Type) {
- case keywordStrings[TINYINT]:
- if ct.Unsigned {
+ return SQLTypeToQueryType(ct.Type, ct.Unsigned)
+}
+
+func SQLTypeToQueryType(typeName string, unsigned bool) querypb.Type {
+ switch keywordVals[strings.ToLower(typeName)] {
+ case TINYINT:
+ if unsigned {
return sqltypes.Uint8
}
return sqltypes.Int8
- case keywordStrings[SMALLINT]:
- if ct.Unsigned {
+ case SMALLINT:
+ if unsigned {
return sqltypes.Uint16
}
return sqltypes.Int16
- case keywordStrings[MEDIUMINT]:
- if ct.Unsigned {
+ case MEDIUMINT:
+ if unsigned {
return sqltypes.Uint24
}
return sqltypes.Int24
- case keywordStrings[INT], keywordStrings[INTEGER]:
- if ct.Unsigned {
+ case INT, INTEGER:
+ if unsigned {
return sqltypes.Uint32
}
return sqltypes.Int32
- case keywordStrings[BIGINT]:
- if ct.Unsigned {
+ case BIGINT:
+ if unsigned {
return sqltypes.Uint64
}
return sqltypes.Int64
- case keywordStrings[BOOL], keywordStrings[BOOLEAN]:
+ case BOOL, BOOLEAN:
return sqltypes.Uint8
- case keywordStrings[TEXT]:
+ case TEXT:
return sqltypes.Text
- case keywordStrings[TINYTEXT]:
+ case TINYTEXT:
return sqltypes.Text
- case keywordStrings[MEDIUMTEXT]:
+ case MEDIUMTEXT:
return sqltypes.Text
- case keywordStrings[LONGTEXT]:
+ case LONGTEXT:
return sqltypes.Text
- case keywordStrings[BLOB]:
+ case BLOB:
return sqltypes.Blob
- case keywordStrings[TINYBLOB]:
+ case TINYBLOB:
return sqltypes.Blob
- case keywordStrings[MEDIUMBLOB]:
+ case MEDIUMBLOB:
return sqltypes.Blob
- case keywordStrings[LONGBLOB]:
+ case LONGBLOB:
return sqltypes.Blob
- case keywordStrings[CHAR]:
+ case CHAR:
return sqltypes.Char
- case keywordStrings[VARCHAR]:
+ case VARCHAR:
return sqltypes.VarChar
- case keywordStrings[BINARY]:
+ case BINARY:
return sqltypes.Binary
- case keywordStrings[VARBINARY]:
+ case VARBINARY:
return sqltypes.VarBinary
- case keywordStrings[DATE]:
+ case DATE:
return sqltypes.Date
- case keywordStrings[TIME]:
+ case TIME:
return sqltypes.Time
- case keywordStrings[DATETIME]:
+ case DATETIME:
return sqltypes.Datetime
- case keywordStrings[TIMESTAMP]:
+ case TIMESTAMP:
return sqltypes.Timestamp
- case keywordStrings[YEAR]:
+ case YEAR:
return sqltypes.Year
- case keywordStrings[FLOAT_TYPE]:
+ case FLOAT_TYPE, FLOAT4_TYPE:
return sqltypes.Float32
- case keywordStrings[DOUBLE]:
+ case DOUBLE, FLOAT8_TYPE:
return sqltypes.Float64
- case keywordStrings[DECIMAL]:
+ case DECIMAL, DECIMAL_TYPE:
return sqltypes.Decimal
- case keywordStrings[BIT]:
+ case BIT:
return sqltypes.Bit
- case keywordStrings[ENUM]:
+ case ENUM:
return sqltypes.Enum
- case keywordStrings[SET]:
+ case SET:
return sqltypes.Set
- case keywordStrings[JSON]:
+ case JSON:
return sqltypes.TypeJSON
- case keywordStrings[GEOMETRY]:
+ case GEOMETRY:
return sqltypes.Geometry
- case keywordStrings[POINT]:
+ case POINT:
return sqltypes.Geometry
- case keywordStrings[LINESTRING]:
+ case LINESTRING:
return sqltypes.Geometry
- case keywordStrings[POLYGON]:
+ case POLYGON:
return sqltypes.Geometry
- case keywordStrings[GEOMETRYCOLLECTION]:
+ case GEOMETRYCOLLECTION:
return sqltypes.Geometry
- case keywordStrings[MULTIPOINT]:
+ case MULTIPOINT:
return sqltypes.Geometry
- case keywordStrings[MULTILINESTRING]:
+ case MULTILINESTRING:
return sqltypes.Geometry
- case keywordStrings[MULTIPOLYGON]:
+ case MULTIPOLYGON:
return sqltypes.Geometry
}
return sqltypes.Null
@@ -434,7 +443,7 @@ func NewWhere(typ WhereType, expr Expr) *Where {
// and replaces it with to. If from matches root,
// then to is returned.
func ReplaceExpr(root, from, to Expr) Expr {
- tmp := Rewrite(root, replaceExpr(from, to), nil)
+ tmp := SafeRewrite(root, stopWalking, replaceExpr(from, to))
expr, success := tmp.(Expr)
if !success {
@@ -445,16 +454,20 @@ func ReplaceExpr(root, from, to Expr) Expr {
return expr
}
+func stopWalking(e SQLNode, _ SQLNode) bool {
+ switch e.(type) {
+ case *ExistsExpr, *Literal, *Subquery, *ValuesFuncExpr, *Default:
+ return false
+ default:
+ return true
+ }
+}
+
func replaceExpr(from, to Expr) func(cursor *Cursor) bool {
return func(cursor *Cursor) bool {
if cursor.Node() == from {
cursor.Replace(to)
}
- switch cursor.Node().(type) {
- case *ExistsExpr, *Literal, *Subquery, *ValuesFuncExpr, *Default:
- return false
- }
-
return true
}
}
@@ -687,6 +700,14 @@ func NewSelect(comments Comments, exprs SelectExprs, selectOptions []string, int
}
}
+// UpdateSetExprsScope updates the scope of the variables in SetExprs.
+func UpdateSetExprsScope(setExprs SetExprs, scope Scope) SetExprs {
+ for _, setExpr := range setExprs {
+ setExpr.Var.Scope = scope
+ }
+ return setExprs
+}
+
// NewSetVariable returns a variable that can be used with SET.
func NewSetVariable(str string, scope Scope) *Variable {
return &Variable{Name: createIdentifierCI(str), Scope: scope}
@@ -720,6 +741,8 @@ func NewVariableExpression(str string, at AtCount) *Variable {
case strings.HasPrefix(l, "vitess_metadata."):
v.Name = createIdentifierCI(str[16:])
v.Scope = VitessMetadataScope
+ case strings.HasSuffix(l, TransactionIsolationStr) || strings.HasSuffix(l, TransactionReadOnlyStr):
+ v.Scope = NextTxScope
default:
v.Scope = SessionScope
}
@@ -987,16 +1010,14 @@ func (node *Select) AddHaving(expr Expr) {
}
return
}
- node.Having.Expr = &AndExpr{
- Left: node.Having.Expr,
- Right: expr,
- }
+ exprs := SplitAndExpression(nil, node.Having.Expr)
+ node.Having.Expr = AndExpressions(append(exprs, expr)...)
}
// AddGroupBy adds a grouping expression, unless it's already present
func (node *Select) AddGroupBy(expr Expr) {
for _, gb := range node.GroupBy {
- if EqualsExpr(gb, expr) {
+ if Equals.Expr(gb, expr) {
// group by columns are sets - duplicates don't add anything, so we can just skip these
return
}
@@ -1145,7 +1166,7 @@ func (scope Scope) ToString() string {
return VitessMetadataStr
case VariableScope:
return VariableStr
- case NoScope:
+ case NoScope, NextTxScope:
return ""
default:
return "Unknown Scope"
@@ -1167,6 +1188,16 @@ func (lock Lock) ToString() string {
return NoLockStr
case ForUpdateLock:
return ForUpdateStr
+ case ForUpdateLockNoWait:
+ return ForUpdateNoWaitStr
+ case ForUpdateLockSkipLocked:
+ return ForUpdateSkipLockedStr
+ case ForShareLock:
+ return ForShareStr
+ case ForShareLockNoWait:
+ return ForShareNoWaitStr
+ case ForShareLockSkipLocked:
+ return ForShareSkipLockedStr
case ShareModeLock:
return ShareModeStr
default:
@@ -1660,6 +1691,20 @@ func (ty ExplainType) ToString() string {
}
}
+// ToString returns the type as a string
+func (ty VExplainType) ToString() string {
+ switch ty {
+ case PlanVExplainType:
+ return PlanStr
+ case QueriesVExplainType:
+ return QueriesStr
+ case AllVExplainType:
+ return AllVExplainStr
+ default:
+ return "Unknown VExplainType"
+ }
+}
+
// ToString returns the type as a string
func (ty IntervalTypes) ToString() string {
switch ty {
@@ -1802,7 +1847,7 @@ func (ty ShowCommandType) ToString() string {
case StatusSession:
return StatusSessionStr
case Table:
- return TableStr
+ return TablesStr
case TableStatus:
return TableStatusStr
case Trigger:
@@ -1885,6 +1930,20 @@ func (columnFormat ColumnFormat) ToString() string {
}
}
+// ToString returns the TxAccessMode type as a string
+func (ty TxAccessMode) ToString() string {
+ switch ty {
+ case WithConsistentSnapshot:
+ return WithConsistentSnapshotStr
+ case ReadWrite:
+ return ReadWriteStr
+ case ReadOnly:
+ return ReadOnlyStr
+ default:
+ return "Unknown Transaction Access Mode"
+ }
+}
+
// CompliantName is used to get the name of the bind variable to use for this column name
func (node *ColName) CompliantName() string {
if !node.Qualifier.IsEmpty() {
@@ -2098,24 +2157,74 @@ func defaultRequiresParens(ct *ColumnType) bool {
}
// RemoveKeyspaceFromColName removes the Qualifier.Qualifier on all ColNames in the expression tree
-func RemoveKeyspaceFromColName(expr Expr) Expr {
- return RemoveKeyspace(expr).(Expr) // This hard cast is safe because we do not change the type the input
+func RemoveKeyspaceFromColName(expr Expr) {
+ RemoveKeyspace(expr)
}
// RemoveKeyspace removes the Qualifier.Qualifier on all ColNames in the AST
-func RemoveKeyspace(in SQLNode) SQLNode {
- return Rewrite(in, nil, func(cursor *Cursor) bool {
- switch col := cursor.Node().(type) {
+func RemoveKeyspace(in SQLNode) {
+ // Walk will only return an error if we return an error from the inner func. safe to ignore here
+ _ = Walk(func(node SQLNode) (kontinue bool, err error) {
+ switch col := node.(type) {
case *ColName:
if !col.Qualifier.Qualifier.IsEmpty() {
col.Qualifier.Qualifier = NewIdentifierCS("")
}
}
- return true
- })
+ return true, nil
+ }, in)
}
func convertStringToInt(integer string) int {
val, _ := strconv.Atoi(integer)
return val
}
+
+// SplitAndExpression breaks up the Expr into AND-separated conditions
+// and appends them to filters. Outer parenthesis are removed. Precedence
+// should be taken into account if expressions are recombined.
+func SplitAndExpression(filters []Expr, node Expr) []Expr {
+ if node == nil {
+ return filters
+ }
+ switch node := node.(type) {
+ case *AndExpr:
+ filters = SplitAndExpression(filters, node.Left)
+ return SplitAndExpression(filters, node.Right)
+ }
+ return append(filters, node)
+}
+
+// AndExpressions ands together two or more expressions, minimising the expr when possible
+func AndExpressions(exprs ...Expr) Expr {
+ switch len(exprs) {
+ case 0:
+ return nil
+ case 1:
+ return exprs[0]
+ default:
+ result := (Expr)(nil)
+ outer:
+ // we'll loop and remove any duplicates
+ for i, expr := range exprs {
+ if expr == nil {
+ continue
+ }
+ if result == nil {
+ result = expr
+ continue outer
+ }
+
+ for j := 0; j < i; j++ {
+ if Equals.Expr(expr, exprs[j]) {
+ continue outer
+ }
+ }
+ result = &AndExpr{Left: result, Right: expr}
+ }
+ return result
+ }
+}
+
+// Equals is the default Comparator for AST expressions.
+var Equals = &Comparator{}
diff --git a/go/vt/sqlparser/ast_funcs_test.go b/go/vt/sqlparser/ast_funcs_test.go
index 7ecb7cf72a4..b6a79da45ab 100644
--- a/go/vt/sqlparser/ast_funcs_test.go
+++ b/go/vt/sqlparser/ast_funcs_test.go
@@ -22,6 +22,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/sqltypes"
+ querypb "vitess.io/vitess/go/vt/proto/query"
)
func TestAddQueryHint(t *testing.T) {
@@ -81,3 +84,53 @@ func TestAddQueryHint(t *testing.T) {
})
}
}
+
+func TestSQLTypeToQueryType(t *testing.T) {
+ tcs := []struct {
+ input string
+ unsigned bool
+ output querypb.Type
+ }{
+ {
+ input: "tinyint",
+ unsigned: true,
+ output: sqltypes.Uint8,
+ },
+ {
+ input: "tinyint",
+ unsigned: false,
+ output: sqltypes.Int8,
+ },
+ {
+ input: "double",
+ output: sqltypes.Float64,
+ },
+ {
+ input: "float8",
+ output: sqltypes.Float64,
+ },
+ {
+ input: "float",
+ output: sqltypes.Float32,
+ },
+ {
+ input: "float4",
+ output: sqltypes.Float32,
+ },
+ {
+ input: "decimal",
+ output: sqltypes.Decimal,
+ },
+ }
+
+ for _, tc := range tcs {
+ name := tc.input
+ if tc.unsigned {
+ name += " unsigned"
+ }
+ t.Run(name, func(t *testing.T) {
+ got := SQLTypeToQueryType(tc.input, tc.unsigned)
+ require.Equal(t, tc.output, got)
+ })
+ }
+}
diff --git a/go/vt/sqlparser/ast_rewrite.go b/go/vt/sqlparser/ast_rewrite.go
index 9914b89afc2..ff4384992cc 100644
--- a/go/vt/sqlparser/ast_rewrite.go
+++ b/go/vt/sqlparser/ast_rewrite.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,8 +22,6 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl
return true
}
switch node := node.(type) {
- case AccessMode:
- return a.rewriteAccessMode(parent, node, replacer)
case *AddColumns:
return a.rewriteRefOfAddColumns(parent, node, replacer)
case *AddConstraintDefinition:
@@ -210,8 +208,6 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl
return a.rewriteRefOfIntroducerExpr(parent, node, replacer)
case *IsExpr:
return a.rewriteRefOfIsExpr(parent, node, replacer)
- case IsolationLevel:
- return a.rewriteIsolationLevel(parent, node, replacer)
case *JSONArrayExpr:
return a.rewriteRefOfJSONArrayExpr(parent, node, replacer)
case *JSONAttributesExpr:
@@ -226,8 +222,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl
return a.rewriteRefOfJSONKeysExpr(parent, node, replacer)
case *JSONObjectExpr:
return a.rewriteRefOfJSONObjectExpr(parent, node, replacer)
- case JSONObjectParam:
- return a.rewriteJSONObjectParam(parent, node, replacer)
+ case *JSONObjectParam:
+ return a.rewriteRefOfJSONObjectParam(parent, node, replacer)
case *JSONOverlapsExpr:
return a.rewriteRefOfJSONOverlapsExpr(parent, node, replacer)
case *JSONPrettyExpr:
@@ -398,8 +394,6 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl
return a.rewriteRefOfSetExpr(parent, node, replacer)
case SetExprs:
return a.rewriteSetExprs(parent, node, replacer)
- case *SetTransaction:
- return a.rewriteRefOfSetTransaction(parent, node, replacer)
case *Show:
return a.rewriteRefOfShow(parent, node, replacer)
case *ShowBasic:
@@ -414,6 +408,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl
return a.rewriteRefOfShowOther(parent, node, replacer)
case *ShowThrottledApps:
return a.rewriteRefOfShowThrottledApps(parent, node, replacer)
+ case *ShowThrottlerStatus:
+ return a.rewriteRefOfShowThrottlerStatus(parent, node, replacer)
case *StarExpr:
return a.rewriteRefOfStarExpr(parent, node, replacer)
case *Std:
@@ -474,6 +470,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl
return a.rewriteRefOfUpdateXMLExpr(parent, node, replacer)
case *Use:
return a.rewriteRefOfUse(parent, node, replacer)
+ case *VExplainStmt:
+ return a.rewriteRefOfVExplainStmt(parent, node, replacer)
case *VStream:
return a.rewriteRefOfVStream(parent, node, replacer)
case ValTuple:
@@ -1558,6 +1556,11 @@ func (a *application) rewriteRefOfColumnDefinition(parent SQLNode, node *ColumnD
}) {
return false
}
+ if !a.rewriteRefOfColumnType(node, node.Type, func(newNode, parent SQLNode) {
+ parent.(*ColumnDefinition).Type = newNode.(*ColumnType)
+ }) {
+ return false
+ }
if a.post != nil {
a.cur.replacer = replacer
a.cur.parent = parent
@@ -3647,7 +3650,10 @@ func (a *application) rewriteRefOfJSONObjectExpr(parent SQLNode, node *JSONObjec
}
return true
}
-func (a *application) rewriteJSONObjectParam(parent SQLNode, node JSONObjectParam, replacer replacerFunc) bool {
+func (a *application) rewriteRefOfJSONObjectParam(parent SQLNode, node *JSONObjectParam, replacer replacerFunc) bool {
+ if node == nil {
+ return true
+ }
if a.pre != nil {
a.cur.replacer = replacer
a.cur.parent = parent
@@ -3657,12 +3663,12 @@ func (a *application) rewriteJSONObjectParam(parent SQLNode, node JSONObjectPara
}
}
if !a.rewriteExpr(node, node.Key, func(newNode, parent SQLNode) {
- panic("[BUG] tried to replace 'Key' on 'JSONObjectParam'")
+ parent.(*JSONObjectParam).Key = newNode.(Expr)
}) {
return false
}
if !a.rewriteExpr(node, node.Value, func(newNode, parent SQLNode) {
- panic("[BUG] tried to replace 'Value' on 'JSONObjectParam'")
+ parent.(*JSONObjectParam).Value = newNode.(Expr)
}) {
return false
}
@@ -6357,42 +6363,6 @@ func (a *application) rewriteSetExprs(parent SQLNode, node SetExprs, replacer re
}
return true
}
-func (a *application) rewriteRefOfSetTransaction(parent SQLNode, node *SetTransaction, replacer replacerFunc) bool {
- if node == nil {
- return true
- }
- if a.pre != nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- if !a.pre(&a.cur) {
- return true
- }
- }
- if !a.rewriteRefOfParsedComments(node, node.Comments, func(newNode, parent SQLNode) {
- parent.(*SetTransaction).Comments = newNode.(*ParsedComments)
- }) {
- return false
- }
- for x, el := range node.Characteristics {
- if !a.rewriteCharacteristic(node, el, func(idx int) replacerFunc {
- return func(newNode, parent SQLNode) {
- parent.(*SetTransaction).Characteristics[idx] = newNode.(Characteristic)
- }
- }(x)) {
- return false
- }
- }
- if a.post != nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- if !a.post(&a.cur) {
- return false
- }
- }
- return true
-}
func (a *application) rewriteRefOfShow(parent SQLNode, node *Show, replacer replacerFunc) bool {
if node == nil {
return true
@@ -6586,6 +6556,30 @@ func (a *application) rewriteRefOfShowThrottledApps(parent SQLNode, node *ShowTh
}
return true
}
+func (a *application) rewriteRefOfShowThrottlerStatus(parent SQLNode, node *ShowThrottlerStatus, replacer replacerFunc) bool {
+ if node == nil {
+ return true
+ }
+ if a.pre != nil {
+ a.cur.replacer = replacer
+ a.cur.parent = parent
+ a.cur.node = node
+ if !a.pre(&a.cur) {
+ return true
+ }
+ }
+ if a.post != nil {
+ if a.pre == nil {
+ a.cur.replacer = replacer
+ a.cur.parent = parent
+ a.cur.node = node
+ }
+ if !a.post(&a.cur) {
+ return false
+ }
+ }
+ return true
+}
func (a *application) rewriteRefOfStarExpr(parent SQLNode, node *StarExpr, replacer replacerFunc) bool {
if node == nil {
return true
@@ -7592,6 +7586,38 @@ func (a *application) rewriteRefOfUse(parent SQLNode, node *Use, replacer replac
}
return true
}
+func (a *application) rewriteRefOfVExplainStmt(parent SQLNode, node *VExplainStmt, replacer replacerFunc) bool {
+ if node == nil {
+ return true
+ }
+ if a.pre != nil {
+ a.cur.replacer = replacer
+ a.cur.parent = parent
+ a.cur.node = node
+ if !a.pre(&a.cur) {
+ return true
+ }
+ }
+ if !a.rewriteStatement(node, node.Statement, func(newNode, parent SQLNode) {
+ parent.(*VExplainStmt).Statement = newNode.(Statement)
+ }) {
+ return false
+ }
+ if !a.rewriteRefOfParsedComments(node, node.Comments, func(newNode, parent SQLNode) {
+ parent.(*VExplainStmt).Comments = newNode.(*ParsedComments)
+ }) {
+ return false
+ }
+ if a.post != nil {
+ a.cur.replacer = replacer
+ a.cur.parent = parent
+ a.cur.node = node
+ if !a.post(&a.cur) {
+ return false
+ }
+ }
+ return true
+}
func (a *application) rewriteRefOfVStream(parent SQLNode, node *VStream, replacer replacerFunc) bool {
if node == nil {
return true
@@ -8424,20 +8450,6 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re
return true
}
}
-func (a *application) rewriteCharacteristic(parent SQLNode, node Characteristic, replacer replacerFunc) bool {
- if node == nil {
- return true
- }
- switch node := node.(type) {
- case AccessMode:
- return a.rewriteAccessMode(parent, node, replacer)
- case IsolationLevel:
- return a.rewriteIsolationLevel(parent, node, replacer)
- default:
- // this should never happen
- return true
- }
-}
func (a *application) rewriteColTuple(parent SQLNode, node ColTuple, replacer replacerFunc) bool {
if node == nil {
return true
@@ -8875,14 +8887,14 @@ func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer
return a.rewriteRefOfSelect(parent, node, replacer)
case *Set:
return a.rewriteRefOfSet(parent, node, replacer)
- case *SetTransaction:
- return a.rewriteRefOfSetTransaction(parent, node, replacer)
case *Show:
return a.rewriteRefOfShow(parent, node, replacer)
case *ShowMigrationLogs:
return a.rewriteRefOfShowMigrationLogs(parent, node, replacer)
case *ShowThrottledApps:
return a.rewriteRefOfShowThrottledApps(parent, node, replacer)
+ case *ShowThrottlerStatus:
+ return a.rewriteRefOfShowThrottlerStatus(parent, node, replacer)
case *Stream:
return a.rewriteRefOfStream(parent, node, replacer)
case *TruncateTable:
@@ -8895,6 +8907,8 @@ func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer
return a.rewriteRefOfUpdate(parent, node, replacer)
case *Use:
return a.rewriteRefOfUse(parent, node, replacer)
+ case *VExplainStmt:
+ return a.rewriteRefOfVExplainStmt(parent, node, replacer)
case *VStream:
return a.rewriteRefOfVStream(parent, node, replacer)
default:
@@ -8920,27 +8934,6 @@ func (a *application) rewriteTableExpr(parent SQLNode, node TableExpr, replacer
return true
}
}
-func (a *application) rewriteAccessMode(parent SQLNode, node AccessMode, replacer replacerFunc) bool {
- if a.pre != nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- if !a.pre(&a.cur) {
- return true
- }
- }
- if a.post != nil {
- if a.pre == nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- }
- if !a.post(&a.cur) {
- return false
- }
- }
- return true
-}
func (a *application) rewriteAlgorithmValue(parent SQLNode, node AlgorithmValue, replacer replacerFunc) bool {
if a.pre != nil {
a.cur.replacer = replacer
@@ -9004,27 +8997,6 @@ func (a *application) rewriteBoolVal(parent SQLNode, node BoolVal, replacer repl
}
return true
}
-func (a *application) rewriteIsolationLevel(parent SQLNode, node IsolationLevel, replacer replacerFunc) bool {
- if a.pre != nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- if !a.pre(&a.cur) {
- return true
- }
- }
- if a.post != nil {
- if a.pre == nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- }
- if !a.post(&a.cur) {
- return false
- }
- }
- return true
-}
func (a *application) rewriteListArg(parent SQLNode, node ListArg, replacer replacerFunc) bool {
if a.pre != nil {
a.cur.replacer = replacer
@@ -9136,38 +9108,6 @@ func (a *application) rewriteRefOfIdentifierCS(parent SQLNode, node *IdentifierC
}
return true
}
-func (a *application) rewriteRefOfJSONObjectParam(parent SQLNode, node *JSONObjectParam, replacer replacerFunc) bool {
- if node == nil {
- return true
- }
- if a.pre != nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- if !a.pre(&a.cur) {
- return true
- }
- }
- if !a.rewriteExpr(node, node.Key, func(newNode, parent SQLNode) {
- parent.(*JSONObjectParam).Key = newNode.(Expr)
- }) {
- return false
- }
- if !a.rewriteExpr(node, node.Value, func(newNode, parent SQLNode) {
- parent.(*JSONObjectParam).Value = newNode.(Expr)
- }) {
- return false
- }
- if a.post != nil {
- a.cur.replacer = replacer
- a.cur.parent = parent
- a.cur.node = node
- if !a.post(&a.cur) {
- return false
- }
- }
- return true
-}
func (a *application) rewriteRefOfRootNode(parent SQLNode, node *RootNode, replacer replacerFunc) bool {
if node == nil {
return true
diff --git a/go/vt/sqlparser/ast_rewriting.go b/go/vt/sqlparser/ast_rewriting.go
index 5ecdea8896b..1e7029e3b87 100644
--- a/go/vt/sqlparser/ast_rewriting.go
+++ b/go/vt/sqlparser/ast_rewriting.go
@@ -53,8 +53,12 @@ type ReservedVars struct {
sqNext int64
}
+type VSchemaViews interface {
+ FindView(name TableName) SelectStatement
+}
+
// ReserveAll tries to reserve all the given variable names. If they're all available,
-// they are reserved and the function returns true. Otherwise the function returns false.
+// they are reserved and the function returns true. Otherwise, the function returns false.
func (r *ReservedVars) ReserveAll(names ...string) bool {
for _, name := range names {
if _, ok := r.reserved[name]; ok {
@@ -203,6 +207,7 @@ func PrepareAST(
selectLimit int,
setVarComment string,
sysVars map[string]string,
+ views VSchemaViews,
) (*RewriteASTResult, error) {
if parameterize {
err := Normalize(in, reservedVars, bindVars)
@@ -210,15 +215,25 @@ func PrepareAST(
return nil, err
}
}
- return RewriteAST(in, keyspace, selectLimit, setVarComment, sysVars)
+ return RewriteAST(in, keyspace, selectLimit, setVarComment, sysVars, views)
}
// RewriteAST rewrites the whole AST, replacing function calls and adding column aliases to queries.
// SET_VAR comments are also added to the AST if required.
-func RewriteAST(in Statement, keyspace string, selectLimit int, setVarComment string, sysVars map[string]string) (*RewriteASTResult, error) {
- er := newASTRewriter(keyspace, selectLimit, setVarComment, sysVars)
+func RewriteAST(
+ in Statement,
+ keyspace string,
+ selectLimit int,
+ setVarComment string,
+ sysVars map[string]string,
+ views VSchemaViews,
+) (*RewriteASTResult, error) {
+ er := newASTRewriter(keyspace, selectLimit, setVarComment, sysVars, views)
er.shouldRewriteDatabaseFunc = shouldRewriteDatabaseFunc(in)
- result := Rewrite(in, er.rewrite, nil)
+ result := SafeRewrite(in, er.rewriteDown, er.rewriteUp)
+ if er.err != nil {
+ return nil, er.err
+ }
out, ok := result.(Statement)
if !ok {
@@ -263,15 +278,17 @@ type astRewriter struct {
selectLimit int
setVarComment string
sysVars map[string]string
+ views VSchemaViews
}
-func newASTRewriter(keyspace string, selectLimit int, setVarComment string, sysVars map[string]string) *astRewriter {
+func newASTRewriter(keyspace string, selectLimit int, setVarComment string, sysVars map[string]string, views VSchemaViews) *astRewriter {
return &astRewriter{
bindVars: &BindVarNeeds{},
keyspace: keyspace,
selectLimit: selectLimit,
setVarComment: setVarComment,
sysVars: sysVars,
+ views: views,
}
}
@@ -293,9 +310,9 @@ const (
)
func (er *astRewriter) rewriteAliasedExpr(node *AliasedExpr) (*BindVarNeeds, error) {
- inner := newASTRewriter(er.keyspace, er.selectLimit, er.setVarComment, er.sysVars)
+ inner := newASTRewriter(er.keyspace, er.selectLimit, er.setVarComment, er.sysVars, er.views)
inner.shouldRewriteDatabaseFunc = er.shouldRewriteDatabaseFunc
- tmp := Rewrite(node.Expr, inner.rewrite, nil)
+ tmp := SafeRewrite(node.Expr, inner.rewriteDown, inner.rewriteUp)
newExpr, ok := tmp.(Expr)
if !ok {
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to rewrite AST. function expected to return Expr returned a %s", String(tmp))
@@ -304,7 +321,15 @@ func (er *astRewriter) rewriteAliasedExpr(node *AliasedExpr) (*BindVarNeeds, err
return inner.bindVars, nil
}
-func (er *astRewriter) rewrite(cursor *Cursor) bool {
+func (er *astRewriter) rewriteDown(node SQLNode, _ SQLNode) bool {
+ switch node := node.(type) {
+ case *Select:
+ er.visitSelect(node)
+ }
+ return true
+}
+
+func (er *astRewriter) rewriteUp(cursor *Cursor) bool {
// Add SET_VAR comment to this node if it supports it and is needed
if supportOptimizerHint, supportsOptimizerHint := cursor.Node().(SupportOptimizerHint); supportsOptimizerHint && er.setVarComment != "" {
newComments, err := supportOptimizerHint.GetParsedComments().AddQueryHint(er.setVarComment)
@@ -316,103 +341,145 @@ func (er *astRewriter) rewrite(cursor *Cursor) bool {
}
switch node := cursor.Node().(type) {
- // select last_insert_id() -> select :__lastInsertId as `last_insert_id()`
- case *Select:
- for _, col := range node.SelectExprs {
- _, hasStar := col.(*StarExpr)
- if hasStar {
- er.hasStarInSelect = true
- }
-
- aliasedExpr, ok := col.(*AliasedExpr)
- if ok && aliasedExpr.As.IsEmpty() {
- buf := NewTrackedBuffer(nil)
- aliasedExpr.Expr.Format(buf)
- innerBindVarNeeds, err := er.rewriteAliasedExpr(aliasedExpr)
- if err != nil {
- er.err = err
- return false
- }
- if innerBindVarNeeds.HasRewrites() {
- aliasedExpr.As = NewIdentifierCI(buf.String())
- }
- er.bindVars.MergeWith(innerBindVarNeeds)
- }
- }
- // set select limit if explicitly not set when sql_select_limit is set on the connection.
- if er.selectLimit > 0 && node.Limit == nil {
- node.Limit = &Limit{Rowcount: NewIntLiteral(strconv.Itoa(er.selectLimit))}
- }
case *Union:
- // set select limit if explicitly not set when sql_select_limit is set on the connection.
- if er.selectLimit > 0 && node.Limit == nil {
- node.Limit = &Limit{Rowcount: NewIntLiteral(strconv.Itoa(er.selectLimit))}
- }
+ er.rewriteUnion(node)
case *FuncExpr:
er.funcRewrite(cursor, node)
case *Variable:
- // Iff we are in SET, we want to change the scope of variables if a modifier has been set
- // and only on the lhs of the assignment:
- // set session sql_mode = @someElse
- // here we need to change the scope of `sql_mode` and not of `@someElse`
- if v, isSet := cursor.Parent().(*SetExpr); isSet && v.Var == node {
- break
- }
- switch node.Scope {
- case VariableScope:
- er.udvRewrite(cursor, node)
- case GlobalScope, SessionScope:
- er.sysVarRewrite(cursor, node)
- }
+ er.rewriteVariable(cursor, node)
case *Subquery:
er.unnestSubQueries(cursor, node)
case *NotExpr:
- switch inner := node.Expr.(type) {
- case *ComparisonExpr:
- // not col = 42 => col != 42
- // not col > 42 => col <= 42
- // etc
- canChange, inverse := inverseOp(inner.Operator)
- if canChange {
- inner.Operator = inverse
- cursor.Replace(inner)
- }
- case *NotExpr:
- // not not true => true
- cursor.Replace(inner.Expr)
- case BoolVal:
- // not true => false
- inner = !inner
- cursor.Replace(inner)
- }
+ er.rewriteNotExpr(cursor, node)
case *AliasedTableExpr:
- if !SystemSchema(er.keyspace) {
- break
- }
- aliasTableName, ok := node.Expr.(TableName)
- if !ok {
- return true
- }
- // Qualifier should not be added to dual table
- if aliasTableName.Name.String() == "dual" {
- break
- }
- if er.keyspace != "" && aliasTableName.Qualifier.IsEmpty() {
+ er.rewriteAliasedTable(cursor, node)
+ case *ShowBasic:
+ er.rewriteShowBasic(node)
+ case *ExistsExpr:
+ er.existsRewrite(cursor, node)
+ }
+ return true
+}
+
+func (er *astRewriter) rewriteUnion(node *Union) {
+ // set select limit if explicitly not set when sql_select_limit is set on the connection.
+ if er.selectLimit > 0 && node.Limit == nil {
+ node.Limit = &Limit{Rowcount: NewIntLiteral(strconv.Itoa(er.selectLimit))}
+ }
+}
+
+func (er *astRewriter) rewriteAliasedTable(cursor *Cursor, node *AliasedTableExpr) {
+ aliasTableName, ok := node.Expr.(TableName)
+ if !ok {
+ return
+ }
+
+ // Qualifier should not be added to dual table
+ tblName := aliasTableName.Name.String()
+ if tblName == "dual" {
+ return
+ }
+
+ if SystemSchema(er.keyspace) {
+ if aliasTableName.Qualifier.IsEmpty() {
aliasTableName.Qualifier = NewIdentifierCS(er.keyspace)
node.Expr = aliasTableName
cursor.Replace(node)
}
- case *ShowBasic:
- if node.Command == VariableGlobal || node.Command == VariableSession {
- varsToAdd := sysvars.GetInterestingVariables()
- for _, sysVar := range varsToAdd {
- er.bindVars.AddSysVar(sysVar)
- }
+ return
+ }
+
+ // Could we be dealing with a view?
+ if er.views == nil {
+ return
+ }
+ view := er.views.FindView(aliasTableName)
+ if view == nil {
+ return
+ }
+
+ // Aha! It's a view. Let's replace it with a derived table
+ node.Expr = &DerivedTable{Select: CloneSelectStatement(view)}
+ if node.As.IsEmpty() {
+ node.As = NewIdentifierCS(tblName)
+ }
+}
+
+func (er *astRewriter) rewriteShowBasic(node *ShowBasic) {
+ if node.Command == VariableGlobal || node.Command == VariableSession {
+ varsToAdd := sysvars.GetInterestingVariables()
+ for _, sysVar := range varsToAdd {
+ er.bindVars.AddSysVar(sysVar)
}
- case *ExistsExpr:
- er.existsRewrite(cursor, node)
}
- return true
+}
+
+func (er *astRewriter) rewriteNotExpr(cursor *Cursor, node *NotExpr) {
+ switch inner := node.Expr.(type) {
+ case *ComparisonExpr:
+ // not col = 42 => col != 42
+ // not col > 42 => col <= 42
+ // etc
+ canChange, inverse := inverseOp(inner.Operator)
+ if canChange {
+ inner.Operator = inverse
+ cursor.Replace(inner)
+ }
+ case *NotExpr:
+ // not not true => true
+ cursor.Replace(inner.Expr)
+ case BoolVal:
+ // not true => false
+ inner = !inner
+ cursor.Replace(inner)
+ }
+}
+
+func (er *astRewriter) rewriteVariable(cursor *Cursor, node *Variable) {
+ // Iff we are in SET, we want to change the scope of variables if a modifier has been set
+ // and only on the lhs of the assignment:
+ // set session sql_mode = @someElse
+ // here we need to change the scope of `sql_mode` and not of `@someElse`
+ if v, isSet := cursor.Parent().(*SetExpr); isSet && v.Var == node {
+ return
+ }
+ switch node.Scope {
+ case VariableScope:
+ er.udvRewrite(cursor, node)
+ case GlobalScope, SessionScope, NextTxScope:
+ er.sysVarRewrite(cursor, node)
+ }
+}
+
+func (er *astRewriter) visitSelect(node *Select) {
+ for _, col := range node.SelectExprs {
+ if _, hasStar := col.(*StarExpr); hasStar {
+ er.hasStarInSelect = true
+ continue
+ }
+
+ aliasedExpr, ok := col.(*AliasedExpr)
+ if !ok || !aliasedExpr.As.IsEmpty() {
+ continue
+ }
+ buf := NewTrackedBuffer(nil)
+ aliasedExpr.Expr.Format(buf)
+ // select last_insert_id() -> select :__lastInsertId as `last_insert_id()`
+ innerBindVarNeeds, err := er.rewriteAliasedExpr(aliasedExpr)
+ if err != nil {
+ er.err = err
+ return
+ }
+ if innerBindVarNeeds.HasRewrites() {
+ aliasedExpr.As = NewIdentifierCI(buf.String())
+ }
+ er.bindVars.MergeWith(innerBindVarNeeds)
+
+ }
+ // set select limit if explicitly not set when sql_select_limit is set on the connection.
+ if er.selectLimit > 0 && node.Limit == nil {
+ node.Limit = &Limit{Rowcount: NewIntLiteral(strconv.Itoa(er.selectLimit))}
+ }
}
func inverseOp(i ComparisonExprOperator) (bool, ComparisonExprOperator) {
@@ -471,6 +538,7 @@ func (er *astRewriter) sysVarRewrite(cursor *Cursor, node *Variable) {
sysvars.SQLSelectLimit.Name,
sysvars.Version.Name,
sysvars.VersionComment.Name,
+ sysvars.QueryTimeout.Name,
sysvars.Workload.Name:
found = true
}
@@ -496,18 +564,23 @@ var funcRewrites = map[string]string{
}
func (er *astRewriter) funcRewrite(cursor *Cursor, node *FuncExpr) {
- bindVar, found := funcRewrites[node.Name.Lowered()]
- if found {
- if bindVar == DBVarName && !er.shouldRewriteDatabaseFunc {
- return
- }
- if len(node.Exprs) > 0 {
- er.err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Argument to %s() not supported", node.Name.Lowered())
- return
- }
- cursor.Replace(bindVarExpression(bindVar))
- er.bindVars.AddFuncResult(bindVar)
+ lowered := node.Name.Lowered()
+ if lowered == "last_insert_id" && len(node.Exprs) > 0 {
+ // if we are dealing with is LAST_INSERT_ID() with an argument, we don't need to rewrite it.
+ // with an argument, this is an identity function that will update the session state and
+ // sets the correct fields in the OK TCP packet that we send back
+ return
+ }
+ bindVar, found := funcRewrites[lowered]
+ if !found || (bindVar == DBVarName && !er.shouldRewriteDatabaseFunc) {
+ return
+ }
+ if len(node.Exprs) > 0 {
+ er.err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Argument to %s() not supported", lowered)
+ return
}
+ cursor.Replace(bindVarExpression(bindVar))
+ er.bindVars.AddFuncResult(bindVar)
}
func (er *astRewriter) unnestSubQueries(cursor *Cursor, subquery *Subquery) {
@@ -552,7 +625,7 @@ func (er *astRewriter) unnestSubQueries(cursor *Cursor, subquery *Subquery) {
er.bindVars.NoteRewrite()
// we need to make sure that the inner expression also gets rewritten,
// so we fire off another rewriter traversal here
- rewritten := Rewrite(expr.Expr, er.rewrite, nil)
+ rewritten := SafeRewrite(expr.Expr, er.rewriteDown, er.rewriteUp)
// Here we need to handle the subquery rewrite in case in occurs in an IN clause
// For example, SELECT id FROM user WHERE id IN (SELECT 1 FROM DUAL)
@@ -574,31 +647,33 @@ func (er *astRewriter) unnestSubQueries(cursor *Cursor, subquery *Subquery) {
}
func (er *astRewriter) existsRewrite(cursor *Cursor, node *ExistsExpr) {
- switch node := node.Subquery.Select.(type) {
- case *Select:
- if node.Limit == nil {
- node.Limit = &Limit{}
- }
- node.Limit.Rowcount = NewIntLiteral("1")
+ sel, ok := node.Subquery.Select.(*Select)
+ if !ok {
+ return
+ }
- if node.Having != nil {
- // If the query has HAVING, we can't take any shortcuts
- return
- }
+ if sel.Limit == nil {
+ sel.Limit = &Limit{}
+ }
+ sel.Limit.Rowcount = NewIntLiteral("1")
- if len(node.GroupBy) == 0 && node.SelectExprs.AllAggregation() {
- // in these situations, we are guaranteed to always get a non-empty result,
- // so we can replace the EXISTS with a literal true
- cursor.Replace(BoolVal(true))
- }
+ if sel.Having != nil {
+ // If the query has HAVING, we can't take any shortcuts
+ return
+ }
- // If we are not doing HAVING, we can safely replace all select expressions with a
- // single `1` and remove any grouping
- node.SelectExprs = SelectExprs{
- &AliasedExpr{Expr: NewIntLiteral("1")},
- }
- node.GroupBy = nil
+ if len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation() {
+ // in these situations, we are guaranteed to always get a non-empty result,
+ // so we can replace the EXISTS with a literal true
+ cursor.Replace(BoolVal(true))
}
+
+ // If we are not doing HAVING, we can safely replace all select expressions with a
+ // single `1` and remove any grouping
+ sel.SelectExprs = SelectExprs{
+ &AliasedExpr{Expr: NewIntLiteral("1")},
+ }
+ sel.GroupBy = nil
}
func bindVarExpression(name string) Expr {
@@ -612,188 +687,3 @@ func SystemSchema(schema string) bool {
strings.EqualFold(schema, "sys") ||
strings.EqualFold(schema, "mysql")
}
-
-// RewriteToCNF walks the input AST and rewrites any boolean logic into CNF
-// Note: In order to re-plan, we need to empty the accumulated metadata in the AST,
-// so ColName.Metadata will be nil:ed out as part of this rewrite
-func RewriteToCNF(ast SQLNode) SQLNode {
- for {
- finishedRewrite := true
- ast = Rewrite(ast, func(cursor *Cursor) bool {
- if e, isExpr := cursor.node.(Expr); isExpr {
- rewritten, didRewrite := rewriteToCNFExpr(e)
- if didRewrite {
- finishedRewrite = false
- cursor.Replace(rewritten)
- }
- }
- if col, isCol := cursor.node.(*ColName); isCol {
- col.Metadata = nil
- }
- return true
- }, nil)
-
- if finishedRewrite {
- return ast
- }
- }
-}
-
-func distinctOr(in *OrExpr) (Expr, bool) {
- todo := []*OrExpr{in}
- var leaves []Expr
- for len(todo) > 0 {
- curr := todo[0]
- todo = todo[1:]
- addAnd := func(in Expr) {
- and, ok := in.(*OrExpr)
- if ok {
- todo = append(todo, and)
- } else {
- leaves = append(leaves, in)
- }
- }
- addAnd(curr.Left)
- addAnd(curr.Right)
- }
- original := len(leaves)
- var predicates []Expr
-
-outer1:
- for len(leaves) > 0 {
- curr := leaves[0]
- leaves = leaves[1:]
- for _, alreadyIn := range predicates {
- if EqualsExpr(alreadyIn, curr) {
- continue outer1
- }
- }
- predicates = append(predicates, curr)
- }
- if original == len(predicates) {
- return in, false
- }
- var result Expr
- for i, curr := range predicates {
- if i == 0 {
- result = curr
- continue
- }
- result = &OrExpr{Left: result, Right: curr}
- }
- return result, true
-}
-func distinctAnd(in *AndExpr) (Expr, bool) {
- todo := []*AndExpr{in}
- var leaves []Expr
- for len(todo) > 0 {
- curr := todo[0]
- todo = todo[1:]
- addAnd := func(in Expr) {
- and, ok := in.(*AndExpr)
- if ok {
- todo = append(todo, and)
- } else {
- leaves = append(leaves, in)
- }
- }
- addAnd(curr.Left)
- addAnd(curr.Right)
- }
- original := len(leaves)
- var predicates []Expr
-
-outer1:
- for len(leaves) > 0 {
- curr := leaves[0]
- leaves = leaves[1:]
- for _, alreadyIn := range predicates {
- if EqualsExpr(alreadyIn, curr) {
- continue outer1
- }
- }
- predicates = append(predicates, curr)
- }
- if original == len(predicates) {
- return in, false
- }
- var result Expr
- for i, curr := range predicates {
- if i == 0 {
- result = curr
- continue
- }
- result = &AndExpr{Left: result, Right: curr}
- }
- return result, true
-}
-
-func rewriteToCNFExpr(expr Expr) (Expr, bool) {
- switch expr := expr.(type) {
- case *NotExpr:
- switch child := expr.Expr.(type) {
- case *NotExpr:
- // NOT NOT A => A
- return child.Expr, true
- case *OrExpr:
- // DeMorgan Rewriter
- // NOT (A OR B) => NOT A AND NOT B
- return &AndExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, true
- case *AndExpr:
- // DeMorgan Rewriter
- // NOT (A AND B) => NOT A OR NOT B
- return &OrExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, true
- }
- case *OrExpr:
- or := expr
- if and, ok := or.Left.(*AndExpr); ok {
- // Simplification
- // (A AND B) OR A => A
- if EqualsExpr(or.Right, and.Left) || EqualsExpr(or.Right, and.Right) {
- return or.Right, true
- }
- // Distribution Law
- // (A AND B) OR C => (A OR C) AND (B OR C)
- return &AndExpr{Left: &OrExpr{Left: and.Left, Right: or.Right}, Right: &OrExpr{Left: and.Right, Right: or.Right}}, true
- }
- if and, ok := or.Right.(*AndExpr); ok {
- // Simplification
- // A OR (A AND B) => A
- if EqualsExpr(or.Left, and.Left) || EqualsExpr(or.Left, and.Right) {
- return or.Left, true
- }
- // Distribution Law
- // C OR (A AND B) => (C OR A) AND (C OR B)
- return &AndExpr{Left: &OrExpr{Left: or.Left, Right: and.Left}, Right: &OrExpr{Left: or.Left, Right: and.Right}}, true
- }
- // Try to make distinct
- return distinctOr(expr)
-
- case *XorExpr:
- // DeMorgan Rewriter
- // (A XOR B) => (A OR B) AND NOT (A AND B)
- return &AndExpr{Left: &OrExpr{Left: expr.Left, Right: expr.Right}, Right: &NotExpr{Expr: &AndExpr{Left: expr.Left, Right: expr.Right}}}, true
- case *AndExpr:
- res, rewritten := distinctAnd(expr)
- if rewritten {
- return res, rewritten
- }
- and := expr
- if or, ok := and.Left.(*OrExpr); ok {
- // Simplification
- // (A OR B) AND A => A
- if EqualsExpr(or.Left, and.Right) || EqualsExpr(or.Right, and.Right) {
- return and.Right, true
- }
- }
- if or, ok := and.Right.(*OrExpr); ok {
- // Simplification
- // A OR (A AND B) => A
- if EqualsExpr(or.Left, and.Left) || EqualsExpr(or.Right, and.Left) {
- return or.Left, true
- }
- }
-
- }
- return expr, false
-}
diff --git a/go/vt/sqlparser/ast_rewriting_test.go b/go/vt/sqlparser/ast_rewriting_test.go
index 95e84ae8c20..6fe59acbc85 100644
--- a/go/vt/sqlparser/ast_rewriting_test.go
+++ b/go/vt/sqlparser/ast_rewriting_test.go
@@ -37,12 +37,13 @@ type testCaseSysVar struct {
}
type myTestCase struct {
- in, expected string
- liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool
- ddlStrategy, sessionUUID, sessionEnableSystemSettings bool
- udv int
- autocommit, clientFoundRows, skipQueryPlanCache, socket bool
- sqlSelectLimit, transactionMode, workload, version, versionComment bool
+ in, expected string
+ liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool
+ ddlStrategy, sessionUUID, sessionEnableSystemSettings bool
+ udv int
+ autocommit, clientFoundRows, skipQueryPlanCache, socket, queryTimeout bool
+ sqlSelectLimit, transactionMode, workload, version, versionComment bool
+ txIsolation bool
}
func TestRewrites(in *testing.T) {
@@ -54,6 +55,10 @@ func TestRewrites(in *testing.T) {
in: "SELECT @@version",
expected: "SELECT :__vtversion as `@@version`",
version: true,
+ }, {
+ in: "SELECT @@query_timeout",
+ expected: "SELECT :__vtquery_timeout as `@@query_timeout`",
+ queryTimeout: true,
}, {
in: "SELECT @@version_comment",
expected: "SELECT :__vtversion_comment as `@@version_comment`",
@@ -281,6 +286,9 @@ func TestRewrites(in *testing.T) {
}, {
in: "SELECT * FROM tbl WHERE exists(select col1, col2, count(*) from other_table where foo > bar group by col1, col2 having count(*) > 3)",
expected: "SELECT * FROM tbl WHERE exists(select col1, col2, count(*) from other_table where foo > bar group by col1, col2 having count(*) > 3 limit 1)",
+ }, {
+ in: "SELECT id, name, salary FROM user_details",
+ expected: "SELECT id, name, salary FROM (select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id) as user_details",
}, {
in: "SHOW VARIABLES",
expected: "SHOW VARIABLES",
@@ -299,6 +307,7 @@ func TestRewrites(in *testing.T) {
rawTimeout: true,
sessTrackGTID: true,
socket: true,
+ queryTimeout: true,
}, {
in: "SHOW GLOBAL VARIABLES",
expected: "SHOW GLOBAL VARIABLES",
@@ -317,6 +326,7 @@ func TestRewrites(in *testing.T) {
rawTimeout: true,
sessTrackGTID: true,
socket: true,
+ queryTimeout: true,
}}
for _, tc := range tests {
@@ -325,7 +335,14 @@ func TestRewrites(in *testing.T) {
stmt, err := Parse(tc.in)
require.NoError(err)
- result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, "", nil) // passing `ks` just to test that no rewriting happens as it is not system schema
+ result, err := RewriteAST(
+ stmt,
+ "ks", // passing `ks` just to test that no rewriting happens as it is not system schema
+ SQLSelectLimitUnset,
+ "",
+ nil,
+ &fakeViews{},
+ )
require.NoError(err)
expected, err := Parse(tc.expected)
@@ -345,6 +362,7 @@ func TestRewrites(in *testing.T) {
assert.Equal(tc.sqlSelectLimit, result.NeedsSysVar(sysvars.SQLSelectLimit.Name), "should need :__vtsqlSelectLimit")
assert.Equal(tc.transactionMode, result.NeedsSysVar(sysvars.TransactionMode.Name), "should need :__vttransactionMode")
assert.Equal(tc.workload, result.NeedsSysVar(sysvars.Workload.Name), "should need :__vtworkload")
+ assert.Equal(tc.queryTimeout, result.NeedsSysVar(sysvars.QueryTimeout.Name), "should need :__vtquery_timeout")
assert.Equal(tc.ddlStrategy, result.NeedsSysVar(sysvars.DDLStrategy.Name), "should need ddlStrategy")
assert.Equal(tc.sessionUUID, result.NeedsSysVar(sysvars.SessionUUID.Name), "should need sessionUUID")
assert.Equal(tc.sessionEnableSystemSettings, result.NeedsSysVar(sysvars.SessionEnableSystemSettings.Name), "should need sessionEnableSystemSettings")
@@ -358,6 +376,19 @@ func TestRewrites(in *testing.T) {
}
}
+type fakeViews struct{}
+
+func (*fakeViews) FindView(name TableName) SelectStatement {
+ if name.Name.String() != "user_details" {
+ return nil
+ }
+ statement, err := Parse("select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id")
+ if err != nil {
+ return nil
+ }
+ return statement.(SelectStatement)
+}
+
func TestRewritesWithSetVarComment(in *testing.T) {
tests := []testCaseSetVar{{
in: "select 1",
@@ -399,7 +430,7 @@ func TestRewritesWithSetVarComment(in *testing.T) {
stmt, err := Parse(tc.in)
require.NoError(err)
- result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, tc.setVarComment, nil)
+ result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, tc.setVarComment, nil, &fakeViews{})
require.NoError(err)
expected, err := Parse(tc.expected)
@@ -418,6 +449,27 @@ func TestRewritesSysVar(in *testing.T) {
in: "select @x = @@sql_mode",
expected: "select :__vtudvx = :__vtsql_mode as `@x = @@sql_mode` from dual",
sysVar: map[string]string{"sql_mode": "' '"},
+ }, {
+ in: "SELECT @@tx_isolation",
+ expected: "select @@tx_isolation from dual",
+ }, {
+ in: "SELECT @@transaction_isolation",
+ expected: "select @@transaction_isolation from dual",
+ }, {
+ in: "SELECT @@session.transaction_isolation",
+ expected: "select @@session.transaction_isolation from dual",
+ }, {
+ in: "SELECT @@tx_isolation",
+ sysVar: map[string]string{"tx_isolation": "'READ-COMMITTED'"},
+ expected: "select :__vttx_isolation as `@@tx_isolation` from dual",
+ }, {
+ in: "SELECT @@transaction_isolation",
+ sysVar: map[string]string{"transaction_isolation": "'READ-COMMITTED'"},
+ expected: "select :__vttransaction_isolation as `@@transaction_isolation` from dual",
+ }, {
+ in: "SELECT @@session.transaction_isolation",
+ sysVar: map[string]string{"transaction_isolation": "'READ-COMMITTED'"},
+ expected: "select :__vttransaction_isolation as `@@session.transaction_isolation` from dual",
}}
for _, tc := range tests {
@@ -426,7 +478,7 @@ func TestRewritesSysVar(in *testing.T) {
stmt, err := Parse(tc.in)
require.NoError(err)
- result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, "", tc.sysVar)
+ result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, "", tc.sysVar, &fakeViews{})
require.NoError(err)
expected, err := Parse(tc.expected)
@@ -476,7 +528,7 @@ func TestRewritesWithDefaultKeyspace(in *testing.T) {
stmt, err := Parse(tc.in)
require.NoError(err)
- result, err := RewriteAST(stmt, "sys", SQLSelectLimitUnset, "", nil)
+ result, err := RewriteAST(stmt, "sys", SQLSelectLimitUnset, "", nil, &fakeViews{})
require.NoError(err)
expected, err := Parse(tc.expected)
@@ -487,101 +539,6 @@ func TestRewritesWithDefaultKeyspace(in *testing.T) {
}
}
-func TestRewriteToCNF(in *testing.T) {
- tests := []struct {
- in string
- expected string
- }{{
- in: "not (not A = 3)",
- expected: "A = 3",
- }, {
- in: "not (A = 3 and B = 2)",
- expected: "not A = 3 or not B = 2",
- }, {
- in: "not (A = 3 or B = 2)",
- expected: "not A = 3 and not B = 2",
- }, {
- in: "A xor B",
- expected: "(A or B) and not (A and B)",
- }, {
- in: "(A and B) or C",
- expected: "(A or C) and (B or C)",
- }, {
- in: "C or (A and B)",
- expected: "(C or A) and (C or B)",
- }, {
- in: "A and A",
- expected: "A",
- }, {
- in: "A OR A",
- expected: "A",
- }, {
- in: "A OR (A AND B)",
- expected: "A",
- }, {
- in: "A OR (B AND A)",
- expected: "A",
- }, {
- in: "(A AND B) OR A",
- expected: "A",
- }, {
- in: "(B AND A) OR A",
- expected: "A",
- }, {
- in: "(A and B) and (B and A)",
- expected: "A and B",
- }, {
- in: "(A or B) and A",
- expected: "A",
- }, {
- in: "A and (A or B)",
- expected: "A",
- }}
-
- for _, tc := range tests {
- in.Run(tc.in, func(t *testing.T) {
- stmt, err := Parse("SELECT * FROM T WHERE " + tc.in)
- require.NoError(t, err)
-
- expr := stmt.(*Select).Where.Expr
- expr, didRewrite := rewriteToCNFExpr(expr)
- assert.True(t, didRewrite)
- assert.Equal(t, tc.expected, String(expr))
- })
- }
-}
-
-func TestFixedPointRewriteToCNF(in *testing.T) {
- tests := []struct {
- in string
- expected string
- }{{
- in: "A xor B",
- expected: "(A or B) and (not A or not B)",
- }, {
- in: "(A and B) and (B and A) and (B and A) and (A and B)",
- expected: "A and B",
- }, {
- in: "((A and B) OR (A and C) OR (A and D)) and E and F",
- expected: "A and ((A or B) and (B or C or A)) and ((A or D) and ((B or A or D) and (B or C or D))) and E and F",
- }, {
- in: "(A and B) OR (A and C)",
- expected: "A and ((B or A) and (B or C))",
- }}
-
- for _, tc := range tests {
- in.Run(tc.in, func(t *testing.T) {
- require := require.New(t)
- stmt, err := Parse("SELECT * FROM T WHERE " + tc.in)
- require.NoError(err)
-
- expr := stmt.(*Select).Where.Expr
- output := RewriteToCNF(expr)
- assert.Equal(t, tc.expected, String(output))
- })
- }
-}
-
func TestReservedVars(t *testing.T) {
for _, prefix := range []string{"vtg", "bv"} {
t.Run("prefix_"+prefix, func(t *testing.T) {
diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go
index 71c56594875..0d6841755c8 100644
--- a/go/vt/sqlparser/ast_test.go
+++ b/go/vt/sqlparser/ast_test.go
@@ -57,32 +57,22 @@ func TestSelect(t *testing.T) {
sel.AddWhere(expr)
buf := NewTrackedBuffer(nil)
sel.Where.Format(buf)
- want := " where a = 1"
- if buf.String() != want {
- t.Errorf("where: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " where a = 1", buf.String())
sel.AddWhere(expr)
buf = NewTrackedBuffer(nil)
sel.Where.Format(buf)
- want = " where a = 1"
- if buf.String() != want {
- t.Errorf("where: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " where a = 1", buf.String())
+
sel = &Select{}
sel.AddHaving(expr)
buf = NewTrackedBuffer(nil)
sel.Having.Format(buf)
- want = " having a = 1"
- if buf.String() != want {
- t.Errorf("having: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " having a = 1", buf.String())
+
sel.AddHaving(expr)
buf = NewTrackedBuffer(nil)
sel.Having.Format(buf)
- want = " having a = 1 and a = 1"
- if buf.String() != want {
- t.Errorf("having: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " having a = 1", buf.String())
tree, err = Parse("select * from t where a = 1 or b = 1")
require.NoError(t, err)
@@ -91,18 +81,14 @@ func TestSelect(t *testing.T) {
sel.AddWhere(expr)
buf = NewTrackedBuffer(nil)
sel.Where.Format(buf)
- want = " where a = 1 or b = 1"
- if buf.String() != want {
- t.Errorf("where: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " where a = 1 or b = 1", buf.String())
+
sel = &Select{}
sel.AddHaving(expr)
buf = NewTrackedBuffer(nil)
sel.Having.Format(buf)
- want = " having a = 1 or b = 1"
- if buf.String() != want {
- t.Errorf("having: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " having a = 1 or b = 1", buf.String())
+
}
func TestUpdate(t *testing.T) {
@@ -835,3 +821,32 @@ func BenchmarkStringTraces(b *testing.B) {
})
}
}
+
+func TestCloneComments(t *testing.T) {
+ c := []string{"/*vt+ a=b */"}
+ parsedComments := Comments(c).Parsed()
+ directives := parsedComments.Directives()
+ {
+ assert.NotEmpty(t, directives.m)
+ val, ok := directives.m["a"]
+ assert.Truef(t, ok, "directives map: %v", directives.m)
+ assert.Equal(t, "b", val)
+ }
+ cloned := CloneRefOfParsedComments(parsedComments)
+ cloned.ResetDirectives()
+ clonedDirectives := cloned.Directives()
+ {
+ assert.NotEmpty(t, clonedDirectives.m)
+ val, ok := clonedDirectives.m["a"]
+ assert.Truef(t, ok, "directives map: %v", directives.m)
+ assert.Equal(t, "b", val)
+ }
+ {
+ delete(directives.m, "a")
+ assert.Empty(t, directives.m)
+
+ val, ok := clonedDirectives.m["a"]
+ assert.Truef(t, ok, "directives map: %v", directives.m)
+ assert.Equal(t, "b", val)
+ }
+}
diff --git a/go/vt/sqlparser/ast_visit.go b/go/vt/sqlparser/ast_visit.go
index 27d1fdf2636..15296a91d3a 100644
--- a/go/vt/sqlparser/ast_visit.go
+++ b/go/vt/sqlparser/ast_visit.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,8 +22,6 @@ func VisitSQLNode(in SQLNode, f Visit) error {
return nil
}
switch in := in.(type) {
- case AccessMode:
- return VisitAccessMode(in, f)
case *AddColumns:
return VisitRefOfAddColumns(in, f)
case *AddConstraintDefinition:
@@ -210,8 +208,6 @@ func VisitSQLNode(in SQLNode, f Visit) error {
return VisitRefOfIntroducerExpr(in, f)
case *IsExpr:
return VisitRefOfIsExpr(in, f)
- case IsolationLevel:
- return VisitIsolationLevel(in, f)
case *JSONArrayExpr:
return VisitRefOfJSONArrayExpr(in, f)
case *JSONAttributesExpr:
@@ -226,8 +222,8 @@ func VisitSQLNode(in SQLNode, f Visit) error {
return VisitRefOfJSONKeysExpr(in, f)
case *JSONObjectExpr:
return VisitRefOfJSONObjectExpr(in, f)
- case JSONObjectParam:
- return VisitJSONObjectParam(in, f)
+ case *JSONObjectParam:
+ return VisitRefOfJSONObjectParam(in, f)
case *JSONOverlapsExpr:
return VisitRefOfJSONOverlapsExpr(in, f)
case *JSONPrettyExpr:
@@ -398,8 +394,6 @@ func VisitSQLNode(in SQLNode, f Visit) error {
return VisitRefOfSetExpr(in, f)
case SetExprs:
return VisitSetExprs(in, f)
- case *SetTransaction:
- return VisitRefOfSetTransaction(in, f)
case *Show:
return VisitRefOfShow(in, f)
case *ShowBasic:
@@ -414,6 +408,8 @@ func VisitSQLNode(in SQLNode, f Visit) error {
return VisitRefOfShowOther(in, f)
case *ShowThrottledApps:
return VisitRefOfShowThrottledApps(in, f)
+ case *ShowThrottlerStatus:
+ return VisitRefOfShowThrottlerStatus(in, f)
case *StarExpr:
return VisitRefOfStarExpr(in, f)
case *Std:
@@ -474,6 +470,8 @@ func VisitSQLNode(in SQLNode, f Visit) error {
return VisitRefOfUpdateXMLExpr(in, f)
case *Use:
return VisitRefOfUse(in, f)
+ case *VExplainStmt:
+ return VisitRefOfVExplainStmt(in, f)
case *VStream:
return VisitRefOfVStream(in, f)
case ValTuple:
@@ -1003,6 +1001,9 @@ func VisitRefOfColumnDefinition(in *ColumnDefinition, f Visit) error {
if err := VisitIdentifierCI(in.Name, f); err != nil {
return err
}
+ if err := VisitRefOfColumnType(in.Type, f); err != nil {
+ return err
+ }
return nil
}
func VisitRefOfColumnType(in *ColumnType, f Visit) error {
@@ -1952,7 +1953,10 @@ func VisitRefOfJSONObjectExpr(in *JSONObjectExpr, f Visit) error {
}
return nil
}
-func VisitJSONObjectParam(in JSONObjectParam, f Visit) error {
+func VisitRefOfJSONObjectParam(in *JSONObjectParam, f Visit) error {
+ if in == nil {
+ return nil
+ }
if cont, err := f(in); err != nil || !cont {
return err
}
@@ -3181,23 +3185,6 @@ func VisitSetExprs(in SetExprs, f Visit) error {
}
return nil
}
-func VisitRefOfSetTransaction(in *SetTransaction, f Visit) error {
- if in == nil {
- return nil
- }
- if cont, err := f(in); err != nil || !cont {
- return err
- }
- if err := VisitRefOfParsedComments(in.Comments, f); err != nil {
- return err
- }
- for _, el := range in.Characteristics {
- if err := VisitCharacteristic(el, f); err != nil {
- return err
- }
- }
- return nil
-}
func VisitRefOfShow(in *Show, f Visit) error {
if in == nil {
return nil
@@ -3282,6 +3269,15 @@ func VisitRefOfShowThrottledApps(in *ShowThrottledApps, f Visit) error {
}
return nil
}
+func VisitRefOfShowThrottlerStatus(in *ShowThrottlerStatus, f Visit) error {
+ if in == nil {
+ return nil
+ }
+ if cont, err := f(in); err != nil || !cont {
+ return err
+ }
+ return nil
+}
func VisitRefOfStarExpr(in *StarExpr, f Visit) error {
if in == nil {
return nil
@@ -3729,6 +3725,21 @@ func VisitRefOfUse(in *Use, f Visit) error {
}
return nil
}
+func VisitRefOfVExplainStmt(in *VExplainStmt, f Visit) error {
+ if in == nil {
+ return nil
+ }
+ if cont, err := f(in); err != nil || !cont {
+ return err
+ }
+ if err := VisitStatement(in.Statement, f); err != nil {
+ return err
+ }
+ if err := VisitRefOfParsedComments(in.Comments, f); err != nil {
+ return err
+ }
+ return nil
+}
func VisitRefOfVStream(in *VStream, f Visit) error {
if in == nil {
return nil
@@ -4222,20 +4233,6 @@ func VisitCallable(in Callable, f Visit) error {
return nil
}
}
-func VisitCharacteristic(in Characteristic, f Visit) error {
- if in == nil {
- return nil
- }
- switch in := in.(type) {
- case AccessMode:
- return VisitAccessMode(in, f)
- case IsolationLevel:
- return VisitIsolationLevel(in, f)
- default:
- // this should never happen
- return nil
- }
-}
func VisitColTuple(in ColTuple, f Visit) error {
if in == nil {
return nil
@@ -4673,14 +4670,14 @@ func VisitStatement(in Statement, f Visit) error {
return VisitRefOfSelect(in, f)
case *Set:
return VisitRefOfSet(in, f)
- case *SetTransaction:
- return VisitRefOfSetTransaction(in, f)
case *Show:
return VisitRefOfShow(in, f)
case *ShowMigrationLogs:
return VisitRefOfShowMigrationLogs(in, f)
case *ShowThrottledApps:
return VisitRefOfShowThrottledApps(in, f)
+ case *ShowThrottlerStatus:
+ return VisitRefOfShowThrottlerStatus(in, f)
case *Stream:
return VisitRefOfStream(in, f)
case *TruncateTable:
@@ -4693,6 +4690,8 @@ func VisitStatement(in Statement, f Visit) error {
return VisitRefOfUpdate(in, f)
case *Use:
return VisitRefOfUse(in, f)
+ case *VExplainStmt:
+ return VisitRefOfVExplainStmt(in, f)
case *VStream:
return VisitRefOfVStream(in, f)
default:
@@ -4718,10 +4717,6 @@ func VisitTableExpr(in TableExpr, f Visit) error {
return nil
}
}
-func VisitAccessMode(in AccessMode, f Visit) error {
- _, err := f(in)
- return err
-}
func VisitAlgorithmValue(in AlgorithmValue, f Visit) error {
_, err := f(in)
return err
@@ -4734,10 +4729,6 @@ func VisitBoolVal(in BoolVal, f Visit) error {
_, err := f(in)
return err
}
-func VisitIsolationLevel(in IsolationLevel, f Visit) error {
- _, err := f(in)
- return err
-}
func VisitListArg(in ListArg, f Visit) error {
_, err := f(in)
return err
@@ -4768,21 +4759,6 @@ func VisitRefOfIdentifierCS(in *IdentifierCS, f Visit) error {
}
return nil
}
-func VisitRefOfJSONObjectParam(in *JSONObjectParam, f Visit) error {
- if in == nil {
- return nil
- }
- if cont, err := f(in); err != nil || !cont {
- return err
- }
- if err := VisitExpr(in.Key, f); err != nil {
- return err
- }
- if err := VisitExpr(in.Value, f); err != nil {
- return err
- }
- return nil
-}
func VisitRefOfRootNode(in *RootNode, f Visit) error {
if in == nil {
return nil
diff --git a/go/vt/sqlparser/cached_size.go b/go/vt/sqlparser/cached_size.go
index e818a18e8ab..93c4b942705 100644
--- a/go/vt/sqlparser/cached_size.go
+++ b/go/vt/sqlparser/cached_size.go
@@ -359,6 +359,20 @@ func (cached *Avg) CachedSize(alloc bool) int64 {
}
return size
}
+func (cached *Begin) CachedSize(alloc bool) int64 {
+ if cached == nil {
+ return int64(0)
+ }
+ size := int64(0)
+ if alloc {
+ size += int64(24)
+ }
+ // field TxAccessModes []vitess.io/vitess/go/vt/sqlparser.TxAccessMode
+ {
+ size += hack.RuntimeAllocSize(int64(cap(cached.TxAccessModes)))
+ }
+ return size
+}
func (cached *BetweenExpr) CachedSize(alloc bool) int64 {
if cached == nil {
return int64(0)
@@ -633,12 +647,12 @@ func (cached *ColumnDefinition) CachedSize(alloc bool) int64 {
}
size := int64(0)
if alloc {
- size += int64(128)
+ size += int64(48)
}
// field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI
size += cached.Name.CachedSize(false)
- // field Type vitess.io/vitess/go/vt/sqlparser.ColumnType
- size += cached.Type.CachedSize(false)
+ // field Type *vitess.io/vitess/go/vt/sqlparser.ColumnType
+ size += cached.Type.CachedSize(true)
return size
}
func (cached *ColumnType) CachedSize(alloc bool) int64 {
@@ -2258,12 +2272,12 @@ func (cached *JtPathColDef) CachedSize(alloc bool) int64 {
}
size := int64(0)
if alloc {
- size += int64(176)
+ size += int64(80)
}
// field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI
size += cached.Name.CachedSize(false)
- // field Type vitess.io/vitess/go/vt/sqlparser.ColumnType
- size += cached.Type.CachedSize(false)
+ // field Type *vitess.io/vitess/go/vt/sqlparser.ColumnType
+ size += cached.Type.CachedSize(true)
// field Path vitess.io/vitess/go/vt/sqlparser.Expr
if cc, ok := cached.Path.(cachedObject); ok {
size += cc.CachedSize(true)
@@ -3270,27 +3284,6 @@ func (cached *SetExpr) CachedSize(alloc bool) int64 {
}
return size
}
-func (cached *SetTransaction) CachedSize(alloc bool) int64 {
- if cached == nil {
- return int64(0)
- }
- size := int64(0)
- if alloc {
- size += int64(48)
- }
- // field Comments *vitess.io/vitess/go/vt/sqlparser.ParsedComments
- size += cached.Comments.CachedSize(true)
- // field Characteristics []vitess.io/vitess/go/vt/sqlparser.Characteristic
- {
- size += hack.RuntimeAllocSize(int64(cap(cached.Characteristics)) * int64(16))
- for _, elem := range cached.Characteristics {
- if cc, ok := elem.(cachedObject); ok {
- size += cc.CachedSize(true)
- }
- }
- }
- return size
-}
func (cached *Show) CachedSize(alloc bool) int64 {
if cached == nil {
return int64(0)
@@ -3392,6 +3385,23 @@ func (cached *ShowThrottledApps) CachedSize(alloc bool) int64 {
}
return size
}
+func (cached *ShowThrottlerStatus) CachedSize(alloc bool) int64 {
+ if cached == nil {
+ return int64(0)
+ }
+ size := int64(0)
+ if alloc {
+ size += int64(24)
+ }
+ // field Comments vitess.io/vitess/go/vt/sqlparser.Comments
+ {
+ size += hack.RuntimeAllocSize(int64(cap(cached.Comments)) * int64(16))
+ for _, elem := range cached.Comments {
+ size += hack.RuntimeAllocSize(int64(len(elem)))
+ }
+ }
+ return size
+}
func (cached *StarExpr) CachedSize(alloc bool) int64 {
if cached == nil {
return int64(0)
@@ -3876,6 +3886,22 @@ func (cached *Use) CachedSize(alloc bool) int64 {
size += cached.DBName.CachedSize(false)
return size
}
+func (cached *VExplainStmt) CachedSize(alloc bool) int64 {
+ if cached == nil {
+ return int64(0)
+ }
+ size := int64(0)
+ if alloc {
+ size += int64(32)
+ }
+ // field Statement vitess.io/vitess/go/vt/sqlparser.Statement
+ if cc, ok := cached.Statement.(cachedObject); ok {
+ size += cc.CachedSize(true)
+ }
+ // field Comments *vitess.io/vitess/go/vt/sqlparser.ParsedComments
+ size += cached.Comments.CachedSize(true)
+ return size
+}
func (cached *VStream) CachedSize(alloc bool) int64 {
if cached == nil {
return int64(0)
diff --git a/go/vt/sqlparser/comments.go b/go/vt/sqlparser/comments.go
index 528d0e250bd..6466c4facfd 100644
--- a/go/vt/sqlparser/comments.go
+++ b/go/vt/sqlparser/comments.go
@@ -20,6 +20,8 @@ import (
"strconv"
"strings"
"unicode"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
)
const (
@@ -42,8 +44,10 @@ const (
DirectiveAllowHashJoin = "ALLOW_HASH_JOIN"
// DirectiveQueryPlanner lets the user specify per query which planner should be used
DirectiveQueryPlanner = "PLANNER"
- // DirectiveVtexplainRunDMLQueries tells explain format = vtexplain that it is okay to also run the query.
- DirectiveVtexplainRunDMLQueries = "EXECUTE_DML_QUERIES"
+ // DirectiveVExplainRunDMLQueries tells vexplain queries/all that it is okay to also run the query.
+ DirectiveVExplainRunDMLQueries = "EXECUTE_DML_QUERIES"
+ // DirectiveConsolidator enables the query consolidator.
+ DirectiveConsolidator = "CONSOLIDATOR"
)
func isNonSpace(r rune) bool {
@@ -206,6 +210,15 @@ type CommentDirectives struct {
m map[string]string
}
+// ResetDirectives sets the _directives member to `nil`, which means the next call to Directives()
+// will re-evaluate it.
+func (c *ParsedComments) ResetDirectives() {
+ if c == nil {
+ return
+ }
+ c._directives = nil
+}
+
// Directives parses the comment list for any execution directives
// of the form:
//
@@ -369,3 +382,26 @@ func AllowScatterDirective(stmt Statement) bool {
}
return comments != nil && comments.Directives().IsSet(DirectiveAllowScatter)
}
+
+// Consolidator returns the consolidator option.
+func Consolidator(stmt Statement) querypb.ExecuteOptions_Consolidator {
+ var comments *ParsedComments
+ switch stmt := stmt.(type) {
+ case *Select:
+ comments = stmt.Comments
+ default:
+ return querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED
+ }
+ if comments == nil {
+ return querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED
+ }
+ directives := comments.Directives()
+ strv, isSet := directives.GetString(DirectiveConsolidator, "")
+ if !isSet {
+ return querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED
+ }
+ if i32v, ok := querypb.ExecuteOptions_Consolidator_value["CONSOLIDATOR_"+strings.ToUpper(strv)]; ok {
+ return querypb.ExecuteOptions_Consolidator(i32v)
+ }
+ return querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED
+}
diff --git a/go/vt/sqlparser/comments_test.go b/go/vt/sqlparser/comments_test.go
index 4906b9fbcd7..7200e3828bd 100644
--- a/go/vt/sqlparser/comments_test.go
+++ b/go/vt/sqlparser/comments_test.go
@@ -24,6 +24,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
)
func TestSplitComments(t *testing.T) {
@@ -468,3 +470,29 @@ func TestIgnoreMaxMaxMemoryRowsDirective(t *testing.T) {
})
}
}
+
+func TestConsolidator(t *testing.T) {
+ testCases := []struct {
+ query string
+ expected querypb.ExecuteOptions_Consolidator
+ }{
+ {"insert /*vt+ CONSOLIDATOR=enabled */ into user(id) values (1), (2)", querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED},
+ {"update /*vt+ CONSOLIDATOR=enabled */ users set name=1", querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED},
+ {"delete /*vt+ CONSOLIDATOR=enabled */ from users", querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED},
+ {"show /*vt+ CONSOLIDATOR=enabled */ create table users", querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED},
+ {"select * from users", querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED},
+ {"select /*vt+ CONSOLIDATOR=invalid_value */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED},
+ {"select /*vt+ IGNORE_MAX_MEMORY_ROWS=1 */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED},
+ {"select /*vt+ CONSOLIDATOR=disabled */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_DISABLED},
+ {"select /*vt+ CONSOLIDATOR=enabled */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_ENABLED},
+ {"select /*vt+ CONSOLIDATOR=enabled_replicas */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_ENABLED_REPLICAS},
+ }
+
+ for _, test := range testCases {
+ t.Run(test.query, func(t *testing.T) {
+ stmt, _ := Parse(test.query)
+ got := Consolidator(stmt)
+ assert.Equalf(t, test.expected, got, fmt.Sprintf("Consolidator(stmt) returned %v but expected %v", got, test.expected))
+ })
+ }
+}
diff --git a/go/vt/sqlparser/constants.go b/go/vt/sqlparser/constants.go
index 4204f34b376..330afeaed05 100644
--- a/go/vt/sqlparser/constants.go
+++ b/go/vt/sqlparser/constants.go
@@ -25,9 +25,14 @@ const (
SQLCalcFoundRowsStr = "sql_calc_found_rows "
// Select.Lock
- NoLockStr = ""
- ForUpdateStr = " for update"
- ShareModeStr = " lock in share mode"
+ NoLockStr = ""
+ ForUpdateStr = " for update"
+ ForUpdateNoWaitStr = " for update nowait"
+ ForUpdateSkipLockedStr = " for update skip locked"
+ ForShareStr = " for share"
+ ForShareNoWaitStr = " for share nowait"
+ ForShareSkipLockedStr = " for share skip locked"
+ ShareModeStr = " lock in share mode"
// Select.Cache
SQLCacheStr = "sql_cache "
@@ -65,6 +70,12 @@ const (
AddSequenceStr = "add sequence"
AddAutoIncStr = "add auto_increment"
+ // ALTER TABLE ALGORITHM string.
+ DefaultStr = "default"
+ CopyStr = "copy"
+ InplaceStr = "inplace"
+ InstantStr = "instant"
+
// Partition and subpartition type strings
HashTypeStr = "hash"
KeyTypeStr = "key"
@@ -228,18 +239,20 @@ const (
AscScr = "asc"
DescScr = "desc"
- // SetExpr.Expr, for SET TRANSACTION ... or START TRANSACTION
- // TransactionStr is the Name for a SET TRANSACTION statement
- TransactionStr = "transaction"
+ // SetExpr.Expr transaction variables
+ TransactionIsolationStr = "transaction_isolation"
+ TransactionReadOnlyStr = "transaction_read_only"
// Transaction isolation levels
- ReadUncommittedStr = "read uncommitted"
- ReadCommittedStr = "read committed"
- RepeatableReadStr = "repeatable read"
+ ReadUncommittedStr = "read-uncommitted"
+ ReadCommittedStr = "read-committed"
+ RepeatableReadStr = "repeatable-read"
SerializableStr = "serializable"
- TxReadOnly = "read only"
- TxReadWrite = "read write"
+ // Transaction access mode
+ WithConsistentSnapshotStr = "with consistent snapshot"
+ ReadWriteStr = "read write"
+ ReadOnlyStr = "read only"
// Explain formats
EmptyStr = ""
@@ -249,6 +262,9 @@ const (
TraditionalStr = "traditional"
AnalyzeStr = "analyze"
VTExplainStr = "vtexplain"
+ QueriesStr = "queries"
+ AllVExplainStr = "all"
+ PlanStr = "plan"
// Lock Types
ReadStr = "read"
@@ -280,7 +296,7 @@ const (
ProcedureStr = " procedure status"
StatusGlobalStr = " global status"
StatusSessionStr = " status"
- TableStr = " tables"
+ TablesStr = " tables"
TableStatusStr = " table status"
TriggerStr = " triggers"
VariableGlobalStr = " global variables"
@@ -413,20 +429,6 @@ const (
YearMonthStr = "year_month"
)
-// Constants for Enum type - AccessMode
-const (
- ReadOnly AccessMode = iota
- ReadWrite
-)
-
-// Constants for Enum type - IsolationLevel
-const (
- ReadUncommitted IsolationLevel = iota
- ReadCommitted
- RepeatableRead
- Serializable
-)
-
// Constants for Enum Type - Insert.Action
const (
InsertAct InsertAction = iota
@@ -454,13 +456,14 @@ const (
// Constants for scope of variables
// See https://dev.mysql.com/doc/refman/8.0/en/set-variable.html
const (
- NoScope Scope = iota // This is only used for SET ISOLATION LEVEL
- SessionScope // [SESSION | @@SESSION.| @@LOCAL. | @@] This is the default if no scope is given
- GlobalScope // {GLOBAL | @@GLOBAL.} system_var_name
- VitessMetadataScope // @@vitess_metadata.system_var_name
- PersistSysScope // {PERSIST_ONLY | @@PERSIST_ONLY.} system_var_name
- PersistOnlySysScope // {PERSIST_ONLY | @@PERSIST_ONLY.} system_var_name
- VariableScope // @var_name This is used for user defined variables.
+ NoScope Scope = iota
+ SessionScope // [SESSION | @@SESSION.| @@LOCAL. | @@] This is the default if no scope is given
+ GlobalScope // {GLOBAL | @@GLOBAL.} system_var_name
+ VitessMetadataScope // @@vitess_metadata.system_var_name
+ PersistSysScope // {PERSIST_ONLY | @@PERSIST_ONLY.} system_var_name
+ PersistOnlySysScope // {PERSIST_ONLY | @@PERSIST_ONLY.} system_var_name
+ VariableScope // @var_name This is used for user defined variables.
+ NextTxScope // This is used for transaction related variables like transaction_isolation, transaction_read_write and set transaction statement.
)
// Constants for Enum Type - Lock
@@ -468,6 +471,11 @@ const (
NoLock Lock = iota
ForUpdateLock
ShareModeLock
+ ForShareLock
+ ForShareLockNoWait
+ ForShareLockSkipLocked
+ ForUpdateLockNoWait
+ ForUpdateLockSkipLocked
)
// Constants for Enum Type - TrimType
@@ -726,6 +734,13 @@ const (
AnalyzeType
)
+// Constant for Enum Type - VExplainType
+const (
+ QueriesVExplainType VExplainType = iota
+ PlanVExplainType
+ AllVExplainType
+)
+
// Constant for Enum Type - SelectIntoType
const (
IntoOutfile SelectIntoType = iota
@@ -875,3 +890,10 @@ const (
IntervalMinuteMicrosecond
IntervalSecondMicrosecond
)
+
+// Transaction access mode
+const (
+ WithConsistentSnapshot TxAccessMode = iota
+ ReadWrite
+ ReadOnly
+)
diff --git a/go/vt/sqlparser/cow.go b/go/vt/sqlparser/cow.go
new file mode 100644
index 00000000000..e807fdeef63
--- /dev/null
+++ b/go/vt/sqlparser/cow.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+// CopyOnRewrite traverses a syntax tree recursively, starting with root,
+// and calling pre and post for each node as described below.
+// Rewrite returns a syntax tree, where some nodes can be shared with the
+// original syntax tree.
+//
+// If pre is not nil, it is called for each node before the node's
+// children are traversed (pre-order). If pre returns false, no
+// children are traversed, but post is still called for that node.
+//
+// If post is not nil, and a prior call of pre didn't return false,
+// post is called for each node after its children are traversed
+// (post-order).
+//
+// In the post call, the cursor can be used to abort the current
+// traversal altogether.
+//
+// Also in the post call, a user can replace the current node.
+// When a node is replaced, all the ancestors of the node are cloned,
+// so that the original syntax tree remains untouched
+//
+// The `cloned` function will be called for all nodes that are cloned
+// or replaced, to give the user a chance to copy any metadata that
+// needs copying.
+//
+// Only fields that refer to AST nodes are considered children;
+// i.e., fields of basic types (strings, []byte, etc.) are ignored.
+func CopyOnRewrite(
+ node SQLNode,
+ pre func(node, parent SQLNode) bool,
+ post func(cursor *CopyOnWriteCursor),
+ cloned func(before, after SQLNode),
+) SQLNode {
+ cow := cow{pre: pre, post: post, cursor: CopyOnWriteCursor{}, cloned: cloned}
+ out, _ := cow.copyOnRewriteSQLNode(node, nil)
+ return out
+}
+
+// StopTreeWalk aborts the current tree walking. No more nodes will be visited, and the rewriter will exit out early
+func (c *CopyOnWriteCursor) StopTreeWalk() {
+ c.stop = true
+}
+
+// Node returns the current node we are visiting
+func (c *CopyOnWriteCursor) Node() SQLNode {
+ return c.node
+}
+
+// Parent returns the parent of the current node.
+// Note: This is the parent before any changes have been done - the parent in the output might be a copy of this
+func (c *CopyOnWriteCursor) Parent() SQLNode {
+ return c.parent
+}
+
+// Replace replaces the current node with the given node.
+// Note: If you try to set an invalid type on a field, the field will end up with a nil and no error will be reported.
+func (c *CopyOnWriteCursor) Replace(n SQLNode) {
+ c.replaced = n
+}
+
+func (c *cow) postVisit(node, parent SQLNode, changed bool) (SQLNode, bool) {
+ c.cursor.node = node
+ c.cursor.parent = parent
+ c.cursor.replaced = nil
+ c.post(&c.cursor)
+ if c.cursor.replaced != nil {
+ if c.cloned != nil {
+ c.cloned(node, c.cursor.replaced)
+ }
+ return c.cursor.replaced, true
+ }
+ return node, changed
+}
+
+type (
+ CopyOnWriteCursor struct {
+ node SQLNode
+ parent SQLNode
+ replaced SQLNode
+ stop bool
+ }
+ cow struct {
+ pre func(node, parent SQLNode) bool
+ post func(cursor *CopyOnWriteCursor)
+ cloned func(old, new SQLNode)
+ cursor CopyOnWriteCursor
+ }
+)
diff --git a/go/vt/sqlparser/keywords.go b/go/vt/sqlparser/keywords.go
index ec980a05c65..6152b0ed527 100644
--- a/go/vt/sqlparser/keywords.go
+++ b/go/vt/sqlparser/keywords.go
@@ -179,6 +179,7 @@ var keywords = []keyword{
{"compression", COMPRESSION},
{"condition", UNUSED},
{"connection", CONNECTION},
+ {"consistent", CONSISTENT},
{"constraint", CONSTRAINT},
{"continue", UNUSED},
{"convert", CONVERT},
@@ -271,8 +272,8 @@ var keywords = []keyword{
{"first_value", FIRST_VALUE},
{"fixed", FIXED},
{"float", FLOAT_TYPE},
- {"float4", UNUSED},
- {"float8", UNUSED},
+ {"float4", FLOAT4_TYPE},
+ {"float8", FLOAT8_TYPE},
{"flush", FLUSH},
{"following", FOLLOWING},
{"for", FOR},
@@ -403,6 +404,7 @@ var keywords = []keyword{
{"localtimestamp", LOCALTIMESTAMP},
{"locate", LOCATE},
{"lock", LOCK},
+ {"locked", LOCKED},
{"logs", LOGS},
{"long", UNUSED},
{"longblob", LONGBLOB},
@@ -447,6 +449,7 @@ var keywords = []keyword{
{"none", NONE},
{"not", NOT},
{"now", NOW},
+ {"nowait", NOWAIT},
{"no_write_to_binlog", NO_WRITE_TO_BINLOG},
{"nth_value", NTH_VALUE},
{"ntile", NTILE},
@@ -480,6 +483,7 @@ var keywords = []keyword{
{"password", PASSWORD},
{"path", PATH},
{"percent_rank", PERCENT_RANK},
+ {"plan", PLAN},
{"plugins", PLUGINS},
{"point", POINT},
{"polygon", POLYGON},
@@ -493,6 +497,7 @@ var keywords = []keyword{
{"procedure", PROCEDURE},
{"ps_current_thread_id", PS_CURRENT_THREAD_ID},
{"ps_thread_id", PS_THREAD_ID},
+ {"queries", QUERIES},
{"query", QUERY},
{"range", RANGE},
{"quarter", QUARTER},
@@ -560,8 +565,10 @@ var keywords = []keyword{
{"signal", UNUSED},
{"signed", SIGNED},
{"simple", SIMPLE},
+ {"skip", SKIP},
{"slow", SLOW},
{"smallint", SMALLINT},
+ {"snapshot", SNAPSHOT},
{"spatial", SPATIAL},
{"specific", UNUSED},
{"sql", SQL},
@@ -648,6 +655,7 @@ var keywords = []keyword{
{"varcharacter", UNUSED},
{"variance", VARIANCE},
{"varying", UNUSED},
+ {"vexplain", VEXPLAIN},
{"vgtid_executed", VGTID_EXECUTED},
{"virtual", VIRTUAL},
{"vindex", VINDEX},
@@ -663,6 +671,7 @@ var keywords = []keyword{
{"vitess_tablets", VITESS_TABLETS},
{"vitess_target", VITESS_TARGET},
{"vitess_throttled_apps", VITESS_THROTTLED_APPS},
+ {"vitess_throttler", VITESS_THROTTLER},
{"vschema", VSCHEMA},
{"vstream", VSTREAM},
{"vtexplain", VTEXPLAIN},
@@ -687,6 +696,7 @@ var keywords = []keyword{
// keywordStrings contains the reverse mapping of token to keyword strings
var keywordStrings = map[int]string{}
+var keywordVals = map[string]int{}
// keywordLookupTable is a perfect hash map that maps **case insensitive** keyword names to their ids
var keywordLookupTable *caseInsensitiveTable
@@ -735,6 +745,7 @@ func init() {
panic(fmt.Sprintf("keyword %q must be lowercase in table", kw.name))
}
keywordStrings[kw.id] = kw.name
+ keywordVals[kw.name] = kw.id
}
keywordLookupTable = buildCaseInsensitiveTable(keywords)
diff --git a/go/vt/sqlparser/normalizer.go b/go/vt/sqlparser/normalizer.go
index 6fb93c5778d..6d7ee526bf6 100644
--- a/go/vt/sqlparser/normalizer.go
+++ b/go/vt/sqlparser/normalizer.go
@@ -37,15 +37,16 @@ type BindVars map[string]struct{}
// treated as distinct.
func Normalize(stmt Statement, reserved *ReservedVars, bindVars map[string]*querypb.BindVariable) error {
nz := newNormalizer(reserved, bindVars)
- _ = Rewrite(stmt, nz.WalkStatement, nil)
+ _ = SafeRewrite(stmt, nz.walkStatementDown, nz.walkStatementUp)
return nz.err
}
type normalizer struct {
- bindVars map[string]*querypb.BindVariable
- reserved *ReservedVars
- vals map[string]string
- err error
+ bindVars map[string]*querypb.BindVariable
+ reserved *ReservedVars
+ vals map[string]string
+ err error
+ inDerived bool
}
func newNormalizer(reserved *ReservedVars, bindVars map[string]*querypb.BindVariable) *normalizer {
@@ -56,22 +57,39 @@ func newNormalizer(reserved *ReservedVars, bindVars map[string]*querypb.BindVari
}
}
-// WalkStatement is the top level walk function.
+// walkStatementUp is one half of the top level walk function.
+func (nz *normalizer) walkStatementUp(cursor *Cursor) bool {
+ if nz.err != nil {
+ return false
+ }
+ node, isLiteral := cursor.Node().(*Literal)
+ if !isLiteral {
+ return true
+ }
+ nz.convertLiteral(node, cursor)
+ return nz.err == nil // only continue if we haven't found any errors
+}
+
+// walkStatementDown is the top level walk function.
// If it encounters a Select, it switches to a mode
// where variables are deduped.
-func (nz *normalizer) WalkStatement(cursor *Cursor) bool {
- switch node := cursor.Node().(type) {
+func (nz *normalizer) walkStatementDown(node, parent SQLNode) bool {
+ switch node := node.(type) {
// no need to normalize the statement types
- case *Set, *Show, *Begin, *Commit, *Rollback, *Savepoint, *SetTransaction, DDLStatement, *SRollback, *Release, *OtherAdmin, *OtherRead:
+ case *Set, *Show, *Begin, *Commit, *Rollback, *Savepoint, DDLStatement, *SRollback, *Release, *OtherAdmin, *OtherRead:
return false
case *Select:
- _ = Rewrite(node, nz.WalkSelect, nil)
+ _, isDerived := parent.(*DerivedTable)
+ var tmp bool
+ tmp, nz.inDerived = nz.inDerived, isDerived
+ _ = SafeRewrite(node, nz.walkDownSelect, nz.walkUpSelect)
// Don't continue
+ nz.inDerived = tmp
return false
- case *Literal:
- nz.convertLiteral(node, cursor)
case *ComparisonExpr:
nz.convertComparison(node)
+ case *UpdateExpr:
+ nz.convertUpdateExpr(node)
case *ColName, TableName:
// Common node types that never contain Literal or ListArgs but create a lot of object
// allocations.
@@ -82,11 +100,25 @@ func (nz *normalizer) WalkStatement(cursor *Cursor) bool {
return nz.err == nil // only continue if we haven't found any errors
}
-// WalkSelect normalizes the AST in Select mode.
-func (nz *normalizer) WalkSelect(cursor *Cursor) bool {
- switch node := cursor.Node().(type) {
- case *Literal:
- nz.convertLiteralDedup(node, cursor)
+// walkDownSelect normalizes the AST in Select mode.
+func (nz *normalizer) walkDownSelect(node, parent SQLNode) bool {
+ switch node := node.(type) {
+ case *Select:
+ _, isDerived := parent.(*DerivedTable)
+ if !isDerived {
+ return true
+ }
+ var tmp bool
+ tmp, nz.inDerived = nz.inDerived, isDerived
+ // initiating a new AST walk here means that we might change something while walking down on the tree,
+ // but since we are only changing literals, we can be safe that we are not changing the SELECT struct,
+ // only something much further down, and that should be safe
+ _ = SafeRewrite(node, nz.walkDownSelect, nz.walkUpSelect)
+ // Don't continue
+ nz.inDerived = tmp
+ return false
+ case SelectExprs:
+ return !nz.inDerived
case *ComparisonExpr:
nz.convertComparison(node)
case *FramePoint:
@@ -96,9 +128,6 @@ func (nz *normalizer) WalkSelect(cursor *Cursor) bool {
// Common node types that never contain Literals or ListArgs but create a lot of object
// allocations.
return false
- case OrderBy, GroupBy:
- // do not make a bind var for order by column_position
- return false
case *ConvertType:
// we should not rewrite the type description
return false
@@ -106,6 +135,27 @@ func (nz *normalizer) WalkSelect(cursor *Cursor) bool {
return nz.err == nil // only continue if we haven't found any errors
}
+// walkUpSelect normalizes the Literals in Select mode.
+func (nz *normalizer) walkUpSelect(cursor *Cursor) bool {
+ if nz.err != nil {
+ return false
+ }
+ node, isLiteral := cursor.Node().(*Literal)
+ if !isLiteral {
+ return true
+ }
+ parent := cursor.Parent()
+ switch parent.(type) {
+ case *Order, GroupBy:
+ return false
+ case *Limit:
+ nz.convertLiteral(node, cursor)
+ default:
+ nz.convertLiteralDedup(node, cursor)
+ }
+ return nz.err == nil // only continue if we haven't found any errors
+}
+
func validateLiteral(node *Literal) (err error) {
switch node.Type {
case DateVal:
@@ -140,15 +190,7 @@ func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) {
}
// Check if there's a bindvar for that value already.
- var key string
- if bval.Type == sqltypes.VarBinary || bval.Type == sqltypes.VarChar {
- // Prefixing strings with "'" ensures that a string
- // and number that have the same representation don't
- // collide.
- key = "'" + node.Val
- } else {
- key = node.Val
- }
+ key := keyFor(bval, node)
bvname, ok := nz.vals[key]
if !ok {
// If there's no such bindvar, make a new one.
@@ -161,6 +203,18 @@ func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) {
cursor.Replace(NewArgument(bvname))
}
+func keyFor(bval *querypb.BindVariable, lit *Literal) string {
+ if bval.Type != sqltypes.VarBinary && bval.Type != sqltypes.VarChar {
+ return lit.Val
+ }
+
+ // Prefixing strings with "'" ensures that a string
+ // and number that have the same representation don't
+ // collide.
+ return "'" + lit.Val
+
+}
+
// convertLiteral converts an Literal without the dedup.
func (nz *normalizer) convertLiteral(node *Literal, cursor *Cursor) {
err := validateLiteral(node)
@@ -185,13 +239,70 @@ func (nz *normalizer) convertLiteral(node *Literal, cursor *Cursor) {
// and iterate on converting each individual value into separate
// bind vars.
func (nz *normalizer) convertComparison(node *ComparisonExpr) {
- if node.Operator != InOp && node.Operator != NotInOp {
- return
+ switch node.Operator {
+ case InOp, NotInOp:
+ nz.rewriteInComparisons(node)
+ default:
+ nz.rewriteOtherComparisons(node)
}
+}
+
+func (nz *normalizer) rewriteOtherComparisons(node *ComparisonExpr) {
+ newR := nz.parameterize(node.Left, node.Right)
+ if newR != nil {
+ node.Right = newR
+ }
+}
+
+func (nz *normalizer) parameterize(left, right Expr) Expr {
+ col, ok := left.(*ColName)
+ if !ok {
+ return nil
+ }
+ lit, ok := right.(*Literal)
+ if !ok {
+ return nil
+ }
+ err := validateLiteral(lit)
+ if err != nil {
+ nz.err = err
+ return nil
+ }
+
+ bval := SQLToBindvar(lit)
+ if bval == nil {
+ return nil
+ }
+ key := keyFor(bval, lit)
+ bvname := nz.decideBindVarName(key, lit, col, bval)
+ return Argument(bvname)
+}
+
+func (nz *normalizer) decideBindVarName(key string, lit *Literal, col *ColName, bval *querypb.BindVariable) string {
+ if len(lit.Val) <= 256 {
+ // first we check if we already have a bindvar for this value. if we do, we re-use that bindvar name
+ bvname, ok := nz.vals[key]
+ if ok {
+ return bvname
+ }
+ }
+
+ // If there's no such bindvar, or we have a big value, make a new one.
+ // Big values are most likely not for vindexes.
+ // We save a lot of CPU because we avoid building
+ bvname := nz.reserved.ReserveColName(col)
+ nz.vals[key] = bvname
+ nz.bindVars[bvname] = bval
+
+ return bvname
+}
+
+func (nz *normalizer) rewriteInComparisons(node *ComparisonExpr) {
tupleVals, ok := node.Right.(ValTuple)
if !ok {
return
}
+
// The RHS is a tuple of values.
// Make a list bindvar.
bvals := &querypb.BindVariable{
@@ -213,6 +324,13 @@ func (nz *normalizer) convertComparison(node *ComparisonExpr) {
node.Right = ListArg(bvname)
}
+func (nz *normalizer) convertUpdateExpr(node *UpdateExpr) {
+ newR := nz.parameterize(node.Name, node.Expr)
+ if newR != nil {
+ node.Expr = newR
+ }
+}
+
func SQLToBindvar(node SQLNode) *querypb.BindVariable {
if node, ok := node.(*Literal); ok {
var v sqltypes.Value
diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go
index aa47f1e5634..fe3dc85b8c8 100644
--- a/go/vt/sqlparser/normalizer_test.go
+++ b/go/vt/sqlparser/normalizer_test.go
@@ -43,10 +43,10 @@ func TestNormalize(t *testing.T) {
outbv map[string]*querypb.BindVariable
}{{
// str val
- in: "select * from t where v1 = 'aa'",
- outstmt: "select * from t where v1 = :bv1",
+ in: "select * from t where foobar = 'aa'",
+ outstmt: "select * from t where foobar = :foobar",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.StringBindVariable("aa"),
+ "foobar": sqltypes.StringBindVariable("aa"),
},
}, {
// placeholder
@@ -67,47 +67,47 @@ func TestNormalize(t *testing.T) {
},
}, {
// int val
- in: "select * from t where v1 = 1",
- outstmt: "select * from t where v1 = :bv1",
+ in: "select * from t where foobar = 1",
+ outstmt: "select * from t where foobar = :foobar",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.Int64BindVariable(1),
+ "foobar": sqltypes.Int64BindVariable(1),
},
}, {
// float val
- in: "select * from t where v1 = 1.2",
- outstmt: "select * from t where v1 = :bv1",
+ in: "select * from t where foobar = 1.2",
+ outstmt: "select * from t where foobar = :foobar",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.DecimalBindVariable(1.2),
+ "foobar": sqltypes.DecimalBindVariable(1.2),
},
}, {
// multiple vals
- in: "select * from t where v1 = 1.2 and v2 = 2",
- outstmt: "select * from t where v1 = :bv1 and v2 = :bv2",
+ in: "select * from t where foo = 1.2 and bar = 2",
+ outstmt: "select * from t where foo = :foo and bar = :bar",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.DecimalBindVariable(1.2),
- "bv2": sqltypes.Int64BindVariable(2),
+ "foo": sqltypes.DecimalBindVariable(1.2),
+ "bar": sqltypes.Int64BindVariable(2),
},
}, {
// bv collision
- in: "select * from t where v1 = :bv1 and v2 = 1",
- outstmt: "select * from t where v1 = :bv1 and v2 = :bv2",
+ in: "select * from t where foo = :bar and bar = 12",
+ outstmt: "select * from t where foo = :bar and bar = :bar1",
outbv: map[string]*querypb.BindVariable{
- "bv2": sqltypes.Int64BindVariable(1),
+ "bar1": sqltypes.Int64BindVariable(12),
},
}, {
// val reuse
- in: "select * from t where v1 = 1 and v2 = 1",
- outstmt: "select * from t where v1 = :bv1 and v2 = :bv1",
+ in: "select * from t where foo = 1 and bar = 1",
+ outstmt: "select * from t where foo = :foo and bar = :foo",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.Int64BindVariable(1),
+ "foo": sqltypes.Int64BindVariable(1),
},
}, {
// ints and strings are different
- in: "select * from t where v1 = 1 and v2 = '1'",
- outstmt: "select * from t where v1 = :bv1 and v2 = :bv2",
+ in: "select * from t where foo = 1 and bar = '1'",
+ outstmt: "select * from t where foo = :foo and bar = :bar",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.Int64BindVariable(1),
- "bv2": sqltypes.StringBindVariable("1"),
+ "foo": sqltypes.Int64BindVariable(1),
+ "bar": sqltypes.StringBindVariable("1"),
},
}, {
// val should not be reused for non-select statements
@@ -120,33 +120,31 @@ func TestNormalize(t *testing.T) {
}, {
// val should be reused only in subqueries of DMLs
in: "update a set v1=(select 5 from t), v2=5, v3=(select 5 from t), v4=5",
- outstmt: "update a set v1 = (select :bv1 from t), v2 = :bv2, v3 = (select :bv1 from t), v4 = :bv3",
+ outstmt: "update a set v1 = (select :bv1 from t), v2 = :bv1, v3 = (select :bv1 from t), v4 = :bv1",
outbv: map[string]*querypb.BindVariable{
"bv1": sqltypes.Int64BindVariable(5),
- "bv2": sqltypes.Int64BindVariable(5),
- "bv3": sqltypes.Int64BindVariable(5),
},
}, {
// list vars should work for DMLs also
in: "update a set v1=5 where v2 in (1, 4, 5)",
- outstmt: "update a set v1 = :bv1 where v2 in ::bv2",
+ outstmt: "update a set v1 = :v1 where v2 in ::bv1",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.Int64BindVariable(5),
- "bv2": sqltypes.TestBindVariable([]any{1, 4, 5}),
+ "v1": sqltypes.Int64BindVariable(5),
+ "bv1": sqltypes.TestBindVariable([]any{1, 4, 5}),
},
}, {
// Hex number values should work for selects
- in: "select * from t where v1 = 0x1234",
- outstmt: "select * from t where v1 = :bv1",
+ in: "select * from t where foo = 0x1234",
+ outstmt: "select * from t where foo = :foo",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.HexNumBindVariable([]byte("0x1234")),
+ "foo": sqltypes.HexNumBindVariable([]byte("0x1234")),
},
}, {
// Hex encoded string values should work for selects
- in: "select * from t where v1 = x'7b7d'",
- outstmt: "select * from t where v1 = :bv1",
+ in: "select * from t where foo = x'7b7d'",
+ outstmt: "select * from t where foo = :foo",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.HexValBindVariable([]byte("x'7b7d'")),
+ "foo": sqltypes.HexValBindVariable([]byte("x'7b7d'")),
},
}, {
// Ensure that hex notation bind vars work with collation based conversions
@@ -157,30 +155,44 @@ func TestNormalize(t *testing.T) {
},
}, {
// Hex number values should work for DMLs
- in: "update a set v1 = 0x12",
- outstmt: "update a set v1 = :bv1",
+ in: "update a set foo = 0x12",
+ outstmt: "update a set foo = :foo",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.HexNumBindVariable([]byte("0x12")),
+ "foo": sqltypes.HexNumBindVariable([]byte("0x12")),
},
}, {
- // Bin value does not convert
- in: "select * from t where v1 = b'11'",
- outstmt: "select * from t where v1 = :bv1",
+ // Bin values work fine
+ in: "select * from t where foo = b'11'",
+ outstmt: "select * from t where foo = :foo",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.HexNumBindVariable([]byte("0x3")),
+ "foo": sqltypes.HexNumBindVariable([]byte("0x3")),
},
}, {
// Bin value does not convert for DMLs
in: "update a set v1 = b'11'",
- outstmt: "update a set v1 = :bv1",
+ outstmt: "update a set v1 = :v1",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.HexNumBindVariable([]byte("0x3")),
+ "v1": sqltypes.HexNumBindVariable([]byte("0x3")),
},
}, {
// ORDER BY column_position
in: "select a, b from t order by 1 asc",
outstmt: "select a, b from t order by 1 asc",
outbv: map[string]*querypb.BindVariable{},
+ }, {
+ // GROUP BY column_position
+ in: "select a, b from t group by 1",
+ outstmt: "select a, b from t group by 1",
+ outbv: map[string]*querypb.BindVariable{},
+ }, {
+ // ORDER BY with literal inside complex expression
+ in: "select a, b from t order by field(a,1,2,3) asc",
+ outstmt: "select a, b from t order by field(a, :bv1, :bv2, :bv3) asc",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.Int64BindVariable(1),
+ "bv2": sqltypes.Int64BindVariable(2),
+ "bv3": sqltypes.Int64BindVariable(3),
+ },
}, {
// ORDER BY variable
in: "select a, b from t order by c asc",
@@ -188,18 +200,18 @@ func TestNormalize(t *testing.T) {
outbv: map[string]*querypb.BindVariable{},
}, {
// Values up to len 256 will reuse.
- in: fmt.Sprintf("select * from t where v1 = '%256s' and v2 = '%256s'", "a", "a"),
- outstmt: "select * from t where v1 = :bv1 and v2 = :bv1",
+ in: fmt.Sprintf("select * from t where foo = '%256s' and bar = '%256s'", "a", "a"),
+ outstmt: "select * from t where foo = :foo and bar = :foo",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.StringBindVariable(fmt.Sprintf("%256s", "a")),
+ "foo": sqltypes.StringBindVariable(fmt.Sprintf("%256s", "a")),
},
}, {
// Values greater than len 256 will not reuse.
- in: fmt.Sprintf("select * from t where v1 = '%257s' and v2 = '%257s'", "b", "b"),
- outstmt: "select * from t where v1 = :bv1 and v2 = :bv2",
+ in: fmt.Sprintf("select * from t where foo = '%257s' and bar = '%257s'", "b", "b"),
+ outstmt: "select * from t where foo = :foo and bar = :bar",
outbv: map[string]*querypb.BindVariable{
- "bv1": sqltypes.StringBindVariable(fmt.Sprintf("%257s", "b")),
- "bv2": sqltypes.StringBindVariable(fmt.Sprintf("%257s", "b")),
+ "foo": sqltypes.StringBindVariable(fmt.Sprintf("%257s", "b")),
+ "bar": sqltypes.StringBindVariable(fmt.Sprintf("%257s", "b")),
},
}, {
// bad int
@@ -230,6 +242,13 @@ func TestNormalize(t *testing.T) {
outbv: map[string]*querypb.BindVariable{
"bv1": sqltypes.TestBindVariable([]any{1, "2"}),
},
+ }, {
+ // EXPLAIN queries
+ in: "explain select * from t where v1 in (1, '2')",
+ outstmt: "explain select * from t where v1 in ::bv1",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.TestBindVariable([]any{1, "2"}),
+ },
}, {
// NOT IN clause
in: "select * from t where v1 not in (1, '2')",
@@ -284,6 +303,39 @@ func TestNormalize(t *testing.T) {
outbv: map[string]*querypb.BindVariable{
"bv1": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(sqltypes.Datetime, []byte("2022-08-06 17:05:12"))),
},
+ }, {
+ // TimestampVal should also be normalized
+ in: `explain select comms_by_companies.* from comms_by_companies where comms_by_companies.id = 'rjve634shXzaavKHbAH16ql6OrxJ' limit 1,1`,
+ outstmt: `explain select comms_by_companies.* from comms_by_companies where comms_by_companies.id = :comms_by_companies_id limit :bv1, :bv2`,
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.Int64BindVariable(1),
+ "bv2": sqltypes.Int64BindVariable(1),
+ "comms_by_companies_id": sqltypes.StringBindVariable("rjve634shXzaavKHbAH16ql6OrxJ"),
+ },
+ }, {
+ // Int leading with zero should also be normalized
+ in: `select * from t where zipcode = 01001900`,
+ outstmt: `select * from t where zipcode = :zipcode`,
+ outbv: map[string]*querypb.BindVariable{
+ "zipcode": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(sqltypes.Int64, []byte("01001900"))),
+ },
+ }, {
+ // literals in limit and offset should not reuse bindvars
+ in: `select * from t where id = 10 limit 10 offset 10`,
+ outstmt: `select * from t where id = :id limit :bv1, :bv2`,
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.Int64BindVariable(10),
+ "bv2": sqltypes.Int64BindVariable(10),
+ "id": sqltypes.Int64BindVariable(10),
+ },
+ }, {
+ // we don't want to replace literals on the select expressions of a derived table
+ // these expressions can be referenced from the outside,
+ // and changing them to bindvars can change the meaning of the query
+ // example of problematic query: select tmp.`1` from (select 1) as tmp
+ in: `select * from (select 12) as t`,
+ outstmt: `select * from (select 12 from dual) as t`,
+ outbv: map[string]*querypb.BindVariable{},
}}
for _, tc := range testcases {
t.Run(tc.in, func(t *testing.T) {
@@ -441,7 +493,17 @@ func BenchmarkNormalizeVTGate(b *testing.B) {
// Normalize if possible and retry.
if CanNormalize(stmt) || MustRewriteAST(stmt, false) {
- result, err := PrepareAST(stmt, NewReservedVars("vtg", reservedVars), bindVars, true, keyspace, SQLSelectLimitUnset, "", nil)
+ result, err := PrepareAST(
+ stmt,
+ NewReservedVars("vtg", reservedVars),
+ bindVars,
+ true,
+ keyspace,
+ SQLSelectLimitUnset,
+ "",
+ nil, /*sysvars*/
+ nil, /*views*/
+ )
if err != nil {
b.Fatal(err)
}
@@ -721,7 +783,17 @@ func benchmarkNormalization(b *testing.B, sqls []string) {
}
reservedVars := NewReservedVars("vtg", reserved)
- _, err = PrepareAST(stmt, reservedVars, make(map[string]*querypb.BindVariable), true, "keyspace0", SQLSelectLimitUnset, "", nil)
+ _, err = PrepareAST(
+ stmt,
+ reservedVars,
+ make(map[string]*querypb.BindVariable),
+ true,
+ "keyspace0",
+ SQLSelectLimitUnset,
+ "",
+ nil,
+ nil,
+ )
if err != nil {
b.Fatal(err)
}
diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go
index bb3ab071820..d5acbbc2092 100644
--- a/go/vt/sqlparser/parse_test.go
+++ b/go/vt/sqlparser/parse_test.go
@@ -371,8 +371,18 @@ var (
input: "select /* distinct */ distinct 1 from t",
}, {
input: "select /* straight_join */ straight_join 1 from t",
+ }, {
+ input: "select /* for share */ 1 from t for share",
+ }, {
+ input: "select /* for share */ 1 from t for share nowait",
+ }, {
+ input: "select /* for share */ 1 from t for share skip locked",
}, {
input: "select /* for update */ 1 from t for update",
+ }, {
+ input: "select /* for update */ 1 from t for update nowait",
+ }, {
+ input: "select /* for update */ 1 from t for update skip locked",
}, {
input: "select /* lock in share mode */ 1 from t lock in share mode",
}, {
@@ -850,6 +860,12 @@ var (
}, {
input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t",
output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t",
+ }, {
+ input: "select DATE_ADD(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)",
+ output: "select DATE_ADD(min(FROM_UNIXTIME(1673444922)), interval (-DAYOFWEEK(min(FROM_UNIXTIME(1673444922))) + 1) DAY) from dual",
+ }, {
+ input: "select '2020-01-01' + interval month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month))-1 month",
+ output: "select '2020-01-01' + interval (month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month)) - 1) month from dual",
}, {
input: "select /* dual */ 1 from dual",
}, {
@@ -1106,23 +1122,35 @@ var (
input: "set /* mixed list */ a = 3, names 'utf8', charset 'ascii', b = 4",
output: "set /* mixed list */ @@a = 3, names 'utf8', charset 'ascii', @@b = 4",
}, {
- input: "set session transaction isolation level repeatable read",
+ input: "set session transaction isolation level repeatable read",
+ output: "set @@session.transaction_isolation = 'repeatable-read'",
+ }, {
+ input: "set transaction isolation level repeatable read",
+ output: "set @@transaction_isolation = 'repeatable-read'",
}, {
- input: "set transaction isolation level repeatable read",
+ input: "set global transaction isolation level repeatable read",
+ output: "set @@global.transaction_isolation = 'repeatable-read'",
}, {
- input: "set global transaction isolation level repeatable read",
+ input: "set transaction isolation level repeatable read",
+ output: "set @@transaction_isolation = 'repeatable-read'",
}, {
- input: "set transaction isolation level repeatable read",
+ input: "set transaction isolation level read committed",
+ output: "set @@transaction_isolation = 'read-committed'",
}, {
- input: "set transaction isolation level read committed",
+ input: "set transaction isolation level read uncommitted",
+ output: "set @@transaction_isolation = 'read-uncommitted'",
}, {
- input: "set transaction isolation level read uncommitted",
+ input: "set transaction isolation level serializable",
+ output: "set @@transaction_isolation = 'serializable'",
}, {
- input: "set transaction isolation level serializable",
+ input: "set transaction read write",
+ output: "set @@transaction_read_only = 'off'",
}, {
- input: "set transaction read write",
+ input: "set transaction read only",
+ output: "set @@transaction_read_only = 'on'",
}, {
- input: "set transaction read only",
+ input: "set session transaction read only, isolation level serializable",
+ output: "set @@session.transaction_read_only = 'on', @@session.transaction_isolation = 'serializable'",
}, {
input: "set tx_read_only = 1",
output: "set @@tx_read_only = 1",
@@ -1131,10 +1159,14 @@ var (
output: "set @@tx_read_only = 0",
}, {
input: "set transaction_read_only = 1",
- output: "set @@transaction_read_only = 1",
+ output: "set @@session.transaction_read_only = 1",
}, {
input: "set transaction_read_only = 0",
- output: "set @@transaction_read_only = 0",
+ output: "set @@session.transaction_read_only = 0",
+ }, {
+ input: "set @@transaction_read_only = 1",
+ }, {
+ input: "set @@transaction_isolation = 'read-committed'",
}, {
input: "set tx_isolation = 'repeatable read'",
output: "set @@tx_isolation = 'repeatable read'",
@@ -1216,6 +1248,12 @@ var (
input: "alter table a convert to character set utf32",
}, {
input: "alter table `By` add column foo int, algorithm = default",
+ }, {
+ input: "alter table `By` add column foo int, algorithm = copy",
+ }, {
+ input: "alter table `By` add column foo int, algorithm = inplace",
+ }, {
+ input: "alter table `By` add column foo int, algorithm = INPLACE",
}, {
input: "alter table `By` add column foo int, algorithm = instant",
}, {
@@ -1437,6 +1475,18 @@ var (
}, {
input: "create table a (\n\ta float not null default -2.1\n)",
output: "create table a (\n\ta float not null default -2.1\n)",
+ }, {
+ input: "create table a (\n\ta float(24) not null default -1\n)",
+ output: "create table a (\n\ta float(24) not null default -1\n)",
+ }, {
+ input: "create table a (\n\ta float(24,10) not null default -1\n)",
+ output: "create table a (\n\ta float(24,10) not null default -1\n)",
+ }, {
+ input: "create table a (\n\ta float4 not null default -1\n)",
+ output: "create table a (\n\ta float4 not null default -1\n)",
+ }, {
+ input: "create table a (\n\ta float8 not null default -1\n)",
+ output: "create table a (\n\ta float8 not null default -1\n)",
}, {
input: "create table a (a int not null default 0, primary key(a))",
output: "create table a (\n\ta int not null default 0,\n\tprimary key (a)\n)",
@@ -2052,6 +2102,10 @@ var (
input: "alter vitess_migration throttle all ratio 0.7",
}, {
input: "alter vitess_migration throttle all expire '1h' ratio 0.7",
+ }, {
+ input: "show vitess_throttled_apps",
+ }, {
+ input: "show vitess_throttler status",
}, {
input: "show warnings",
}, {
@@ -2103,6 +2157,15 @@ var (
input: "explain select * from t",
}, {
input: "explain format = traditional select * from t",
+ }, {
+ input: "vexplain queries select * from t",
+ }, {
+ input: "vexplain all select * from t",
+ }, {
+ input: "vexplain plan select * from t",
+ }, {
+ input: "vexplain select * from t",
+ output: "vexplain plan select * from t",
}, {
input: "explain analyze select * from t",
}, {
@@ -2298,6 +2361,14 @@ var (
}, {
input: "start transaction",
output: "begin",
+ }, {
+ input: "start transaction with consistent snapshot",
+ }, {
+ input: "start transaction read write",
+ }, {
+ input: "start transaction read only",
+ }, {
+ input: "start transaction read only, with consistent snapshot",
}, {
input: "commit",
}, {
@@ -3258,6 +3329,12 @@ var (
}, {
input: "select * from (((select 1))) as tbl",
output: "select * from (select 1 from dual) as tbl",
+ }, {
+ input: `select * from t1 where col1 like 'ks\%' and col2 = 'ks\%' and col1 like 'ks%' and col2 = 'ks%'`,
+ output: `select * from t1 where col1 like 'ks\%' and col2 = 'ks\%' and col1 like 'ks%' and col2 = 'ks%'`,
+ }, {
+ input: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`,
+ output: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`,
}}
)
@@ -5413,17 +5490,7 @@ var (
"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
"(F(F(F(F(F(F(F(F(F(F(F(F(",
- output: "max nesting level reached at position 406",
- }, {
- input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
- "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
- "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
- "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(",
- output: "syntax error at position 404",
+ output: "syntax error at position 406",
}, {
// This construct is considered invalid due to a grammar conflict.
input: "insert into a select * from b join c on duplicate key update d=e",
@@ -5623,9 +5690,9 @@ partition by range (id)
for _, testcase := range testcases {
t.Run(testcase.input+":"+testcase.mysqlVersion, func(t *testing.T) {
- oldMySQLVersion := MySQLVersion
- defer func() { MySQLVersion = oldMySQLVersion }()
- MySQLVersion = testcase.mysqlVersion
+ oldMySQLVersion := mySQLParserVersion
+ defer func() { mySQLParserVersion = oldMySQLVersion }()
+ mySQLParserVersion = testcase.mysqlVersion
tree, err := Parse(testcase.input)
require.NoError(t, err, testcase.input)
out := String(tree)
diff --git a/go/vt/sqlparser/parser.go b/go/vt/sqlparser/parser.go
index a1033ef7c58..ae630ce3dea 100644
--- a/go/vt/sqlparser/parser.go
+++ b/go/vt/sqlparser/parser.go
@@ -43,8 +43,8 @@ var parserPool = sync.Pool{
// zeroParser is a zero-initialized parser to help reinitialize the parser for pooling.
var zeroParser yyParserImpl
-// MySQLVersion is the version of MySQL that the parser would emulate
-var MySQLVersion = "50709" // default version if nothing else is stated
+// mySQLParserVersion is the version of MySQL that the parser would emulate
+var mySQLParserVersion string
// yyParsePooled is a wrapper around yyParse that pools the parser objects. There isn't a
// particularly good reason to use yyParse directly, since it immediately discards its parser.
@@ -108,18 +108,25 @@ func Parse2(sql string) (Statement, BindVars, error) {
func checkParserVersionFlag() {
if flag.Parsed() {
versionFlagSync.Do(func() {
- if mySQLVersion := servenv.MySQLServerVersion(); mySQLVersion != "" {
- convVersion, err := convertMySQLVersionToCommentVersion(mySQLVersion)
- if err != nil {
- log.Error(err)
- } else {
- MySQLVersion = convVersion
- }
+ convVersion, err := convertMySQLVersionToCommentVersion(servenv.MySQLServerVersion())
+ if err != nil {
+ log.Fatalf("unable to parse mysql version: %v", err)
}
+ mySQLParserVersion = convVersion
})
}
}
+// SetParserVersion sets the mysql parser version
+func SetParserVersion(version string) {
+ mySQLParserVersion = version
+}
+
+// GetParserVersion returns the version of the mysql parser
+func GetParserVersion() string {
+ return mySQLParserVersion
+}
+
// convertMySQLVersionToCommentVersion converts the MySQL version into comment version format.
func convertMySQLVersionToCommentVersion(version string) (string, error) {
var res = make([]int, 3)
@@ -307,5 +314,5 @@ loop:
}
func IsMySQL80AndAbove() bool {
- return MySQLVersion >= "80000"
+ return mySQLParserVersion >= "80000"
}
diff --git a/go/vt/sqlparser/precedence.go b/go/vt/sqlparser/precedence.go
index e1beafef816..d63a56b62ef 100644
--- a/go/vt/sqlparser/precedence.go
+++ b/go/vt/sqlparser/precedence.go
@@ -59,7 +59,7 @@ func precedenceFor(in Expr) Precendence {
return P12
case *ComparisonExpr:
switch node.Operator {
- case EqualOp, NotEqualOp, GreaterThanOp, GreaterEqualOp, LessThanOp, LessEqualOp, LikeOp, InOp, RegexpOp:
+ case EqualOp, NotEqualOp, GreaterThanOp, GreaterEqualOp, LessThanOp, LessEqualOp, LikeOp, InOp, RegexpOp, NullSafeEqualOp:
return P11
}
case *IsExpr:
diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go
index cbc481bb4d3..215c9480823 100644
--- a/go/vt/sqlparser/precedence_test.go
+++ b/go/vt/sqlparser/precedence_test.go
@@ -198,6 +198,7 @@ func TestParens(t *testing.T) {
{in: "10 - 2 - 1", expected: "10 - 2 - 1"},
{in: "(10 - 2) - 1", expected: "10 - 2 - 1"},
{in: "10 - (2 - 1)", expected: "10 - (2 - 1)"},
+ {in: "0 <=> (1 and 0)", expected: "0 <=> (1 and 0)"},
}
for _, tc := range tests {
diff --git a/go/vt/sqlparser/predicate_rewriting.go b/go/vt/sqlparser/predicate_rewriting.go
new file mode 100644
index 00000000000..0348f95f115
--- /dev/null
+++ b/go/vt/sqlparser/predicate_rewriting.go
@@ -0,0 +1,450 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "vitess.io/vitess/go/vt/log"
+)
+
+// RewritePredicate walks the input AST and rewrites any boolean logic into a simpler form
+// This simpler form is CNF plus logic for extracting predicates from OR, plus logic for turning ORs into IN
+// Note: In order to re-plan, we need to empty the accumulated metadata in the AST,
+// so ColName.Metadata will be nil:ed out as part of this rewrite
+func RewritePredicate(ast SQLNode) SQLNode {
+ for {
+ printExpr(ast)
+ exprChanged := false
+ stopOnChange := func(SQLNode, SQLNode) bool {
+ return !exprChanged
+ }
+ ast = SafeRewrite(ast, stopOnChange, func(cursor *Cursor) bool {
+ e, isExpr := cursor.node.(Expr)
+ if !isExpr {
+ return true
+ }
+
+ rewritten, state := simplifyExpression(e)
+ if ch, isChange := state.(changed); isChange {
+ printRule(ch.rule, ch.exprMatched)
+ exprChanged = true
+ cursor.Replace(rewritten)
+ }
+
+ if col, isCol := cursor.node.(*ColName); isCol {
+ col.Metadata = nil
+ }
+ return !exprChanged
+ })
+
+ if !exprChanged {
+ return ast
+ }
+ }
+}
+
+func simplifyExpression(expr Expr) (Expr, rewriteState) {
+ switch expr := expr.(type) {
+ case *NotExpr:
+ return simplifyNot(expr)
+ case *OrExpr:
+ return simplifyOr(expr)
+ case *XorExpr:
+ return simplifyXor(expr)
+ case *AndExpr:
+ return simplifyAnd(expr)
+ }
+ return expr, noChange{}
+}
+
+func simplifyNot(expr *NotExpr) (Expr, rewriteState) {
+ switch child := expr.Expr.(type) {
+ case *NotExpr:
+ return child.Expr,
+ newChange("NOT NOT A => A", f(expr))
+ case *OrExpr:
+ return &AndExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}},
+ newChange("NOT (A OR B) => NOT A AND NOT B", f(expr))
+ case *AndExpr:
+ return &OrExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}},
+ newChange("NOT (A AND B) => NOT A OR NOT B", f(expr))
+ }
+ return expr, noChange{}
+}
+
+// ExtractINFromOR will add additional predicated to an OR.
+// this rewriter should not be used in a fixed point way, since it returns the original expression with additions,
+// and it will therefor OOM before it stops rewriting
+func ExtractINFromOR(expr *OrExpr) []Expr {
+ // we check if we have two comparisons on either side of the OR
+ // that we can add as an ANDed comparison.
+ // WHERE (a = 5 and B) or (a = 6 AND C) =>
+ // WHERE (a = 5 AND B) OR (a = 6 AND C) AND a IN (5,6)
+ // This rewrite makes it possible to find a better route than Scatter if the `a` column has a helpful vindex
+ lftPredicates := SplitAndExpression(nil, expr.Left)
+ rgtPredicates := SplitAndExpression(nil, expr.Right)
+ var ins []Expr
+ for _, lft := range lftPredicates {
+ l, ok := lft.(*ComparisonExpr)
+ if !ok {
+ continue
+ }
+ for _, rgt := range rgtPredicates {
+ r, ok := rgt.(*ComparisonExpr)
+ if !ok {
+ continue
+ }
+ in, state := tryTurningOrIntoIn(l, r)
+ if state.changed() {
+ ins = append(ins, in)
+ }
+ }
+ }
+
+ return uniquefy(ins)
+}
+
+func simplifyOr(expr *OrExpr) (Expr, rewriteState) {
+ or := expr
+
+ // first we search for ANDs and see how they can be simplified
+ land, lok := or.Left.(*AndExpr)
+ rand, rok := or.Right.(*AndExpr)
+ switch {
+ case lok && rok:
+ // (<> AND <>) OR (<> AND <>)
+ var a, b, c Expr
+ var change changed
+ switch {
+ case Equals.Expr(land.Left, rand.Left):
+ change = newChange("(A and B) or (A and C) => A AND (B OR C)", f(expr))
+ a, b, c = land.Left, land.Right, rand.Right
+ case Equals.Expr(land.Left, rand.Right):
+ change = newChange("(A and B) or (C and A) => A AND (B OR C)", f(expr))
+ a, b, c = land.Left, land.Right, rand.Left
+ case Equals.Expr(land.Right, rand.Left):
+ change = newChange("(B and A) or (A and C) => A AND (B OR C)", f(expr))
+ a, b, c = land.Right, land.Left, rand.Right
+ case Equals.Expr(land.Right, rand.Right):
+ change = newChange("(B and A) or (C and A) => A AND (B OR C)", f(expr))
+ a, b, c = land.Right, land.Left, rand.Left
+ default:
+ return expr, noChange{}
+ }
+ return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, change
+ case lok:
+ // (<> AND <>) OR <>
+ // Simplification
+ if Equals.Expr(or.Right, land.Left) || Equals.Expr(or.Right, land.Right) {
+ return or.Right, newChange("(A AND B) OR A => A", f(expr))
+ }
+ // Distribution Law
+ return &AndExpr{Left: &OrExpr{Left: land.Left, Right: or.Right}, Right: &OrExpr{Left: land.Right, Right: or.Right}},
+ newChange("(A AND B) OR C => (A OR C) AND (B OR C)", f(expr))
+ case rok:
+ // <> OR (<> AND <>)
+ // Simplification
+ if Equals.Expr(or.Left, rand.Left) || Equals.Expr(or.Left, rand.Right) {
+ return or.Left, newChange("A OR (A AND B) => A", f(expr))
+ }
+ // Distribution Law
+ return &AndExpr{
+ Left: &OrExpr{Left: or.Left, Right: rand.Left},
+ Right: &OrExpr{Left: or.Left, Right: rand.Right},
+ },
+ newChange("C OR (A AND B) => (C OR A) AND (C OR B)", f(expr))
+ }
+
+ // next, we want to try to turn multiple ORs into an IN when possible
+ lftCmp, lok := or.Left.(*ComparisonExpr)
+ rgtCmp, rok := or.Right.(*ComparisonExpr)
+ if lok && rok {
+ newExpr, rewritten := tryTurningOrIntoIn(lftCmp, rgtCmp)
+ if rewritten.changed() {
+ return newExpr, rewritten
+ }
+ }
+
+ // Try to make distinct
+ return distinctOr(expr)
+}
+
+func tryTurningOrIntoIn(l, r *ComparisonExpr) (Expr, rewriteState) {
+ // looks for A = X OR A = Y and turns them into A IN (X, Y)
+ col, ok := l.Left.(*ColName)
+ if !ok || !Equals.Expr(col, r.Left) {
+ return nil, noChange{}
+ }
+
+ var tuple ValTuple
+ var ruleStr string
+ switch l.Operator {
+ case EqualOp:
+ tuple = ValTuple{l.Right}
+ ruleStr = "A = <>"
+ case InOp:
+ lft, ok := l.Right.(ValTuple)
+ if !ok {
+ return nil, noChange{}
+ }
+ tuple = lft
+ ruleStr = "A IN (<>, <>)"
+ default:
+ return nil, noChange{}
+ }
+
+ ruleStr += " OR "
+
+ switch r.Operator {
+ case EqualOp:
+ tuple = append(tuple, r.Right)
+ ruleStr += "A = <>"
+ case InOp:
+ lft, ok := r.Right.(ValTuple)
+ if !ok {
+ return nil, noChange{}
+ }
+ tuple = append(tuple, lft...)
+ ruleStr += "A IN (<>, <>)"
+ default:
+ return nil, noChange{}
+ }
+
+ ruleStr += " => A IN (<>, <>)"
+
+ return &ComparisonExpr{
+ Operator: InOp,
+ Left: col,
+ Right: uniquefy(tuple),
+ }, newChange(ruleStr, f(&OrExpr{Left: l, Right: r}))
+}
+
+func uniquefy(tuple ValTuple) (output ValTuple) {
+outer:
+ for _, expr := range tuple {
+ for _, seen := range output {
+ if Equals.Expr(expr, seen) {
+ continue outer
+ }
+ }
+ output = append(output, expr)
+ }
+ return
+}
+
+func simplifyXor(expr *XorExpr) (Expr, rewriteState) {
+ // DeMorgan Rewriter
+ return &AndExpr{
+ Left: &OrExpr{Left: expr.Left, Right: expr.Right},
+ Right: &NotExpr{Expr: &AndExpr{Left: expr.Left, Right: expr.Right}},
+ }, newChange("(A XOR B) => (A OR B) AND NOT (A AND B)", f(expr))
+}
+
+func simplifyAnd(expr *AndExpr) (Expr, rewriteState) {
+ res, rewritten := distinctAnd(expr)
+ if rewritten.changed() {
+ return res, rewritten
+ }
+ and := expr
+ if or, ok := and.Left.(*OrExpr); ok {
+ // Simplification
+
+ if Equals.Expr(or.Left, and.Right) {
+ return and.Right, newChange("(A OR B) AND A => A", f(expr))
+ }
+ if Equals.Expr(or.Right, and.Right) {
+ return and.Right, newChange("(A OR B) AND B => B", f(expr))
+ }
+ }
+ if or, ok := and.Right.(*OrExpr); ok {
+ // Simplification
+ if Equals.Expr(or.Left, and.Left) {
+ return and.Left, newChange("A AND (A OR B) => A", f(expr))
+ }
+ if Equals.Expr(or.Right, and.Left) {
+ return and.Left, newChange("A AND (B OR A) => A", f(expr))
+ }
+ }
+
+ return expr, noChange{}
+}
+
+func distinctOr(in *OrExpr) (Expr, rewriteState) {
+ var skipped []*OrExpr
+ todo := []*OrExpr{in}
+ var leaves []Expr
+ for len(todo) > 0 {
+ curr := todo[0]
+ todo = todo[1:]
+ addAnd := func(in Expr) {
+ and, ok := in.(*OrExpr)
+ if ok {
+ todo = append(todo, and)
+ } else {
+ leaves = append(leaves, in)
+ }
+ }
+ addAnd(curr.Left)
+ addAnd(curr.Right)
+ }
+ original := len(leaves)
+ var predicates []Expr
+
+outer1:
+ for len(leaves) > 0 {
+ curr := leaves[0]
+ leaves = leaves[1:]
+ for _, alreadyIn := range predicates {
+ if Equals.Expr(alreadyIn, curr) {
+ if log.V(0) {
+ skipped = append(skipped, &OrExpr{Left: alreadyIn, Right: curr})
+ }
+ continue outer1
+ }
+ }
+ predicates = append(predicates, curr)
+ }
+ if original == len(predicates) {
+ return in, noChange{}
+ }
+ var result Expr
+ for i, curr := range predicates {
+ if i == 0 {
+ result = curr
+ continue
+ }
+ result = &OrExpr{Left: result, Right: curr}
+ }
+
+ return result, newChange("A OR A => A", func() Expr {
+ var result Expr
+ for _, orExpr := range skipped {
+ if result == nil {
+ result = orExpr
+ continue
+ }
+
+ result = &OrExpr{
+ Left: result,
+ Right: orExpr,
+ }
+ }
+ return result
+ })
+}
+
+func distinctAnd(in *AndExpr) (Expr, rewriteState) {
+ var skipped []*AndExpr
+ todo := []*AndExpr{in}
+ var leaves []Expr
+ for len(todo) > 0 {
+ curr := todo[0]
+ todo = todo[1:]
+ addExpr := func(in Expr) {
+ if and, ok := in.(*AndExpr); ok {
+ todo = append(todo, and)
+ } else {
+ leaves = append(leaves, in)
+ }
+ }
+ addExpr(curr.Left)
+ addExpr(curr.Right)
+ }
+ original := len(leaves)
+ var predicates []Expr
+
+outer1:
+ for _, curr := range leaves {
+ for _, alreadyIn := range predicates {
+ if Equals.Expr(alreadyIn, curr) {
+ if log.V(0) {
+ skipped = append(skipped, &AndExpr{Left: alreadyIn, Right: curr})
+ }
+ continue outer1
+ }
+ }
+ predicates = append(predicates, curr)
+ }
+ if original == len(predicates) {
+ return in, noChange{}
+ }
+ var result Expr
+ for i, curr := range predicates {
+ if i == 0 {
+ result = curr
+ continue
+ }
+ result = &AndExpr{Left: result, Right: curr}
+ }
+ return AndExpressions(leaves...), newChange("A AND A => A", func() Expr {
+ var result Expr
+ for _, andExpr := range skipped {
+ if result == nil {
+ result = andExpr
+ continue
+ }
+
+ result = &AndExpr{
+ Left: result,
+ Right: andExpr,
+ }
+ }
+ return result
+ })
+}
+
+type (
+ rewriteState interface {
+ changed() bool
+ }
+ noChange struct{}
+
+ // changed makes it possible to make sure we have a rule string for each change we do in the expression tree
+ changed struct {
+ rule string
+
+ // ExprMatched is a function here so building of this expression can be paid only when we are debug logging
+ exprMatched func() Expr
+ }
+)
+
+func (noChange) changed() bool { return false }
+func (changed) changed() bool { return true }
+
+// f returns a function that returns the expression. It's short by design, so it interferes minimally
+// used for logging
+func f(e Expr) func() Expr {
+ return func() Expr { return e }
+}
+
+func printRule(rule string, expr func() Expr) {
+ if log.V(10) {
+ log.Infof("Rule: %s ON %s", rule, String(expr()))
+ }
+}
+
+func printExpr(expr SQLNode) {
+ if log.V(10) {
+ log.Infof("Current: %s", String(expr))
+ }
+}
+
+func newChange(rule string, exprMatched func() Expr) changed {
+ return changed{
+ rule: rule,
+ exprMatched: exprMatched,
+ }
+}
diff --git a/go/vt/sqlparser/predicate_rewriting_test.go b/go/vt/sqlparser/predicate_rewriting_test.go
new file mode 100644
index 00000000000..34e23597894
--- /dev/null
+++ b/go/vt/sqlparser/predicate_rewriting_test.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSimplifyExpression(in *testing.T) {
+ tests := []struct {
+ in string
+ expected string
+ }{{
+ in: "not (not A = 3)",
+ expected: "A = 3",
+ }, {
+ in: "not (A = 3 and B = 2)",
+ expected: "not A = 3 or not B = 2",
+ }, {
+ in: "not (A = 3 or B = 2)",
+ expected: "not A = 3 and not B = 2",
+ }, {
+ in: "A xor B",
+ expected: "(A or B) and not (A and B)",
+ }, {
+ in: "(A and B) or C",
+ expected: "(A or C) and (B or C)",
+ }, {
+ in: "C or (A and B)",
+ expected: "(C or A) and (C or B)",
+ }, {
+ in: "A and A",
+ expected: "A",
+ }, {
+ in: "A OR A",
+ expected: "A",
+ }, {
+ in: "A OR (A AND B)",
+ expected: "A",
+ }, {
+ in: "A OR (B AND A)",
+ expected: "A",
+ }, {
+ in: "(A AND B) OR A",
+ expected: "A",
+ }, {
+ in: "(B AND A) OR A",
+ expected: "A",
+ }, {
+ in: "(A and B) and (B and A)",
+ expected: "A and B",
+ }, {
+ in: "(A or B) and A",
+ expected: "A",
+ }, {
+ in: "A and (A or B)",
+ expected: "A",
+ }, {
+ in: "(A and B) OR (A and C)",
+ expected: "A and (B or C)",
+ }, {
+ in: "(A and B) OR (C and A)",
+ expected: "A and (B or C)",
+ }, {
+ in: "(B and A) OR (A and C)",
+ expected: "A and (B or C)",
+ }, {
+ in: "(B and A) OR (C and A)",
+ expected: "A and (B or C)",
+ }}
+
+ for _, tc := range tests {
+ in.Run(tc.in, func(t *testing.T) {
+ expr, err := ParseExpr(tc.in)
+ require.NoError(t, err)
+
+ expr, didRewrite := simplifyExpression(expr)
+ assert.True(t, didRewrite.changed())
+ assert.Equal(t, tc.expected, String(expr))
+ })
+ }
+}
+
+func TestRewritePredicate(in *testing.T) {
+ tests := []struct {
+ in string
+ expected string
+ }{{
+ in: "A xor B",
+ expected: "(A or B) and (not A or not B)",
+ }, {
+ in: "(A and B) and (B and A) and (B and A) and (A and B)",
+ expected: "A and B",
+ }, {
+ in: "((A and B) OR (A and C) OR (A and D)) and E and F",
+ expected: "A and (B or C or D) and E and F",
+ }, {
+ in: "(A and B) OR (A and C)",
+ expected: "A and (B or C)",
+ }, {
+ in: "(A and B) OR (C and A)",
+ expected: "A and (B or C)",
+ }, {
+ in: "(B and A) OR (A and C)",
+ expected: "A and (B or C)",
+ }, {
+ in: "(A and B) or (A and C) or (A and D)",
+ expected: "A and (B or C or D)",
+ }, {
+ in: "(a=1 or a IN (1,2)) or (a = 2 or a = 3)",
+ expected: "a in (1, 2, 3)",
+ }, {
+ in: "A and (B or A)",
+ expected: "A",
+ }}
+
+ for _, tc := range tests {
+ in.Run(tc.in, func(t *testing.T) {
+ expr, err := ParseExpr(tc.in)
+ require.NoError(t, err)
+
+ output := RewritePredicate(expr)
+ assert.Equal(t, tc.expected, String(output))
+ })
+ }
+}
+
+func TestExtractINFromOR(in *testing.T) {
+ tests := []struct {
+ in string
+ expected string
+ }{{
+ in: "(A and B) or (B and A)",
+ expected: "",
+ }, {
+ in: "(a = 5 and B) or A",
+ expected: "",
+ }, {
+ in: "a = 5 and B or a = 6 and C",
+ expected: "a in (5, 6)",
+ }, {
+ in: "(a = 5 and b = 1 or b = 2 and a = 6)",
+ expected: "a in (5, 6) and b in (1, 2)",
+ }, {
+ in: "(a in (1,5) and B or C and a = 6)",
+ expected: "a in (1, 5, 6)",
+ }, {
+ in: "(a in (1, 5) and B or C and a in (5, 7))",
+ expected: "a in (1, 5, 7)",
+ }}
+
+ for _, tc := range tests {
+ in.Run(tc.in, func(t *testing.T) {
+ expr, err := ParseExpr(tc.in)
+ require.NoError(t, err)
+
+ output := ExtractINFromOR(expr.(*OrExpr))
+ assert.Equal(t, tc.expected, String(AndExpressions(output...)))
+ })
+ }
+}
diff --git a/go/vt/sqlparser/redact_query_test.go b/go/vt/sqlparser/redact_query_test.go
index 1921b52e5f1..029a307e7c8 100644
--- a/go/vt/sqlparser/redact_query_test.go
+++ b/go/vt/sqlparser/redact_query_test.go
@@ -18,6 +18,8 @@ package sqlparser
import (
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestRedactSQLStatements(t *testing.T) {
@@ -27,7 +29,5 @@ func TestRedactSQLStatements(t *testing.T) {
t.Fatalf("redacting sql failed: %v", err)
}
- if redactedSQL != "select a, b, c from t where x = :redacted1 and y = :redacted1 and z = :redacted2" {
- t.Fatalf("Unknown sql redaction: %v", redactedSQL)
- }
+ require.Equal(t, "select a, b, c from t where x = :x and y = :x and z = :z", redactedSQL)
}
diff --git a/go/vt/sqlparser/rewriter_api.go b/go/vt/sqlparser/rewriter_api.go
index 8444ba8f068..05d371bad13 100644
--- a/go/vt/sqlparser/rewriter_api.go
+++ b/go/vt/sqlparser/rewriter_api.go
@@ -51,6 +51,29 @@ func Rewrite(node SQLNode, pre, post ApplyFunc) (result SQLNode) {
return parent.SQLNode
}
+// SafeRewrite does not allow replacing nodes on the down walk of the tree walking
+// Long term this is the only Rewrite functionality we want
+func SafeRewrite(
+ node SQLNode,
+ shouldVisitChildren func(node SQLNode, parent SQLNode) bool,
+ up ApplyFunc,
+) SQLNode {
+ var pre func(cursor *Cursor) bool
+ if shouldVisitChildren != nil {
+ pre = func(cursor *Cursor) bool {
+ visitChildren := shouldVisitChildren(cursor.Node(), cursor.Parent())
+ if !visitChildren && up != nil {
+ // this gives the up-function a chance to do work on this node even if we are not visiting the children
+ // unfortunately, if the `up` function also returns false for this node, we won't abort the rest of the
+ // tree walking. This is a temporary limitation, and will be fixed when we generated the correct code
+ up(cursor)
+ }
+ return visitChildren
+ }
+ }
+ return Rewrite(node, pre, up)
+}
+
// RootNode is the root node of the AST when rewriting. It is the first element of the tree.
type RootNode struct {
SQLNode
diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go
index 8fd76760a4d..3824c0b981b 100644
--- a/go/vt/sqlparser/sql.go
+++ b/go/vt/sqlparser/sql.go
@@ -22,18 +22,6 @@ func setDDL(yylex yyLexer, node Statement) {
yylex.(*Tokenizer).partialDDL = node
}
-func incNesting(yylex yyLexer) bool {
- yylex.(*Tokenizer).nesting++
- if yylex.(*Tokenizer).nesting == 200 {
- return true
- }
- return false
-}
-
-func decNesting(yylex yyLexer) {
- yylex.(*Tokenizer).nesting--
-}
-
// skipToEnd forces the lexer to end prematurely. Not all SQL statements
// are supported by the Parser, thus calling skipToEnd will make the lexer
// return EOF early.
@@ -260,427 +248,435 @@ const CHANGE = 57557
const MODIFY = 57558
const DEALLOCATE = 57559
const REVERT = 57560
-const SCHEMA = 57561
-const TABLE = 57562
-const INDEX = 57563
-const VIEW = 57564
-const TO = 57565
-const IGNORE = 57566
-const IF = 57567
-const PRIMARY = 57568
-const COLUMN = 57569
-const SPATIAL = 57570
-const FULLTEXT = 57571
-const KEY_BLOCK_SIZE = 57572
-const CHECK = 57573
-const INDEXES = 57574
-const ACTION = 57575
-const CASCADE = 57576
-const CONSTRAINT = 57577
-const FOREIGN = 57578
-const NO = 57579
-const REFERENCES = 57580
-const RESTRICT = 57581
-const SHOW = 57582
-const DESCRIBE = 57583
-const EXPLAIN = 57584
-const DATE = 57585
-const ESCAPE = 57586
-const REPAIR = 57587
-const OPTIMIZE = 57588
-const TRUNCATE = 57589
-const COALESCE = 57590
-const EXCHANGE = 57591
-const REBUILD = 57592
-const PARTITIONING = 57593
-const REMOVE = 57594
-const PREPARE = 57595
-const EXECUTE = 57596
-const MAXVALUE = 57597
-const PARTITION = 57598
-const REORGANIZE = 57599
-const LESS = 57600
-const THAN = 57601
-const PROCEDURE = 57602
-const TRIGGER = 57603
-const VINDEX = 57604
-const VINDEXES = 57605
-const DIRECTORY = 57606
-const NAME = 57607
-const UPGRADE = 57608
-const STATUS = 57609
-const VARIABLES = 57610
-const WARNINGS = 57611
-const CASCADED = 57612
-const DEFINER = 57613
-const OPTION = 57614
-const SQL = 57615
-const UNDEFINED = 57616
-const SEQUENCE = 57617
-const MERGE = 57618
-const TEMPORARY = 57619
-const TEMPTABLE = 57620
-const INVOKER = 57621
-const SECURITY = 57622
-const FIRST = 57623
-const AFTER = 57624
-const LAST = 57625
-const VITESS_MIGRATION = 57626
-const CANCEL = 57627
-const RETRY = 57628
-const LAUNCH = 57629
-const COMPLETE = 57630
-const CLEANUP = 57631
-const THROTTLE = 57632
-const UNTHROTTLE = 57633
-const EXPIRE = 57634
-const RATIO = 57635
-const BEGIN = 57636
-const START = 57637
-const TRANSACTION = 57638
-const COMMIT = 57639
-const ROLLBACK = 57640
-const SAVEPOINT = 57641
-const RELEASE = 57642
-const WORK = 57643
-const BIT = 57644
-const TINYINT = 57645
-const SMALLINT = 57646
-const MEDIUMINT = 57647
-const INT = 57648
-const INTEGER = 57649
-const BIGINT = 57650
-const INTNUM = 57651
-const REAL = 57652
-const DOUBLE = 57653
-const FLOAT_TYPE = 57654
-const DECIMAL_TYPE = 57655
-const NUMERIC = 57656
-const TIME = 57657
-const TIMESTAMP = 57658
-const DATETIME = 57659
-const YEAR = 57660
-const CHAR = 57661
-const VARCHAR = 57662
-const BOOL = 57663
-const CHARACTER = 57664
-const VARBINARY = 57665
-const NCHAR = 57666
-const TEXT = 57667
-const TINYTEXT = 57668
-const MEDIUMTEXT = 57669
-const LONGTEXT = 57670
-const BLOB = 57671
-const TINYBLOB = 57672
-const MEDIUMBLOB = 57673
-const LONGBLOB = 57674
-const JSON = 57675
-const JSON_SCHEMA_VALID = 57676
-const JSON_SCHEMA_VALIDATION_REPORT = 57677
-const ENUM = 57678
-const GEOMETRY = 57679
-const POINT = 57680
-const LINESTRING = 57681
-const POLYGON = 57682
-const GEOMCOLLECTION = 57683
-const GEOMETRYCOLLECTION = 57684
-const MULTIPOINT = 57685
-const MULTILINESTRING = 57686
-const MULTIPOLYGON = 57687
-const ASCII = 57688
-const UNICODE = 57689
-const NULLX = 57690
-const AUTO_INCREMENT = 57691
-const APPROXNUM = 57692
-const SIGNED = 57693
-const UNSIGNED = 57694
-const ZEROFILL = 57695
-const CODE = 57696
-const COLLATION = 57697
-const COLUMNS = 57698
-const DATABASES = 57699
-const ENGINES = 57700
-const EVENT = 57701
-const EXTENDED = 57702
-const FIELDS = 57703
-const FULL = 57704
-const FUNCTION = 57705
-const GTID_EXECUTED = 57706
-const KEYSPACES = 57707
-const OPEN = 57708
-const PLUGINS = 57709
-const PRIVILEGES = 57710
-const PROCESSLIST = 57711
-const SCHEMAS = 57712
-const TABLES = 57713
-const TRIGGERS = 57714
-const USER = 57715
-const VGTID_EXECUTED = 57716
-const VITESS_KEYSPACES = 57717
-const VITESS_METADATA = 57718
-const VITESS_MIGRATIONS = 57719
-const VITESS_REPLICATION_STATUS = 57720
-const VITESS_SHARDS = 57721
-const VITESS_TABLETS = 57722
-const VITESS_TARGET = 57723
-const VSCHEMA = 57724
-const VITESS_THROTTLED_APPS = 57725
-const NAMES = 57726
-const GLOBAL = 57727
-const SESSION = 57728
-const ISOLATION = 57729
-const LEVEL = 57730
-const READ = 57731
-const WRITE = 57732
-const ONLY = 57733
-const REPEATABLE = 57734
-const COMMITTED = 57735
-const UNCOMMITTED = 57736
-const SERIALIZABLE = 57737
-const CURRENT_TIMESTAMP = 57738
-const DATABASE = 57739
-const CURRENT_DATE = 57740
-const NOW = 57741
-const CURRENT_TIME = 57742
-const LOCALTIME = 57743
-const LOCALTIMESTAMP = 57744
-const CURRENT_USER = 57745
-const UTC_DATE = 57746
-const UTC_TIME = 57747
-const UTC_TIMESTAMP = 57748
-const DAY = 57749
-const DAY_HOUR = 57750
-const DAY_MICROSECOND = 57751
-const DAY_MINUTE = 57752
-const DAY_SECOND = 57753
-const HOUR = 57754
-const HOUR_MICROSECOND = 57755
-const HOUR_MINUTE = 57756
-const HOUR_SECOND = 57757
-const MICROSECOND = 57758
-const MINUTE = 57759
-const MINUTE_MICROSECOND = 57760
-const MINUTE_SECOND = 57761
-const MONTH = 57762
-const QUARTER = 57763
-const SECOND = 57764
-const SECOND_MICROSECOND = 57765
-const YEAR_MONTH = 57766
-const WEEK = 57767
-const REPLACE = 57768
-const CONVERT = 57769
-const CAST = 57770
-const SUBSTR = 57771
-const SUBSTRING = 57772
-const SEPARATOR = 57773
-const TIMESTAMPADD = 57774
-const TIMESTAMPDIFF = 57775
-const WEIGHT_STRING = 57776
-const LTRIM = 57777
-const RTRIM = 57778
-const TRIM = 57779
-const JSON_ARRAY = 57780
-const JSON_OBJECT = 57781
-const JSON_QUOTE = 57782
-const JSON_DEPTH = 57783
-const JSON_TYPE = 57784
-const JSON_LENGTH = 57785
-const JSON_VALID = 57786
-const JSON_ARRAY_APPEND = 57787
-const JSON_ARRAY_INSERT = 57788
-const JSON_INSERT = 57789
-const JSON_MERGE = 57790
-const JSON_MERGE_PATCH = 57791
-const JSON_MERGE_PRESERVE = 57792
-const JSON_REMOVE = 57793
-const JSON_REPLACE = 57794
-const JSON_SET = 57795
-const JSON_UNQUOTE = 57796
-const COUNT = 57797
-const AVG = 57798
-const MAX = 57799
-const MIN = 57800
-const SUM = 57801
-const GROUP_CONCAT = 57802
-const BIT_AND = 57803
-const BIT_OR = 57804
-const BIT_XOR = 57805
-const STD = 57806
-const STDDEV = 57807
-const STDDEV_POP = 57808
-const STDDEV_SAMP = 57809
-const VAR_POP = 57810
-const VAR_SAMP = 57811
-const VARIANCE = 57812
-const REGEXP_INSTR = 57813
-const REGEXP_LIKE = 57814
-const REGEXP_REPLACE = 57815
-const REGEXP_SUBSTR = 57816
-const ExtractValue = 57817
-const UpdateXML = 57818
-const GET_LOCK = 57819
-const RELEASE_LOCK = 57820
-const RELEASE_ALL_LOCKS = 57821
-const IS_FREE_LOCK = 57822
-const IS_USED_LOCK = 57823
-const LOCATE = 57824
-const POSITION = 57825
-const MATCH = 57826
-const AGAINST = 57827
-const BOOLEAN = 57828
-const LANGUAGE = 57829
-const WITH = 57830
-const QUERY = 57831
-const EXPANSION = 57832
-const WITHOUT = 57833
-const VALIDATION = 57834
-const UNUSED = 57835
-const ARRAY = 57836
-const BYTE = 57837
-const CUME_DIST = 57838
-const DESCRIPTION = 57839
-const DENSE_RANK = 57840
-const EMPTY = 57841
-const EXCEPT = 57842
-const FIRST_VALUE = 57843
-const GROUPING = 57844
-const GROUPS = 57845
-const JSON_TABLE = 57846
-const LAG = 57847
-const LAST_VALUE = 57848
-const LATERAL = 57849
-const LEAD = 57850
-const NTH_VALUE = 57851
-const NTILE = 57852
-const OF = 57853
-const OVER = 57854
-const PERCENT_RANK = 57855
-const RANK = 57856
-const RECURSIVE = 57857
-const ROW_NUMBER = 57858
-const SYSTEM = 57859
-const WINDOW = 57860
-const ACTIVE = 57861
-const ADMIN = 57862
-const AUTOEXTEND_SIZE = 57863
-const BUCKETS = 57864
-const CLONE = 57865
-const COLUMN_FORMAT = 57866
-const COMPONENT = 57867
-const DEFINITION = 57868
-const ENFORCED = 57869
-const ENGINE_ATTRIBUTE = 57870
-const EXCLUDE = 57871
-const FOLLOWING = 57872
-const GET_MASTER_PUBLIC_KEY = 57873
-const HISTOGRAM = 57874
-const HISTORY = 57875
-const INACTIVE = 57876
-const INVISIBLE = 57877
-const LOCKED = 57878
-const MASTER_COMPRESSION_ALGORITHMS = 57879
-const MASTER_PUBLIC_KEY_PATH = 57880
-const MASTER_TLS_CIPHERSUITES = 57881
-const MASTER_ZSTD_COMPRESSION_LEVEL = 57882
-const NESTED = 57883
-const NETWORK_NAMESPACE = 57884
-const NOWAIT = 57885
-const NULLS = 57886
-const OJ = 57887
-const OLD = 57888
-const OPTIONAL = 57889
-const ORDINALITY = 57890
-const ORGANIZATION = 57891
-const OTHERS = 57892
-const PARTIAL = 57893
-const PATH = 57894
-const PERSIST = 57895
-const PERSIST_ONLY = 57896
-const PRECEDING = 57897
-const PRIVILEGE_CHECKS_USER = 57898
-const PROCESS = 57899
-const RANDOM = 57900
-const REFERENCE = 57901
-const REQUIRE_ROW_FORMAT = 57902
-const RESOURCE = 57903
-const RESPECT = 57904
-const RESTART = 57905
-const RETAIN = 57906
-const REUSE = 57907
-const ROLE = 57908
-const SECONDARY = 57909
-const SECONDARY_ENGINE = 57910
-const SECONDARY_ENGINE_ATTRIBUTE = 57911
-const SECONDARY_LOAD = 57912
-const SECONDARY_UNLOAD = 57913
-const SIMPLE = 57914
-const SKIP = 57915
-const SRID = 57916
-const THREAD_PRIORITY = 57917
-const TIES = 57918
-const UNBOUNDED = 57919
-const VCPU = 57920
-const VISIBLE = 57921
-const RETURNING = 57922
-const FORMAT_BYTES = 57923
-const FORMAT_PICO_TIME = 57924
-const PS_CURRENT_THREAD_ID = 57925
-const PS_THREAD_ID = 57926
-const GTID_SUBSET = 57927
-const GTID_SUBTRACT = 57928
-const WAIT_FOR_EXECUTED_GTID_SET = 57929
-const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 57930
-const FORMAT = 57931
-const TREE = 57932
-const VITESS = 57933
-const TRADITIONAL = 57934
-const VTEXPLAIN = 57935
-const LOCAL = 57936
-const LOW_PRIORITY = 57937
-const NO_WRITE_TO_BINLOG = 57938
-const LOGS = 57939
-const ERROR = 57940
-const GENERAL = 57941
-const HOSTS = 57942
-const OPTIMIZER_COSTS = 57943
-const USER_RESOURCES = 57944
-const SLOW = 57945
-const CHANNEL = 57946
-const RELAY = 57947
-const EXPORT = 57948
-const CURRENT = 57949
-const ROW = 57950
-const ROWS = 57951
-const AVG_ROW_LENGTH = 57952
-const CONNECTION = 57953
-const CHECKSUM = 57954
-const DELAY_KEY_WRITE = 57955
-const ENCRYPTION = 57956
-const ENGINE = 57957
-const INSERT_METHOD = 57958
-const MAX_ROWS = 57959
-const MIN_ROWS = 57960
-const PACK_KEYS = 57961
-const PASSWORD = 57962
-const FIXED = 57963
-const DYNAMIC = 57964
-const COMPRESSED = 57965
-const REDUNDANT = 57966
-const COMPACT = 57967
-const ROW_FORMAT = 57968
-const STATS_AUTO_RECALC = 57969
-const STATS_PERSISTENT = 57970
-const STATS_SAMPLE_PAGES = 57971
-const STORAGE = 57972
-const MEMORY = 57973
-const DISK = 57974
-const PARTITIONS = 57975
-const LINEAR = 57976
-const RANGE = 57977
-const LIST = 57978
-const SUBPARTITION = 57979
-const SUBPARTITIONS = 57980
-const HASH = 57981
+const QUERIES = 57561
+const SCHEMA = 57562
+const TABLE = 57563
+const INDEX = 57564
+const VIEW = 57565
+const TO = 57566
+const IGNORE = 57567
+const IF = 57568
+const PRIMARY = 57569
+const COLUMN = 57570
+const SPATIAL = 57571
+const FULLTEXT = 57572
+const KEY_BLOCK_SIZE = 57573
+const CHECK = 57574
+const INDEXES = 57575
+const ACTION = 57576
+const CASCADE = 57577
+const CONSTRAINT = 57578
+const FOREIGN = 57579
+const NO = 57580
+const REFERENCES = 57581
+const RESTRICT = 57582
+const SHOW = 57583
+const DESCRIBE = 57584
+const EXPLAIN = 57585
+const DATE = 57586
+const ESCAPE = 57587
+const REPAIR = 57588
+const OPTIMIZE = 57589
+const TRUNCATE = 57590
+const COALESCE = 57591
+const EXCHANGE = 57592
+const REBUILD = 57593
+const PARTITIONING = 57594
+const REMOVE = 57595
+const PREPARE = 57596
+const EXECUTE = 57597
+const MAXVALUE = 57598
+const PARTITION = 57599
+const REORGANIZE = 57600
+const LESS = 57601
+const THAN = 57602
+const PROCEDURE = 57603
+const TRIGGER = 57604
+const VINDEX = 57605
+const VINDEXES = 57606
+const DIRECTORY = 57607
+const NAME = 57608
+const UPGRADE = 57609
+const STATUS = 57610
+const VARIABLES = 57611
+const WARNINGS = 57612
+const CASCADED = 57613
+const DEFINER = 57614
+const OPTION = 57615
+const SQL = 57616
+const UNDEFINED = 57617
+const SEQUENCE = 57618
+const MERGE = 57619
+const TEMPORARY = 57620
+const TEMPTABLE = 57621
+const INVOKER = 57622
+const SECURITY = 57623
+const FIRST = 57624
+const AFTER = 57625
+const LAST = 57626
+const VITESS_MIGRATION = 57627
+const CANCEL = 57628
+const RETRY = 57629
+const LAUNCH = 57630
+const COMPLETE = 57631
+const CLEANUP = 57632
+const THROTTLE = 57633
+const UNTHROTTLE = 57634
+const EXPIRE = 57635
+const RATIO = 57636
+const VITESS_THROTTLER = 57637
+const BEGIN = 57638
+const START = 57639
+const TRANSACTION = 57640
+const COMMIT = 57641
+const ROLLBACK = 57642
+const SAVEPOINT = 57643
+const RELEASE = 57644
+const WORK = 57645
+const CONSISTENT = 57646
+const SNAPSHOT = 57647
+const BIT = 57648
+const TINYINT = 57649
+const SMALLINT = 57650
+const MEDIUMINT = 57651
+const INT = 57652
+const INTEGER = 57653
+const BIGINT = 57654
+const INTNUM = 57655
+const REAL = 57656
+const DOUBLE = 57657
+const FLOAT_TYPE = 57658
+const FLOAT4_TYPE = 57659
+const FLOAT8_TYPE = 57660
+const DECIMAL_TYPE = 57661
+const NUMERIC = 57662
+const TIME = 57663
+const TIMESTAMP = 57664
+const DATETIME = 57665
+const YEAR = 57666
+const CHAR = 57667
+const VARCHAR = 57668
+const BOOL = 57669
+const CHARACTER = 57670
+const VARBINARY = 57671
+const NCHAR = 57672
+const TEXT = 57673
+const TINYTEXT = 57674
+const MEDIUMTEXT = 57675
+const LONGTEXT = 57676
+const BLOB = 57677
+const TINYBLOB = 57678
+const MEDIUMBLOB = 57679
+const LONGBLOB = 57680
+const JSON = 57681
+const JSON_SCHEMA_VALID = 57682
+const JSON_SCHEMA_VALIDATION_REPORT = 57683
+const ENUM = 57684
+const GEOMETRY = 57685
+const POINT = 57686
+const LINESTRING = 57687
+const POLYGON = 57688
+const GEOMCOLLECTION = 57689
+const GEOMETRYCOLLECTION = 57690
+const MULTIPOINT = 57691
+const MULTILINESTRING = 57692
+const MULTIPOLYGON = 57693
+const ASCII = 57694
+const UNICODE = 57695
+const NULLX = 57696
+const AUTO_INCREMENT = 57697
+const APPROXNUM = 57698
+const SIGNED = 57699
+const UNSIGNED = 57700
+const ZEROFILL = 57701
+const CODE = 57702
+const COLLATION = 57703
+const COLUMNS = 57704
+const DATABASES = 57705
+const ENGINES = 57706
+const EVENT = 57707
+const EXTENDED = 57708
+const FIELDS = 57709
+const FULL = 57710
+const FUNCTION = 57711
+const GTID_EXECUTED = 57712
+const KEYSPACES = 57713
+const OPEN = 57714
+const PLUGINS = 57715
+const PRIVILEGES = 57716
+const PROCESSLIST = 57717
+const SCHEMAS = 57718
+const TABLES = 57719
+const TRIGGERS = 57720
+const USER = 57721
+const VGTID_EXECUTED = 57722
+const VITESS_KEYSPACES = 57723
+const VITESS_METADATA = 57724
+const VITESS_MIGRATIONS = 57725
+const VITESS_REPLICATION_STATUS = 57726
+const VITESS_SHARDS = 57727
+const VITESS_TABLETS = 57728
+const VITESS_TARGET = 57729
+const VSCHEMA = 57730
+const VITESS_THROTTLED_APPS = 57731
+const NAMES = 57732
+const GLOBAL = 57733
+const SESSION = 57734
+const ISOLATION = 57735
+const LEVEL = 57736
+const READ = 57737
+const WRITE = 57738
+const ONLY = 57739
+const REPEATABLE = 57740
+const COMMITTED = 57741
+const UNCOMMITTED = 57742
+const SERIALIZABLE = 57743
+const CURRENT_TIMESTAMP = 57744
+const DATABASE = 57745
+const CURRENT_DATE = 57746
+const NOW = 57747
+const CURRENT_TIME = 57748
+const LOCALTIME = 57749
+const LOCALTIMESTAMP = 57750
+const CURRENT_USER = 57751
+const UTC_DATE = 57752
+const UTC_TIME = 57753
+const UTC_TIMESTAMP = 57754
+const DAY = 57755
+const DAY_HOUR = 57756
+const DAY_MICROSECOND = 57757
+const DAY_MINUTE = 57758
+const DAY_SECOND = 57759
+const HOUR = 57760
+const HOUR_MICROSECOND = 57761
+const HOUR_MINUTE = 57762
+const HOUR_SECOND = 57763
+const MICROSECOND = 57764
+const MINUTE = 57765
+const MINUTE_MICROSECOND = 57766
+const MINUTE_SECOND = 57767
+const MONTH = 57768
+const QUARTER = 57769
+const SECOND = 57770
+const SECOND_MICROSECOND = 57771
+const YEAR_MONTH = 57772
+const WEEK = 57773
+const REPLACE = 57774
+const CONVERT = 57775
+const CAST = 57776
+const SUBSTR = 57777
+const SUBSTRING = 57778
+const SEPARATOR = 57779
+const TIMESTAMPADD = 57780
+const TIMESTAMPDIFF = 57781
+const WEIGHT_STRING = 57782
+const LTRIM = 57783
+const RTRIM = 57784
+const TRIM = 57785
+const JSON_ARRAY = 57786
+const JSON_OBJECT = 57787
+const JSON_QUOTE = 57788
+const JSON_DEPTH = 57789
+const JSON_TYPE = 57790
+const JSON_LENGTH = 57791
+const JSON_VALID = 57792
+const JSON_ARRAY_APPEND = 57793
+const JSON_ARRAY_INSERT = 57794
+const JSON_INSERT = 57795
+const JSON_MERGE = 57796
+const JSON_MERGE_PATCH = 57797
+const JSON_MERGE_PRESERVE = 57798
+const JSON_REMOVE = 57799
+const JSON_REPLACE = 57800
+const JSON_SET = 57801
+const JSON_UNQUOTE = 57802
+const COUNT = 57803
+const AVG = 57804
+const MAX = 57805
+const MIN = 57806
+const SUM = 57807
+const GROUP_CONCAT = 57808
+const BIT_AND = 57809
+const BIT_OR = 57810
+const BIT_XOR = 57811
+const STD = 57812
+const STDDEV = 57813
+const STDDEV_POP = 57814
+const STDDEV_SAMP = 57815
+const VAR_POP = 57816
+const VAR_SAMP = 57817
+const VARIANCE = 57818
+const REGEXP_INSTR = 57819
+const REGEXP_LIKE = 57820
+const REGEXP_REPLACE = 57821
+const REGEXP_SUBSTR = 57822
+const ExtractValue = 57823
+const UpdateXML = 57824
+const GET_LOCK = 57825
+const RELEASE_LOCK = 57826
+const RELEASE_ALL_LOCKS = 57827
+const IS_FREE_LOCK = 57828
+const IS_USED_LOCK = 57829
+const LOCATE = 57830
+const POSITION = 57831
+const MATCH = 57832
+const AGAINST = 57833
+const BOOLEAN = 57834
+const LANGUAGE = 57835
+const WITH = 57836
+const QUERY = 57837
+const EXPANSION = 57838
+const WITHOUT = 57839
+const VALIDATION = 57840
+const UNUSED = 57841
+const ARRAY = 57842
+const BYTE = 57843
+const CUME_DIST = 57844
+const DESCRIPTION = 57845
+const DENSE_RANK = 57846
+const EMPTY = 57847
+const EXCEPT = 57848
+const FIRST_VALUE = 57849
+const GROUPING = 57850
+const GROUPS = 57851
+const JSON_TABLE = 57852
+const LAG = 57853
+const LAST_VALUE = 57854
+const LATERAL = 57855
+const LEAD = 57856
+const NTH_VALUE = 57857
+const NTILE = 57858
+const OF = 57859
+const OVER = 57860
+const PERCENT_RANK = 57861
+const RANK = 57862
+const RECURSIVE = 57863
+const ROW_NUMBER = 57864
+const SYSTEM = 57865
+const WINDOW = 57866
+const ACTIVE = 57867
+const ADMIN = 57868
+const AUTOEXTEND_SIZE = 57869
+const BUCKETS = 57870
+const CLONE = 57871
+const COLUMN_FORMAT = 57872
+const COMPONENT = 57873
+const DEFINITION = 57874
+const ENFORCED = 57875
+const ENGINE_ATTRIBUTE = 57876
+const EXCLUDE = 57877
+const FOLLOWING = 57878
+const GET_MASTER_PUBLIC_KEY = 57879
+const HISTOGRAM = 57880
+const HISTORY = 57881
+const INACTIVE = 57882
+const INVISIBLE = 57883
+const LOCKED = 57884
+const MASTER_COMPRESSION_ALGORITHMS = 57885
+const MASTER_PUBLIC_KEY_PATH = 57886
+const MASTER_TLS_CIPHERSUITES = 57887
+const MASTER_ZSTD_COMPRESSION_LEVEL = 57888
+const NESTED = 57889
+const NETWORK_NAMESPACE = 57890
+const NOWAIT = 57891
+const NULLS = 57892
+const OJ = 57893
+const OLD = 57894
+const OPTIONAL = 57895
+const ORDINALITY = 57896
+const ORGANIZATION = 57897
+const OTHERS = 57898
+const PARTIAL = 57899
+const PATH = 57900
+const PERSIST = 57901
+const PERSIST_ONLY = 57902
+const PRECEDING = 57903
+const PRIVILEGE_CHECKS_USER = 57904
+const PROCESS = 57905
+const RANDOM = 57906
+const REFERENCE = 57907
+const REQUIRE_ROW_FORMAT = 57908
+const RESOURCE = 57909
+const RESPECT = 57910
+const RESTART = 57911
+const RETAIN = 57912
+const REUSE = 57913
+const ROLE = 57914
+const SECONDARY = 57915
+const SECONDARY_ENGINE = 57916
+const SECONDARY_ENGINE_ATTRIBUTE = 57917
+const SECONDARY_LOAD = 57918
+const SECONDARY_UNLOAD = 57919
+const SIMPLE = 57920
+const SKIP = 57921
+const SRID = 57922
+const THREAD_PRIORITY = 57923
+const TIES = 57924
+const UNBOUNDED = 57925
+const VCPU = 57926
+const VISIBLE = 57927
+const RETURNING = 57928
+const FORMAT_BYTES = 57929
+const FORMAT_PICO_TIME = 57930
+const PS_CURRENT_THREAD_ID = 57931
+const PS_THREAD_ID = 57932
+const GTID_SUBSET = 57933
+const GTID_SUBTRACT = 57934
+const WAIT_FOR_EXECUTED_GTID_SET = 57935
+const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 57936
+const FORMAT = 57937
+const TREE = 57938
+const VITESS = 57939
+const TRADITIONAL = 57940
+const VTEXPLAIN = 57941
+const VEXPLAIN = 57942
+const PLAN = 57943
+const LOCAL = 57944
+const LOW_PRIORITY = 57945
+const NO_WRITE_TO_BINLOG = 57946
+const LOGS = 57947
+const ERROR = 57948
+const GENERAL = 57949
+const HOSTS = 57950
+const OPTIMIZER_COSTS = 57951
+const USER_RESOURCES = 57952
+const SLOW = 57953
+const CHANNEL = 57954
+const RELAY = 57955
+const EXPORT = 57956
+const CURRENT = 57957
+const ROW = 57958
+const ROWS = 57959
+const AVG_ROW_LENGTH = 57960
+const CONNECTION = 57961
+const CHECKSUM = 57962
+const DELAY_KEY_WRITE = 57963
+const ENCRYPTION = 57964
+const ENGINE = 57965
+const INSERT_METHOD = 57966
+const MAX_ROWS = 57967
+const MIN_ROWS = 57968
+const PACK_KEYS = 57969
+const PASSWORD = 57970
+const FIXED = 57971
+const DYNAMIC = 57972
+const COMPRESSED = 57973
+const REDUNDANT = 57974
+const COMPACT = 57975
+const ROW_FORMAT = 57976
+const STATS_AUTO_RECALC = 57977
+const STATS_PERSISTENT = 57978
+const STATS_SAMPLE_PAGES = 57979
+const STORAGE = 57980
+const MEMORY = 57981
+const DISK = 57982
+const PARTITIONS = 57983
+const LINEAR = 57984
+const RANGE = 57985
+const LIST = 57986
+const SUBPARTITION = 57987
+const SUBPARTITIONS = 57988
+const HASH = 57989
var yyToknames = [...]string{
"$end",
@@ -918,6 +914,7 @@ var yyToknames = [...]string{
"MODIFY",
"DEALLOCATE",
"REVERT",
+ "QUERIES",
"SCHEMA",
"TABLE",
"INDEX",
@@ -993,6 +990,7 @@ var yyToknames = [...]string{
"UNTHROTTLE",
"EXPIRE",
"RATIO",
+ "VITESS_THROTTLER",
"BEGIN",
"START",
"TRANSACTION",
@@ -1001,6 +999,8 @@ var yyToknames = [...]string{
"SAVEPOINT",
"RELEASE",
"WORK",
+ "CONSISTENT",
+ "SNAPSHOT",
"BIT",
"TINYINT",
"SMALLINT",
@@ -1012,6 +1012,8 @@ var yyToknames = [...]string{
"REAL",
"DOUBLE",
"FLOAT_TYPE",
+ "FLOAT4_TYPE",
+ "FLOAT8_TYPE",
"DECIMAL_TYPE",
"NUMERIC",
"TIME",
@@ -1293,6 +1295,8 @@ var yyToknames = [...]string{
"VITESS",
"TRADITIONAL",
"VTEXPLAIN",
+ "VEXPLAIN",
+ "PLAN",
"LOCAL",
"LOW_PRIORITY",
"NO_WRITE_TO_BINLOG",
@@ -1354,1139 +1358,1094 @@ var yyExca = [...]int{
1, -1,
-2, 0,
-1, 2,
- 13, 48,
- 14, 48,
- -2, 37,
- -1, 49,
- 1, 147,
- 657, 147,
- -2, 155,
+ 13, 49,
+ 14, 49,
+ -2, 38,
-1, 50,
- 135, 155,
- 176, 155,
- 339, 155,
- -2, 505,
- -1, 57,
- 36, 751,
- 238, 751,
- 249, 751,
- 284, 765,
- 285, 765,
- -2, 753,
- -1, 62,
- 240, 782,
- -2, 780,
- -1, 116,
- 237, 1438,
- -2, 121,
+ 1, 157,
+ 665, 157,
+ -2, 165,
+ -1, 51,
+ 135, 165,
+ 176, 165,
+ 345, 165,
+ -2, 520,
+ -1, 58,
+ 36, 767,
+ 239, 767,
+ 250, 767,
+ 285, 781,
+ 286, 781,
+ -2, 769,
+ -1, 63,
+ 241, 805,
+ -2, 803,
-1, 118,
- 1, 148,
- 657, 148,
- -2, 155,
- -1, 129,
- 136, 391,
- 243, 391,
- -2, 494,
- -1, 148,
- 135, 155,
- 176, 155,
- 339, 155,
- -2, 514,
- -1, 798,
- 87, 1455,
- -2, 1300,
- -1, 799,
- 87, 1456,
- 221, 1460,
- -2, 1301,
- -1, 800,
- 221, 1459,
- -2, 39,
- -1, 880,
- 60, 851,
- -2, 866,
- -1, 966,
- 248, 40,
- 253, 40,
- -2, 402,
- -1, 1051,
- 1, 562,
- 657, 562,
- -2, 155,
- -1, 1340,
- 221, 1460,
- -2, 1301,
- -1, 1488,
- 60, 852,
- -2, 871,
- -1, 1489,
- 60, 853,
- -2, 872,
- -1, 1540,
- 135, 155,
- 176, 155,
- 339, 155,
- -2, 441,
- -1, 1619,
- 136, 391,
- 243, 391,
- -2, 494,
- -1, 1628,
- 248, 41,
- 253, 41,
- -2, 403,
- -1, 1982,
- 221, 1464,
- -2, 1458,
- -1, 1983,
- 221, 1460,
- -2, 1456,
- -1, 2083,
- 135, 155,
- 176, 155,
- 339, 155,
- -2, 442,
- -1, 2090,
- 26, 176,
- -2, 178,
- -1, 2451,
- 78, 95,
- 88, 95,
- -2, 930,
- -1, 2519,
- 632, 678,
- -2, 652,
- -1, 2686,
- 50, 1397,
- -2, 1391,
- -1, 3338,
- 632, 678,
- -2, 666,
- -1, 3426,
- 23, 1816,
- 33, 1816,
- 177, 1816,
- 260, 1816,
- 319, 1816,
- 320, 1816,
- 321, 1816,
- 322, 1816,
- 323, 1816,
- 324, 1816,
- 325, 1816,
- 327, 1816,
- 328, 1816,
- 329, 1816,
- 330, 1816,
- 331, 1816,
- 332, 1816,
- 333, 1816,
- 334, 1816,
- 335, 1816,
- 336, 1816,
- 337, 1816,
- 338, 1816,
- 340, 1816,
- 342, 1816,
- 343, 1816,
- 344, 1816,
- 345, 1816,
- 346, 1816,
- 347, 1816,
- 348, 1816,
- 349, 1816,
- 350, 1816,
- 353, 1816,
- 354, 1816,
- 355, 1816,
- 356, 1816,
- 357, 1816,
- 359, 1816,
- 360, 1816,
- 361, 1816,
- 362, 1816,
- 503, 1816,
- -2, 610,
+ 238, 1462,
+ -2, 131,
+ -1, 120,
+ 1, 158,
+ 665, 158,
+ -2, 165,
+ -1, 131,
+ 136, 405,
+ 244, 405,
+ -2, 509,
+ -1, 150,
+ 135, 165,
+ 176, 165,
+ 345, 165,
+ -2, 529,
+ -1, 810,
+ 87, 1479,
+ -2, 1328,
+ -1, 811,
+ 87, 1480,
+ 221, 1484,
+ -2, 1329,
+ -1, 812,
+ 221, 1483,
+ -2, 40,
+ -1, 892,
+ 60, 879,
+ -2, 894,
+ -1, 979,
+ 249, 41,
+ 254, 41,
+ -2, 416,
+ -1, 1064,
+ 1, 577,
+ 665, 577,
+ -2, 165,
+ -1, 1363,
+ 221, 1484,
+ -2, 1329,
+ -1, 1511,
+ 60, 880,
+ -2, 899,
+ -1, 1512,
+ 60, 881,
+ -2, 900,
+ -1, 1567,
+ 135, 165,
+ 176, 165,
+ 345, 165,
+ -2, 455,
+ -1, 1648,
+ 136, 405,
+ 244, 405,
+ -2, 509,
+ -1, 1657,
+ 249, 42,
+ 254, 42,
+ -2, 417,
+ -1, 2016,
+ 221, 1488,
+ -2, 1482,
+ -1, 2017,
+ 221, 1484,
+ -2, 1480,
+ -1, 2119,
+ 135, 165,
+ 176, 165,
+ 345, 165,
+ -2, 456,
+ -1, 2126,
+ 26, 186,
+ -2, 188,
+ -1, 2493,
+ 78, 96,
+ 88, 96,
+ -2, 958,
+ -1, 2562,
+ 640, 693,
+ -2, 667,
+ -1, 2729,
+ 50, 1430,
+ -2, 1424,
+ -1, 3382,
+ 640, 693,
+ -2, 681,
+ -1, 3471,
+ 90, 625,
+ 95, 625,
+ 105, 625,
+ 178, 625,
+ 179, 625,
+ 180, 625,
+ 181, 625,
+ 182, 625,
+ 183, 625,
+ 184, 625,
+ 185, 625,
+ 186, 625,
+ 187, 625,
+ 188, 625,
+ 189, 625,
+ 190, 625,
+ 191, 625,
+ 192, 625,
+ 193, 625,
+ 194, 625,
+ 195, 625,
+ 196, 625,
+ 197, 625,
+ 198, 625,
+ 199, 625,
+ 200, 625,
+ 201, 625,
+ 202, 625,
+ 203, 625,
+ 204, 625,
+ 205, 625,
+ 206, 625,
+ 207, 625,
+ 208, 625,
+ 209, 625,
+ 210, 625,
+ 211, 625,
+ 212, 625,
+ 213, 625,
+ 214, 625,
+ 215, 625,
+ 216, 625,
+ 217, 625,
+ 218, 625,
+ 219, 625,
+ -2, 1841,
}
const yyPrivate = 57344
-const yyLast = 47864
+const yyLast = 48056
var yyAct = [...]int{
- 1496, 3085, 3497, 3086, 3087, 3319, 3467, 3424, 3508, 3466,
- 802, 672, 3403, 3056, 1543, 652, 2913, 3369, 809, 2031,
- 801, 2835, 3392, 1849, 2738, 2745, 2011, 3303, 3251, 2795,
- 2080, 2800, 2797, 2796, 2794, 2799, 2798, 5, 2786, 3301,
- 2351, 3043, 1114, 2699, 3115, 873, 2703, 2702, 2645, 3291,
- 2385, 2815, 2013, 2150, 654, 2814, 2700, 2952, 2424, 2580,
- 3120, 764, 2946, 2051, 2753, 682, 2817, 1503, 2972, 2054,
- 2697, 763, 762, 2687, 2035, 2411, 1973, 1063, 650, 928,
- 2516, 2938, 769, 2113, 2484, 2841, 2564, 2138, 2118, 2485,
- 1597, 2486, 2436, 768, 2181, 2068, 39, 2417, 2403, 157,
- 1644, 2387, 38, 2056, 40, 1845, 2055, 1944, 1864, 1092,
- 2556, 896, 897, 875, 1490, 2159, 2198, 143, 2043, 998,
- 2137, 2120, 1803, 1626, 2478, 961, 956, 2453, 1532, 1970,
- 1512, 2058, 1116, 1470, 664, 1352, 1822, 1868, 647, 1743,
- 1280, 1633, 935, 1978, 2135, 932, 1725, 967, 936, 659,
- 2109, 1531, 98, 99, 1517, 974, 962, 1747, 963, 887,
- 1940, 1336, 884, 94, 1312, 79, 10, 93, 914, 9,
- 916, 8, 1112, 877, 964, 881, 1106, 1752, 1592, 1618,
- 1047, 885, 126, 127, 882, 100, 883, 909, 161, 658,
- 121, 119, 120, 641, 899, 2036, 78, 1356, 3328, 92,
- 2509, 3498, 1360, 101, 2152, 2153, 2154, 3354, 3044, 2783,
- 2152, 87, 1943, 2507, 904, 908, 3164, 3166, 3165, 3183,
- 3184, 3185, 3186, 3187, 3188, 3189, 704, 2539, 2538, 2196,
- 128, 1710, 3036, 3355, 621, 3450, 1877, 2999, 122, 929,
- 2572, 900, 2573, 89, 906, 906, 89, 3090, 1003, 3349,
- 3350, 2239, 1810, 588, 1809, 1808, 627, 890, 89, 1281,
- 923, 642, 891, 3090, 874, 1000, 818, 819, 820, 818,
- 819, 820, 1807, 1979, 1806, 954, 876, 1825, 1017, 1018,
- 1019, 1805, 1022, 1023, 1024, 1025, 978, 2, 1028, 1029,
- 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
- 1040, 1041, 1042, 1043, 1044, 977, 924, 122, 1011, 952,
- 951, 950, 898, 2805, 1783, 3404, 3445, 184, 953, 644,
- 1497, 645, 2008, 2009, 1004, 1007, 1008, 2383, 2512, 105,
- 106, 107, 2683, 110, 3089, 1281, 116, 1297, 2413, 185,
- 2529, 123, 583, 145, 2185, 2805, 3518, 2649, 3350, 940,
- 3089, 3454, 3452, 640, 166, 3465, 3488, 1020, 2802, 2918,
- 2917, 2532, 869, 870, 871, 872, 1291, 945, 880, 2803,
- 2183, 651, 922, 766, 767, 122, 3453, 3451, 3304, 2352,
- 1466, 1815, 2129, 2860, 3247, 156, 3246, 89, 2184, 3049,
- 3410, 144, 3050, 3410, 1002, 2809, 911, 912, 1001, 3479,
- 80, 2803, 80, 80, 3257, 2123, 3448, 3256, 3068, 3057,
- 163, 2248, 3393, 164, 621, 3400, 621, 2178, 80, 3067,
- 1854, 82, 3470, 3429, 2427, 2880, 1607, 2809, 3333, 2546,
- 2547, 2384, 1620, 1621, 155, 154, 183, 2734, 622, 922,
- 766, 767, 1291, 2571, 3170, 2735, 2736, 2075, 2076, 2428,
- 646, 3133, 2462, 2245, 2074, 2461, 2039, 1082, 2463, 3178,
- 3179, 1533, 1313, 1534, 2555, 867, 866, 1287, 1070, 3320,
- 1279, 2510, 1294, 1071, 1295, 1296, 2246, 2474, 89, 2949,
- 89, 89, 635, 621, 2857, 1314, 1315, 1316, 1317, 1318,
- 1319, 1320, 1322, 1321, 1323, 1324, 89, 2614, 2093, 2092,
- 2420, 2421, 1070, 2868, 1050, 2866, 2806, 1071, 2240, 2241,
- 2243, 2242, 1087, 1088, 1109, 1069, 1791, 1068, 639, 1530,
- 621, 770, 633, 683, 774, 685, 771, 772, 2010, 681,
- 684, 773, 2842, 2557, 1474, 149, 1622, 152, 2806, 1619,
- 1083, 150, 151, 1287, 3279, 2160, 3280, 167, 2517, 1076,
- 2837, 2122, 2542, 1046, 2199, 3500, 173, 702, 703, 3163,
- 3167, 3168, 3169, 3180, 3181, 3182, 3190, 3192, 735, 3191,
- 3193, 3194, 3195, 3198, 3199, 3200, 3201, 3196, 3197, 3202,
- 3147, 3151, 3148, 3149, 3150, 3162, 3152, 3153, 3154, 3155,
- 3156, 3157, 3158, 3159, 3160, 3161, 3203, 3204, 3205, 3206,
- 3207, 3208, 3173, 3177, 3176, 3174, 3175, 3171, 3172, 1089,
- 949, 3471, 1056, 1057, 621, 1726, 1700, 2830, 622, 1090,
- 622, 775, 1108, 776, 1091, 2831, 780, 1084, 1085, 1086,
- 782, 781, 3472, 783, 749, 748, 1077, 2838, 777, 778,
- 3446, 779, 2037, 2038, 1059, 1103, 2219, 1052, 2220, 621,
- 2221, 1731, 2559, 3038, 3037, 1021, 2615, 2222, 949, 1045,
- 1701, 915, 1702, 1027, 1026, 2839, 2201, 1066, 947, 1072,
- 1073, 1074, 1075, 2204, 2203, 158, 1330, 1331, 1332, 1333,
- 3231, 2163, 925, 919, 917, 2648, 1344, 622, 3094, 1348,
- 3034, 2039, 1110, 1111, 2052, 1477, 1286, 1283, 1284, 1285,
- 1290, 1292, 1289, 958, 1288, 987, 3209, 3210, 3211, 3212,
- 3213, 3214, 3215, 3216, 1282, 985, 2202, 3327, 1104, 2508,
- 3434, 627, 944, 1049, 622, 946, 996, 995, 2205, 1338,
- 955, 2211, 2207, 2209, 2210, 2208, 2212, 2213, 3432, 957,
- 994, 1529, 993, 958, 992, 991, 990, 3438, 3439, 925,
- 919, 917, 1611, 153, 989, 2755, 2756, 2511, 984, 2544,
- 997, 2757, 3433, 933, 3406, 621, 3512, 3406, 970, 2950,
- 3519, 2182, 1286, 1283, 1284, 1285, 1290, 1292, 1289, 1267,
- 1288, 815, 3477, 933, 815, 1327, 1327, 1298, 948, 969,
- 1282, 2476, 1268, 1269, 3405, 2246, 815, 3405, 2563, 1632,
- 933, 1334, 1744, 3033, 931, 1006, 2136, 618, 2560, 2126,
- 910, 969, 976, 2189, 146, 1005, 1353, 147, 622, 2188,
- 1740, 1048, 949, 1272, 941, 1014, 2776, 2858, 2541, 1080,
- 3088, 943, 942, 1605, 1604, 1603, 948, 1712, 1711, 1713,
- 1714, 1715, 2527, 2997, 2998, 1741, 3088, 159, 3066, 2127,
- 1601, 988, 83, 622, 171, 604, 2125, 587, 582, 976,
- 2531, 986, 2754, 1734, 2180, 1732, 1733, 602, 1735, 1736,
- 2576, 2260, 2388, 2390, 2757, 1328, 1329, 2037, 2038, 1358,
- 947, 1359, 2554, 1498, 1500, 2553, 3316, 2986, 1362, 2968,
- 2128, 2458, 918, 2247, 2423, 179, 88, 2360, 88, 88,
- 2124, 1730, 2807, 2808, 2530, 1857, 1631, 599, 1464, 1521,
- 1429, 1061, 2418, 975, 88, 2811, 613, 1465, 118, 969,
- 972, 973, 1481, 933, 2081, 1327, 1324, 966, 970, 1067,
- 2902, 609, 2733, 1055, 2807, 2808, 160, 165, 162, 168,
- 169, 170, 172, 174, 175, 176, 177, 2811, 965, 893,
- 1107, 1058, 178, 180, 181, 182, 1093, 1753, 2566, 918,
- 975, 2566, 113, 2565, 3341, 999, 2565, 3029, 1480, 622,
- 2962, 1065, 1484, 1435, 1436, 1437, 1438, 1439, 877, 1878,
- 1099, 1465, 1101, 2200, 3510, 1478, 1800, 3511, 1737, 3509,
- 1482, 1483, 1535, 1879, 98, 99, 2600, 1297, 1471, 1458,
- 948, 976, 589, 1869, 591, 605, 1869, 624, 2277, 623,
- 595, 2500, 593, 597, 606, 598, 2875, 592, 1297, 603,
- 1098, 1100, 594, 607, 608, 611, 614, 615, 616, 612,
- 610, 1079, 601, 625, 114, 1295, 1296, 1013, 2389, 1638,
- 1513, 3480, 1081, 1296, 1624, 101, 3129, 3004, 1297, 1727,
- 1499, 1728, 3003, 2167, 1729, 1817, 1819, 1820, 2179, 1468,
- 1641, 2581, 874, 1640, 1630, 2177, 1673, 2172, 1502, 1676,
- 2304, 1678, 2175, 1608, 1609, 1610, 987, 1599, 1617, 1818,
- 976, 876, 1497, 1695, 985, 1646, 1479, 1647, 1677, 1649,
- 1651, 2172, 3473, 1655, 1657, 1659, 1661, 1663, 1636, 1526,
- 1527, 2987, 975, 1094, 1754, 1051, 2176, 979, 969, 889,
- 1827, 3514, 981, 3371, 1064, 1635, 982, 980, 3520, 1096,
- 1600, 976, 1876, 1097, 1828, 1325, 1326, 1826, 1634, 1634,
- 2174, 3239, 1294, 1102, 1295, 1296, 3063, 983, 3064, 1614,
- 1685, 1686, 1615, 1613, 2583, 1627, 1691, 1692, 976, 2252,
- 2253, 2254, 3309, 1294, 1757, 1295, 1296, 1095, 3372, 1720,
- 1718, 1761, 2602, 1763, 1764, 1765, 1766, 1485, 3238, 3229,
- 1770, 3437, 1319, 1320, 1322, 1321, 1323, 1324, 1755, 1756,
- 3079, 975, 1782, 1294, 1707, 1295, 1296, 969, 972, 973,
- 3078, 933, 1760, 1918, 1749, 966, 970, 3310, 1745, 1767,
- 1768, 1769, 3011, 3010, 1751, 3521, 818, 819, 820, 3000,
- 1681, 2593, 2592, 2591, 2585, 3436, 2589, 1297, 2584, 2784,
- 2582, 2772, 975, 1719, 1717, 2587, 1297, 979, 969, 2482,
- 2481, 1497, 981, 1606, 2586, 2132, 982, 980, 1874, 2265,
- 122, 626, 952, 951, 950, 1721, 1705, 1875, 1706, 975,
- 1704, 1012, 2588, 2590, 1703, 1009, 1759, 1693, 1687, 1684,
- 1297, 1683, 619, 1314, 1315, 1316, 1317, 1318, 1319, 1320,
- 1322, 1321, 1323, 1324, 1682, 1780, 1653, 620, 2994, 627,
- 1781, 1975, 1910, 1899, 1900, 1901, 1902, 1912, 1903, 1904,
- 1905, 1917, 1913, 1906, 1907, 1914, 1915, 1916, 1908, 1909,
- 1911, 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324, 1313,
- 3484, 1497, 1309, 1796, 1310, 1315, 1316, 1317, 1318, 1319,
- 1320, 1322, 1321, 1323, 1324, 2834, 1271, 1831, 1311, 1325,
- 1326, 1308, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322,
- 1321, 1323, 1324, 627, 3482, 1497, 1530, 1852, 1852, 1850,
- 1850, 3474, 1294, 1853, 1295, 1296, 95, 1313, 1872, 2575,
- 3336, 1294, 1873, 1295, 1296, 2465, 627, 96, 1297, 39,
- 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1299, 1313, 1821,
- 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321, 1323,
- 1324, 1823, 2044, 2045, 3335, 1294, 1936, 1295, 1296, 1338,
- 2316, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321,
- 1323, 1324, 2148, 2147, 104, 3313, 1870, 2146, 2145, 2144,
- 2143, 3312, 1497, 1497, 1968, 103, 3311, 102, 1313, 3234,
- 2259, 3218, 1811, 1812, 1813, 1814, 97, 1506, 79, 1788,
- 1789, 1464, 1799, 1824, 3217, 1797, 3128, 1798, 3126, 1997,
- 1465, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321,
- 1323, 1324, 95, 3075, 1975, 2314, 1498, 2004, 1972, 97,
- 1463, 1297, 1980, 96, 1829, 1293, 1497, 1974, 2409, 3499,
- 3329, 1858, 1859, 1507, 3461, 1497, 1861, 1462, 906, 906,
- 1866, 813, 1293, 1497, 1871, 1497, 3265, 1971, 1461, 1856,
- 2028, 2409, 3399, 1294, 3008, 1295, 1296, 1884, 1885, 1886,
- 1887, 1888, 1889, 1890, 1891, 1892, 1893, 2409, 3379, 2409,
- 3375, 1919, 1920, 1921, 1922, 1923, 1924, 1926, 1497, 1931,
- 2993, 1933, 1934, 1935, 1297, 1937, 1938, 1939, 2843, 1945,
- 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955,
- 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965,
- 1966, 1967, 1982, 1969, 1980, 1976, 1977, 906, 2840, 906,
- 906, 906, 906, 906, 2090, 1985, 1986, 98, 99, 1989,
- 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1981, 1998, 1999,
- 2000, 2001, 2002, 2021, 2775, 2022, 2774, 98, 99, 104,
- 2133, 1880, 1881, 1882, 1883, 2027, 1294, 2491, 1295, 1296,
- 103, 2479, 102, 3362, 1497, 1894, 3047, 3326, 3416, 1497,
- 2062, 2085, 1460, 1863, 1865, 2194, 906, 1297, 2193, 2099,
- 2100, 2101, 2102, 2003, 2015, 3242, 1497, 2409, 3230, 2094,
- 2084, 2095, 2096, 2097, 2098, 2034, 1297, 2016, 2040, 2041,
- 2066, 3047, 1497, 3264, 1982, 2409, 3045, 2105, 2106, 2107,
- 2108, 2029, 1784, 890, 2172, 1497, 3222, 2088, 1750, 1294,
- 1716, 1295, 1296, 2047, 2079, 2115, 97, 2161, 923, 2049,
- 1497, 2072, 1830, 2121, 1832, 1833, 1834, 1835, 1836, 1837,
- 1838, 1839, 1840, 1841, 1842, 1843, 1844, 2071, 2087, 2070,
- 2086, 2966, 1497, 2158, 1984, 1297, 2454, 1987, 1988, 2325,
- 1497, 3414, 1497, 2765, 2764, 3221, 2131, 2761, 2762, 2761,
- 2760, 2433, 1497, 2454, 924, 1297, 2246, 2540, 1596, 2521,
- 3412, 1497, 1297, 2117, 184, 1497, 2312, 1708, 2116, 2111,
- 2112, 2514, 2515, 2409, 2408, 2130, 1698, 2134, 2270, 1497,
- 2425, 2142, 2405, 1694, 2166, 1690, 2186, 2169, 123, 2170,
- 1689, 2026, 1294, 1932, 1295, 1296, 1297, 97, 1688, 2455,
- 1930, 166, 2165, 2116, 2168, 2164, 1313, 1508, 978, 2457,
- 1941, 1294, 1105, 1295, 1296, 3055, 2455, 2190, 1634, 3288,
- 1497, 2191, 2192, 2187, 2518, 1297, 2246, 977, 2496, 1314,
- 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324,
- 1497, 1297, 1855, 1497, 2467, 1297, 3286, 1497, 1596, 1595,
- 1541, 1540, 2433, 2432, 2698, 2270, 2274, 163, 2089, 2728,
- 164, 2961, 1293, 2232, 2233, 2961, 2425, 103, 2235, 2246,
- 1294, 2963, 1295, 1296, 2257, 3367, 2197, 2236, 2005, 3340,
- 3283, 1497, 2173, 183, 1941, 2409, 2433, 2263, 2922, 2763,
- 1294, 2268, 1295, 1296, 2271, 2671, 2272, 1294, 2073, 1295,
- 1296, 2279, 2270, 2325, 2301, 2281, 2282, 2283, 2433, 3269,
- 1497, 2225, 2300, 2172, 2155, 2289, 2290, 2291, 2292, 2293,
- 2294, 2295, 2296, 2297, 2298, 2937, 1497, 1297, 2042, 2273,
- 2264, 1294, 1501, 1295, 1296, 2006, 1293, 1297, 2961, 1823,
- 2172, 1855, 1801, 1739, 3012, 1528, 1297, 2262, 960, 959,
- 879, 2305, 2306, 2307, 2308, 2309, 89, 2311, 1297, 3442,
- 1294, 2313, 1295, 1296, 3382, 2318, 2319, 3253, 2320, 1297,
- 1504, 2323, 2244, 2324, 1297, 3219, 1294, 2327, 1295, 1296,
- 1294, 2331, 1295, 1296, 3140, 2336, 2337, 2338, 2339, 3028,
- 3025, 1824, 1297, 2255, 167, 3013, 3014, 3015, 2350, 2787,
- 2353, 2354, 3006, 173, 1669, 1297, 2885, 2884, 2356, 2358,
- 2488, 2930, 1497, 1598, 1297, 2361, 2362, 2363, 2364, 2365,
- 1982, 2927, 1497, 2114, 1297, 2832, 2372, 2373, 89, 2374,
- 2925, 1497, 2377, 2379, 2028, 2276, 2381, 2789, 2278, 1297,
- 2785, 1050, 2890, 1497, 1297, 1981, 2393, 2522, 1297, 2285,
- 2286, 2287, 2288, 2873, 1497, 1670, 1671, 1672, 2380, 1497,
- 2110, 1297, 1294, 2104, 1295, 1296, 1852, 2103, 1850, 1723,
- 1297, 2394, 1294, 1629, 1295, 1296, 2378, 1497, 1625, 1510,
- 1594, 1294, 115, 1295, 1296, 2836, 1297, 3016, 3494, 2357,
- 1497, 2310, 3254, 1294, 1353, 1295, 1296, 2392, 2340, 1497,
- 2973, 2974, 2487, 2129, 1294, 1297, 1295, 1296, 3260, 1294,
- 2019, 1295, 1296, 2267, 3492, 3468, 1665, 1297, 1786, 2429,
- 3348, 3344, 158, 2266, 2395, 1297, 2397, 1294, 2981, 1295,
- 1296, 3274, 2332, 1497, 3017, 3018, 3019, 2976, 1297, 2410,
- 1294, 2781, 1295, 1296, 2780, 1509, 2779, 2698, 1297, 1294,
- 2488, 1295, 1296, 2501, 2226, 1497, 2979, 2978, 2717, 1294,
- 2448, 1295, 1296, 1666, 1667, 1668, 2419, 2716, 1471, 3255,
- 2382, 3475, 2406, 1297, 1294, 39, 1295, 1296, 2033, 1294,
- 1787, 1295, 1296, 1294, 2447, 1295, 1296, 2449, 1505, 2025,
- 3324, 2967, 2720, 1465, 1472, 2407, 1294, 2721, 1295, 1296,
- 2475, 2477, 3226, 2422, 2676, 1294, 2513, 1295, 1296, 1297,
- 3030, 1513, 2718, 2468, 3110, 1297, 3109, 2719, 2452, 2675,
- 2402, 1294, 2932, 1295, 1296, 2456, 3308, 2954, 1297, 2256,
- 3119, 2258, 2459, 2483, 3121, 2953, 2957, 2121, 894, 2466,
- 1294, 2469, 1295, 1296, 1297, 2722, 895, 2442, 2443, 2685,
- 585, 2537, 1294, 1738, 1295, 1296, 2759, 2928, 865, 2480,
- 1294, 2472, 1295, 1296, 3108, 2688, 2690, 2492, 868, 1016,
- 1297, 2489, 2490, 1294, 2691, 1295, 1296, 2493, 2494, 1015,
- 2535, 2498, 2497, 1294, 95, 1295, 1296, 2851, 2284, 1297,
- 2502, 2503, 2504, 2900, 159, 96, 2487, 2569, 2534, 2896,
- 1878, 171, 1270, 1617, 2528, 2299, 2578, 2959, 1294, 934,
- 1295, 1296, 2882, 95, 1879, 2604, 2605, 2606, 2607, 2608,
- 97, 2523, 2524, 123, 96, 2044, 2045, 1297, 2881, 97,
- 3506, 2777, 2533, 2229, 2613, 3421, 3325, 1297, 104, 3249,
- 2758, 2446, 179, 2030, 1294, 1297, 1295, 1296, 2218, 103,
- 1294, 102, 1295, 1296, 2878, 2217, 2594, 2939, 2558, 2674,
- 97, 902, 903, 1294, 2216, 1295, 1296, 2673, 2577, 3296,
- 2215, 2567, 2214, 2376, 2568, 2609, 2250, 102, 2561, 1294,
- 3295, 1295, 1296, 160, 165, 162, 168, 169, 170, 172,
- 174, 175, 176, 177, 3277, 3127, 3125, 3124, 3117, 178,
- 180, 181, 182, 2595, 3026, 1294, 2958, 1295, 1296, 2597,
- 2956, 2375, 2438, 2441, 2442, 2443, 2439, 104, 2440, 2444,
- 2574, 2371, 2973, 2974, 1294, 2650, 1295, 1296, 103, 2370,
- 102, 2790, 2156, 2655, 1612, 2652, 104, 901, 103, 906,
- 3116, 2947, 2598, 2599, 3495, 2425, 2601, 103, 3098, 2603,
- 2618, 3496, 3495, 3, 2405, 2616, 2302, 1971, 2017, 1971,
- 1522, 2707, 1294, 2624, 1295, 1296, 1297, 1514, 3496, 2610,
- 2611, 2612, 1294, 3314, 1295, 1296, 108, 109, 2992, 2725,
- 1294, 2617, 1295, 1296, 2619, 2620, 2621, 892, 91, 1,
- 2622, 2623, 2678, 2996, 1945, 2625, 2655, 3431, 2627, 600,
- 2007, 2629, 2630, 2631, 2632, 2679, 2710, 1469, 2727, 2633,
- 1945, 1945, 1945, 1945, 1945, 2651, 3469, 2653, 2666, 2701,
- 3427, 3428, 2670, 1709, 2701, 1699, 3058, 1942, 906, 2062,
- 3250, 2677, 2793, 2654, 2162, 2656, 2657, 2658, 2659, 2660,
- 2661, 2704, 2692, 2693, 2662, 2663, 3024, 2664, 2119, 2665,
- 2369, 2680, 968, 2729, 881, 148, 2730, 2062, 2062, 2062,
- 2062, 2062, 2709, 882, 2082, 883, 2695, 2723, 2712, 2713,
- 2711, 2715, 2813, 2714, 2667, 2668, 2669, 2062, 2083, 3395,
- 2062, 112, 926, 2731, 2634, 2635, 2636, 2637, 2638, 111,
- 98, 99, 971, 1078, 2696, 2157, 3048, 2737, 2473, 2091,
- 1547, 1294, 1545, 1295, 1296, 2853, 2769, 2768, 2767, 2792,
- 1546, 2438, 2441, 2442, 2443, 2439, 1297, 2440, 2444, 2726,
- 1544, 1749, 2855, 1549, 2791, 2870, 2871, 2872, 1548, 2874,
- 2876, 2820, 2821, 799, 2859, 1297, 2303, 2770, 2771, 2121,
- 2812, 2901, 1790, 2883, 2827, 634, 2445, 628, 2887, 2888,
- 2889, 2891, 2892, 2893, 2894, 186, 1536, 2895, 1297, 2897,
- 2898, 2899, 1515, 2579, 2903, 2904, 2905, 2906, 2907, 2908,
- 2909, 2910, 2911, 2912, 2844, 2849, 2850, 2847, 2846, 2788,
- 2596, 2914, 2919, 1010, 2854, 2923, 189, 2924, 2926, 189,
- 2929, 2931, 632, 2933, 2934, 2935, 2936, 638, 590, 2864,
- 2368, 2942, 2861, 2862, 2766, 2863, 2195, 189, 2865, 596,
- 2867, 1345, 2869, 1297, 1785, 2672, 2916, 2460, 1297, 2367,
- 921, 913, 189, 2920, 1297, 2018, 2396, 920, 3227, 2706,
- 2951, 1297, 2684, 2686, 2412, 2689, 2964, 2965, 2682, 2626,
- 2969, 2628, 2366, 1297, 3307, 3118, 3380, 638, 189, 638,
- 2470, 1294, 1511, 1295, 1296, 2921, 2275, 2639, 2640, 2641,
- 2642, 1867, 1297, 1335, 2059, 3093, 2945, 1816, 656, 655,
- 1294, 2877, 1295, 1296, 1297, 653, 2398, 1054, 2943, 1060,
- 2940, 2941, 1062, 2426, 1300, 2886, 1297, 803, 2948, 2386,
- 2955, 1523, 1297, 1294, 2437, 1295, 1296, 2355, 1297, 2970,
- 2960, 2435, 2349, 2434, 1297, 2227, 2067, 2975, 2348, 2971,
- 1297, 3423, 2061, 2977, 2057, 2347, 2404, 754, 2984, 2985,
- 753, 2982, 2980, 665, 657, 649, 1297, 2346, 2983, 752,
- 751, 1297, 2989, 2819, 3407, 1274, 1297, 2990, 2991, 3031,
- 3032, 1297, 2543, 2820, 2821, 3046, 2345, 1297, 1294, 2833,
- 1295, 1296, 2062, 1294, 2545, 1295, 1296, 2471, 2344, 1294,
- 2829, 1295, 1296, 3052, 3053, 3007, 1294, 3009, 1295, 1296,
- 2343, 1297, 1278, 1487, 643, 939, 2342, 2856, 1294, 3331,
- 1295, 1296, 2341, 2249, 2879, 1486, 1897, 3065, 2335, 1898,
- 3069, 1297, 3338, 2801, 2334, 3042, 2782, 1294, 2519, 1295,
- 1296, 3001, 3002, 2149, 65, 43, 3302, 3368, 750, 1294,
- 2333, 1295, 1296, 747, 3095, 2330, 3096, 3080, 3097, 1297,
- 2329, 1294, 2646, 1295, 1296, 2328, 3054, 1294, 1297, 1295,
- 1296, 2326, 3084, 1294, 2647, 1295, 1296, 3351, 3352, 1294,
- 746, 1295, 1296, 3353, 3092, 1294, 1925, 1295, 1296, 1275,
- 1297, 3444, 3099, 3035, 3027, 2322, 1792, 3039, 3040, 3041,
- 90, 1294, 34, 1295, 1296, 33, 1294, 3070, 1295, 1296,
- 32, 1294, 31, 1295, 1296, 2321, 1294, 30, 1295, 1296,
- 25, 24, 1294, 23, 1295, 1296, 22, 3051, 21, 27,
- 1297, 20, 3091, 19, 18, 2804, 3464, 3505, 117, 3113,
- 52, 49, 47, 2317, 125, 124, 1294, 50, 1295, 1296,
- 46, 1053, 2315, 44, 29, 28, 17, 16, 15, 14,
- 13, 3074, 12, 11, 7, 6, 1294, 37, 1295, 1296,
- 1852, 36, 1850, 3134, 2280, 3142, 35, 1795, 3114, 3123,
- 3132, 3122, 3071, 26, 3072, 3136, 3138, 3073, 3130, 4,
- 3076, 3077, 2506, 2701, 1294, 2151, 1295, 1296, 0, 3081,
- 0, 0, 0, 1294, 0, 1295, 1296, 0, 0, 3083,
- 0, 3143, 3144, 3241, 2269, 0, 2704, 3146, 3228, 0,
- 2704, 0, 3248, 0, 0, 1294, 0, 1295, 1296, 39,
- 0, 3100, 0, 0, 3101, 0, 3102, 3103, 0, 3104,
- 0, 3105, 3258, 3259, 1525, 3261, 3106, 3262, 3263, 0,
- 3225, 3224, 3266, 3267, 3268, 0, 3270, 3273, 3223, 3271,
- 3272, 0, 1542, 0, 3240, 1294, 0, 1295, 1296, 3245,
- 3252, 3131, 3282, 3284, 3285, 3287, 3289, 3290, 3292, 1852,
- 3244, 1850, 3139, 0, 3275, 3141, 0, 3232, 0, 0,
- 0, 0, 0, 0, 0, 1495, 1491, 3145, 0, 0,
- 0, 0, 1495, 1491, 0, 0, 0, 0, 0, 3276,
- 1492, 0, 0, 0, 3278, 3220, 3322, 1492, 3281, 0,
- 0, 0, 0, 0, 0, 0, 3235, 3236, 3237, 3300,
- 3318, 3297, 3298, 0, 3299, 2023, 2024, 1494, 0, 1493,
- 0, 0, 1488, 1489, 1494, 1679, 1493, 0, 0, 0,
- 0, 3315, 0, 0, 0, 0, 189, 0, 189, 3321,
- 184, 189, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1724, 0, 0, 0, 123, 0, 2704, 0, 0, 0,
- 0, 638, 0, 638, 638, 0, 0, 166, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 2741, 0, 3323,
- 0, 0, 1758, 638, 189, 0, 0, 0, 0, 1762,
- 3317, 0, 0, 0, 0, 3305, 0, 0, 0, 0,
- 1773, 1774, 1775, 1776, 1777, 1778, 1779, 0, 0, 0,
- 0, 1340, 3347, 3342, 0, 0, 0, 3082, 0, 0,
- 0, 0, 2742, 163, 3337, 0, 164, 3339, 3334, 0,
- 0, 0, 3363, 0, 0, 0, 0, 0, 3364, 3365,
- 0, 0, 0, 0, 0, 0, 2744, 0, 0, 183,
- 0, 0, 0, 0, 3330, 0, 0, 0, 0, 3357,
- 3376, 0, 3358, 0, 2739, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 3366, 2755, 2756, 39, 0, 0, 3401, 3402, 2740, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 3373, 0,
- 3411, 3413, 3415, 3381, 3408, 3409, 0, 3383, 3386, 3394,
- 3391, 3388, 3387, 3385, 3390, 3389, 2701, 0, 0, 0,
- 0, 2746, 0, 3252, 3396, 3443, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 3419, 0, 0, 0, 3345,
- 0, 0, 0, 3422, 3440, 3430, 3435, 0, 0, 0,
- 0, 0, 0, 1340, 0, 3408, 3409, 3449, 0, 39,
- 167, 3447, 3359, 0, 3460, 3360, 1348, 3361, 0, 173,
- 0, 0, 0, 3378, 0, 0, 0, 0, 3458, 0,
- 3463, 0, 0, 0, 1804, 0, 0, 0, 2754, 0,
- 0, 0, 0, 0, 0, 0, 3481, 3483, 3485, 0,
- 2757, 3478, 3476, 0, 0, 0, 0, 0, 0, 3487,
- 189, 0, 0, 3491, 638, 638, 3486, 3493, 0, 0,
- 0, 0, 0, 1852, 0, 1850, 0, 3504, 3490, 0,
- 189, 3408, 3409, 3501, 3489, 0, 3507, 0, 3306, 0,
- 0, 0, 3515, 3516, 3517, 3513, 0, 0, 0, 0,
- 638, 0, 0, 189, 0, 0, 0, 0, 0, 0,
- 0, 3441, 0, 0, 3523, 638, 3524, 3525, 3272, 0,
- 0, 189, 0, 0, 0, 0, 0, 1852, 0, 1850,
- 0, 0, 3522, 0, 0, 0, 0, 0, 158, 0,
- 0, 3455, 0, 3456, 0, 3457, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 638, 0,
- 0, 2743, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1340, 0, 0, 0, 0, 0, 638, 638, 0,
- 638, 0, 638, 638, 0, 638, 638, 638, 638, 638,
- 638, 0, 0, 0, 0, 0, 0, 0, 1340, 0,
- 0, 1340, 638, 1340, 189, 0, 184, 0, 0, 0,
- 0, 3502, 0, 3503, 0, 0, 0, 1616, 0, 0,
- 0, 0, 0, 0, 189, 0, 0, 0, 0, 0,
- 123, 0, 145, 0, 0, 0, 0, 638, 0, 189,
- 3346, 0, 0, 166, 0, 0, 0, 0, 3356, 0,
- 0, 0, 0, 638, 0, 189, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 2046,
- 0, 189, 0, 0, 156, 0, 0, 2050, 189, 2053,
- 144, 0, 1804, 0, 0, 0, 0, 189, 189, 189,
- 189, 189, 189, 189, 189, 189, 638, 0, 0, 163,
- 0, 0, 164, 0, 0, 0, 0, 0, 0, 0,
- 159, 0, 0, 0, 0, 0, 0, 171, 0, 0,
- 0, 1620, 1621, 155, 154, 183, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 2747, 0, 0, 0,
- 2751, 0, 0, 0, 0, 0, 0, 2750, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 179, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2752, 0, 0, 0, 0, 2748, 0, 0, 814,
- 0, 2749, 81, 0, 0, 0, 0, 0, 0, 160,
- 165, 162, 168, 169, 170, 172, 174, 175, 176, 177,
- 0, 0, 0, 0, 0, 178, 180, 181, 182, 0,
- 0, 0, 0, 0, 149, 1622, 152, 0, 1619, 0,
- 150, 151, 0, 0, 0, 0, 167, 0, 0, 0,
- 0, 0, 0, 0, 0, 173, 0, 0, 0, 3462,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1564,
- 638, 638, 0, 0, 0, 0, 0, 0, 638, 878,
- 0, 81, 0, 189, 0, 0, 0, 0, 1804, 0,
- 0, 0, 0, 0, 0, 2206, 0, 0, 0, 0,
- 878, 0, 0, 0, 2223, 2224, 0, 0, 2228, 0,
- 0, 0, 0, 0, 0, 938, 0, 2231, 0, 0,
- 0, 0, 0, 0, 2234, 0, 0, 0, 0, 0,
- 0, 638, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1340, 0, 0, 0, 0, 0, 0, 0, 0,
- 2237, 0, 638, 0, 0, 0, 0, 0, 1340, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 158, 0, 0, 0, 0, 0,
- 0, 0, 0, 638, 638, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1552, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1983, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 153, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 189, 0, 0, 0,
- 0, 638, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1565, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 0, 0, 638, 0,
- 0, 0, 0, 146, 0, 0, 147, 0, 189, 0,
- 0, 0, 638, 0, 0, 1983, 189, 0, 189, 0,
- 189, 189, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 638, 159, 0, 0, 0,
- 0, 0, 0, 171, 0, 1578, 1581, 1582, 1583, 1584,
- 1585, 1586, 0, 1587, 1588, 1589, 1590, 1591, 1566, 1567,
- 1568, 1569, 1550, 1551, 1579, 0, 1553, 0, 1554, 1555,
- 1556, 1557, 1558, 1559, 1560, 1561, 1562, 0, 0, 1563,
- 1570, 1571, 1572, 1573, 179, 1574, 1575, 1576, 1577, 0,
- 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 2451, 0, 0, 638, 0, 160, 165, 162, 168, 169,
- 170, 172, 174, 175, 176, 177, 0, 0, 0, 0,
- 0, 178, 180, 181, 182, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 638, 0, 0, 0, 0, 638, 0, 0, 0, 638,
- 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2499, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 189, 0, 0,
- 0, 0, 0, 0, 189, 0, 0, 0, 0, 1580,
- 0, 0, 0, 189, 189, 0, 0, 189, 0, 189,
- 0, 0, 0, 0, 0, 0, 189, 0, 0, 0,
- 0, 0, 0, 189, 0, 0, 2548, 2549, 2550, 2551,
- 2552, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 189,
- 1804, 2562, 0, 0, 638, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2570, 0, 0, 0, 0, 0, 1113, 0, 1113,
- 1113, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1340,
- 0, 1983, 0, 0, 0, 0, 878, 1337, 1342, 1343,
- 0, 1346, 0, 1347, 1349, 1350, 1351, 0, 1354, 1355,
- 1357, 1357, 0, 1357, 1361, 1361, 1363, 1364, 1365, 1366,
- 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376,
- 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386,
- 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396,
- 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406,
- 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416,
- 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426,
- 1427, 0, 0, 0, 0, 1428, 0, 1430, 1431, 1432,
- 1433, 1434, 0, 0, 0, 0, 0, 0, 0, 0,
- 1361, 1361, 1361, 1361, 1361, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1440, 1441, 1442, 1443, 1444, 1445,
- 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 189, 0, 0, 0, 0, 1467, 0, 0, 189, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 638,
- 0, 0, 0, 184, 0, 0, 0, 0, 0, 0,
- 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 123, 0, 145,
- 0, 0, 0, 0, 189, 0, 2778, 0, 1473, 189,
- 166, 0, 0, 0, 878, 0, 0, 0, 878, 0,
- 0, 0, 0, 0, 878, 0, 0, 0, 0, 0,
- 0, 0, 2816, 0, 0, 0, 0, 0, 0, 0,
- 0, 156, 0, 0, 0, 0, 2828, 144, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2845, 163, 0, 2848, 164,
- 0, 0, 0, 638, 0, 0, 0, 0, 0, 189,
- 0, 0, 0, 0, 0, 0, 189, 0, 132, 133,
- 155, 154, 183, 0, 0, 0, 0, 0, 0, 0,
- 638, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 0, 0, 0, 0, 0, 0, 638, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1340, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 189, 189, 189, 189,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 189,
- 189, 0, 0, 0, 0, 0, 0, 2944, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 189, 149, 130, 152, 137, 129, 0, 150, 151, 0,
- 0, 0, 0, 167, 0, 0, 0, 0, 0, 0,
- 0, 638, 173, 138, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1564, 141, 139, 134,
- 135, 136, 140, 0, 0, 0, 0, 0, 0, 131,
- 0, 0, 1113, 0, 0, 0, 0, 0, 142, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 638, 0,
- 0, 0, 3005, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 3020, 0, 0, 3021, 3022, 3023, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 638, 0, 0, 0,
- 0, 0, 0, 0, 638, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 158, 0, 0, 0, 638, 0, 0, 0, 0,
- 798, 0, 0, 0, 0, 0, 0, 0, 0, 189,
- 0, 0, 0, 638, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 1552, 1340, 0, 0, 638, 638, 1340, 189, 189, 189,
- 189, 189, 0, 0, 0, 0, 0, 0, 0, 189,
- 0, 0, 0, 0, 0, 189, 0, 189, 617, 0,
- 189, 189, 189, 0, 637, 0, 1113, 1113, 0, 153,
- 0, 0, 0, 0, 0, 81, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 0, 0, 0, 0,
- 0, 0, 0, 1565, 637, 0, 637, 0, 638, 0,
- 0, 1340, 0, 0, 0, 0, 638, 0, 0, 0,
- 146, 189, 0, 147, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 159, 189, 0, 0, 189, 0, 0,
- 171, 0, 1578, 1581, 1582, 1583, 1584, 1585, 1586, 0,
- 1587, 1588, 1589, 1590, 1591, 1566, 1567, 1568, 1569, 1550,
- 1551, 1579, 0, 1553, 0, 1554, 1555, 1556, 1557, 1558,
- 1559, 1560, 1561, 1562, 0, 0, 1563, 1570, 1571, 1572,
- 1573, 179, 1574, 1575, 1576, 1577, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 0, 0, 160, 165, 162, 168, 169, 170, 172, 174,
- 175, 176, 177, 0, 0, 0, 0, 0, 178, 180,
- 181, 182, 80, 41, 42, 82, 189, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 86, 0, 0, 0, 45, 71, 72, 0,
- 69, 73, 0, 0, 0, 0, 0, 0, 0, 70,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 2060, 0, 0, 0,
- 0, 0, 189, 0, 0, 0, 0, 0, 58, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 89, 0, 0, 0, 0, 89, 1580, 0, 816, 0,
- 0, 189, 804, 817, 818, 819, 820, 805, 0, 0,
- 806, 807, 0, 808, 0, 0, 0, 0, 0, 0,
- 189, 0, 0, 189, 189, 189, 0, 813, 821, 822,
- 0, 0, 0, 638, 638, 0, 938, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 3343,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 938,
- 0, 0, 0, 0, 2822, 2823, 0, 0, 0, 0,
- 638, 638, 638, 638, 0, 0, 823, 824, 825, 826,
- 827, 828, 829, 830, 831, 832, 833, 834, 835, 836,
- 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
- 847, 848, 849, 850, 851, 852, 853, 854, 855, 856,
- 857, 858, 859, 860, 861, 862, 863, 864, 48, 51,
- 54, 53, 56, 0, 68, 0, 0, 77, 74, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 57, 85, 84, 0, 0, 66, 67, 55, 2824, 0,
- 0, 0, 0, 75, 76, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 189, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1340, 0, 0, 0, 0,
- 638, 0, 638, 0, 59, 60, 0, 61, 62, 63,
- 64, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2825, 2826, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2251, 0, 0, 637, 1266,
- 637, 637, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
- 637, 0, 0, 0, 0, 189, 0, 0, 638, 0,
- 0, 0, 0, 0, 2261, 0, 0, 0, 0, 0,
- 0, 638, 0, 0, 0, 0, 0, 0, 1339, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 770, 0, 0, 774, 0, 771, 772, 0, 0,
- 0, 773, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 83, 0, 0, 0,
- 0, 638, 0, 0, 0, 638, 638, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 638, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 88, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2359, 0, 0, 0, 0, 755, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1339, 0, 2391, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 638, 0, 0, 0, 0, 0, 0,
- 878, 0, 0, 0, 0, 0, 0, 0, 0, 189,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 636,
- 0, 2430, 2431, 0, 0, 0, 0, 638, 189, 0,
- 2060, 0, 0, 878, 2450, 0, 0, 0, 0, 0,
- 0, 637, 637, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 930,
- 0, 937, 0, 0, 0, 0, 0, 637, 638, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1340, 0,
- 638, 0, 637, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1593, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1602, 0, 638, 638, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 637, 638, 1628, 0, 0,
- 0, 0, 0, 2526, 0, 1637, 0, 0, 1339, 1639,
- 189, 638, 1642, 1643, 637, 637, 0, 637, 0, 637,
- 637, 0, 637, 637, 637, 637, 637, 637, 0, 0,
- 0, 0, 0, 0, 0, 1339, 1674, 1675, 1339, 637,
- 1339, 0, 1680, 0, 0, 0, 0, 0, 89, 0,
- 0, 816, 0, 0, 638, 804, 817, 818, 819, 820,
- 805, 0, 0, 806, 807, 0, 808, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 638, 0, 0, 0,
- 813, 821, 822, 0, 0, 0, 0, 1742, 0, 0,
- 637, 0, 0, 638, 0, 638, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 2822, 2823, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 823,
- 824, 825, 826, 827, 828, 829, 830, 831, 832, 833,
- 834, 835, 836, 837, 838, 839, 840, 841, 842, 843,
- 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
- 854, 855, 856, 857, 858, 859, 860, 861, 862, 863,
- 864, 0, 2643, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 816, 0,
- 1975, 2824, 0, 817, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1851, 0, 2060, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 2705, 0,
- 81, 0, 0, 2060, 2060, 2060, 2060, 2060, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2060, 0, 0, 2060, 0, 0, 0,
- 0, 0, 0, 2825, 2826, 0, 0, 637, 637, 0,
- 0, 0, 0, 0, 0, 637, 823, 824, 825, 826,
- 827, 828, 829, 830, 831, 832, 833, 834, 835, 836,
- 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
- 847, 848, 849, 850, 851, 852, 853, 854, 855, 856,
- 857, 858, 859, 860, 861, 862, 863, 864, 0, 2810,
- 0, 0, 0, 0, 0, 0, 0, 0, 637, 2818,
- 0, 0, 0, 0, 0, 0, 0, 0, 1339, 0,
- 0, 0, 0, 816, 0, 0, 0, 1860, 817, 637,
- 0, 0, 0, 0, 0, 1339, 0, 0, 1851, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 637, 637, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1115, 0, 1115, 1115, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1273, 0, 0, 0, 0,
- 637, 823, 824, 825, 826, 827, 828, 829, 830, 831,
- 832, 833, 834, 835, 836, 837, 838, 839, 840, 841,
- 842, 843, 844, 845, 846, 847, 848, 849, 850, 851,
- 852, 853, 854, 855, 856, 857, 858, 859, 860, 861,
- 862, 863, 864, 0, 0, 0, 0, 0, 637, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 637, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 637,
- 0, 0, 637, 0, 0, 0, 0, 0, 2060, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2988, 637, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 0, 0, 2139, 2140, 2141, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 0, 0, 0, 0,
- 637, 1637, 0, 0, 1637, 0, 1637, 0, 0, 0,
- 0, 0, 2171, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1475, 1476, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 637, 0, 0, 0, 637, 637, 0, 0,
- 0, 0, 1519, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1537, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 3107, 0,
- 3111, 3112, 0, 0, 0, 0, 0, 0, 0, 0,
- 930, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2705, 0, 81, 0, 2705, 0, 1645,
- 1645, 0, 1645, 0, 1645, 1645, 0, 1654, 1645, 1645,
- 1645, 1645, 1645, 0, 0, 0, 0, 0, 0, 0,
- 0, 637, 0, 0, 930, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1722,
- 0, 0, 0, 0, 3233, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1746, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1339, 0, 637, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1115, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2705, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1363, 1364, 1365,
- 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375,
- 1376, 1377, 1378, 1382, 1383, 1384, 1385, 1386, 1387, 1388,
- 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398,
- 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408,
- 1409, 1410, 1411, 1413, 1414, 1415, 1416, 1417, 1418, 1419,
- 1420, 1421, 1422, 1440, 1441, 1442, 1443, 1444, 1445, 1446,
- 1447, 1448, 1449, 1450, 1451, 1452, 1453, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 637, 0, 0, 0,
- 0, 3332, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 81,
- 0, 0, 1115, 1115, 0, 0, 0, 0, 0, 0,
- 1793, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 2464, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1846, 0, 0, 0, 0, 0, 0,
- 3377, 0, 0, 0, 0, 81, 0, 0, 0, 756,
- 637, 0, 0, 0, 1862, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 0, 0, 637, 1895, 1896, 0, 1637, 1637,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 0, 187, 0, 0, 586, 0, 0, 0, 1339,
- 2536, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 586, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1115, 0, 0, 888, 0,
- 0, 3459, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 907, 907, 0, 0, 0,
- 0, 0, 0, 0, 586, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2020, 0, 0, 0, 0, 637, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2032, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1519, 0, 0, 1115, 0, 0,
- 0, 0, 0, 0, 0, 637, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 930, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 637, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 937, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 637, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 930,
- 637, 0, 0, 0, 0, 937, 0, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 0, 0, 1339, 0,
- 0, 637, 637, 1339, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 930, 0, 0, 0, 0, 1846, 0, 0,
- 0, 1846, 1846, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2773, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 637, 0, 0, 1339, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 2852, 0, 2238, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1115, 637, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 586, 0, 586, 0, 0, 586, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 2995, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 637, 637, 0, 0, 0, 0, 0, 1341, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2399, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 2414, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 637, 637,
- 637, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2495, 0, 0, 0, 0,
+ 1519, 821, 3130, 3131, 813, 1883, 3129, 3544, 3555, 3363,
+ 814, 684, 3448, 3512, 2116, 3513, 2957, 3469, 1570, 2065,
+ 1130, 1776, 3413, 3100, 3436, 2879, 2781, 2788, 663, 3295,
+ 3347, 909, 2045, 2839, 2844, 2841, 2840, 780, 2830, 2838,
+ 40, 1128, 2843, 1829, 2742, 2842, 5, 3345, 3087, 885,
+ 2745, 3159, 2047, 776, 2796, 3335, 2393, 2427, 2688, 2858,
+ 1526, 666, 2746, 2190, 2996, 2743, 3164, 2990, 2071, 2859,
+ 2623, 775, 1011, 774, 1489, 694, 2466, 781, 2861, 2090,
+ 3016, 2087, 2740, 2982, 2453, 664, 2007, 2527, 2730, 2153,
+ 2885, 662, 2178, 2158, 2607, 2528, 1626, 2529, 2559, 2104,
+ 2221, 941, 2478, 2092, 41, 2459, 910, 887, 2091, 1513,
+ 159, 2445, 2429, 39, 1898, 1977, 1978, 1105, 1673, 889,
+ 2004, 893, 2012, 2599, 2177, 2079, 1655, 145, 2238, 1837,
+ 2160, 2520, 969, 1559, 2495, 1539, 96, 676, 2094, 1375,
+ 912, 1493, 1902, 2199, 974, 1076, 658, 100, 101, 1856,
+ 1754, 1303, 2175, 1288, 1662, 949, 977, 975, 1772, 948,
+ 2149, 976, 2072, 671, 899, 945, 1544, 1558, 2150, 980,
+ 2013, 927, 929, 1879, 1974, 95, 1359, 103, 1335, 1828,
+ 896, 1126, 1119, 81, 1911, 1781, 1621, 128, 895, 129,
+ 163, 123, 121, 122, 1060, 897, 102, 894, 670, 89,
+ 987, 80, 1383, 653, 922, 1379, 94, 3372, 2552, 3545,
+ 1647, 2192, 2193, 2194, 2192, 3088, 2827, 2582, 2581, 2236,
+ 1013, 598, 917, 921, 2550, 1739, 3080, 91, 1304, 3496,
+ 3043, 91, 902, 1030, 1031, 1032, 2849, 1035, 1036, 1037,
+ 1038, 124, 3398, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
+ 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057,
+ 942, 1016, 130, 632, 2615, 830, 831, 832, 3399, 91,
+ 2616, 3134, 903, 654, 3394, 937, 936, 886, 1304, 3393,
+ 638, 1844, 3134, 888, 2, 2042, 2043, 966, 1859, 990,
+ 1843, 1842, 2847, 2849, 1841, 1840, 830, 831, 832, 935,
+ 778, 779, 911, 1839, 967, 1812, 2846, 2281, 1017, 1020,
+ 1021, 124, 965, 964, 963, 656, 1299, 657, 2853, 2425,
+ 186, 2726, 2225, 3490, 2169, 2455, 1033, 107, 108, 109,
+ 3565, 112, 1320, 991, 118, 91, 1314, 187, 638, 1520,
+ 593, 2572, 3516, 632, 125, 3449, 147, 2163, 958, 2847,
+ 953, 651, 652, 1530, 1528, 1024, 2692, 168, 3133, 3511,
+ 3500, 881, 882, 883, 884, 3498, 2224, 892, 3535, 3133,
+ 935, 778, 779, 2962, 2068, 2853, 2961, 2067, 186, 124,
+ 3394, 2223, 632, 1531, 1529, 3499, 1314, 2575, 158, 3455,
+ 3497, 3348, 2394, 1849, 146, 924, 925, 2904, 3291, 3290,
+ 1015, 3093, 125, 659, 3094, 3526, 1014, 3301, 3494, 632,
+ 82, 82, 3112, 165, 632, 168, 166, 3377, 82, 2290,
+ 3455, 84, 1290, 82, 629, 3101, 3479, 3437, 3445, 2218,
+ 2850, 3300, 1888, 3111, 2469, 134, 135, 157, 156, 185,
+ 3474, 2924, 1310, 1636, 3477, 1302, 2589, 2590, 1560, 2504,
+ 1561, 2075, 2503, 3483, 3484, 2505, 2778, 2779, 2509, 2470,
+ 3177, 2798, 2799, 2111, 2112, 1821, 1822, 1317, 3478, 1318,
+ 1319, 165, 614, 633, 166, 2426, 2777, 2614, 2287, 2110,
+ 2598, 1100, 1101, 1095, 2288, 612, 879, 2850, 91, 91,
+ 878, 1123, 1310, 2044, 2162, 2553, 91, 185, 3364, 1083,
+ 957, 91, 1083, 959, 1084, 1096, 1089, 1084, 913, 2516,
+ 2993, 919, 919, 2881, 1112, 1082, 1114, 1081, 2657, 2129,
+ 2128, 1289, 632, 928, 2279, 609, 2462, 2463, 1300, 632,
+ 632, 3517, 2912, 2910, 624, 1820, 1059, 646, 151, 132,
+ 154, 139, 131, 1824, 152, 153, 650, 644, 2886, 619,
+ 1556, 169, 3518, 633, 1111, 1113, 3323, 1497, 3324, 622,
+ 175, 140, 2600, 1034, 2282, 2283, 2285, 2284, 962, 2797,
+ 1069, 1070, 2075, 2560, 2200, 143, 141, 136, 137, 138,
+ 142, 2800, 2874, 1102, 2585, 1729, 2244, 133, 2239, 3547,
+ 2875, 1755, 633, 1103, 1116, 1121, 144, 1097, 1090, 1063,
+ 1104, 962, 1072, 954, 1122, 2882, 1098, 1099, 1065, 169,
+ 956, 955, 2602, 2883, 3082, 938, 932, 930, 175, 633,
+ 3081, 2262, 1040, 2241, 633, 599, 960, 601, 615, 1730,
+ 635, 1731, 634, 605, 1760, 603, 607, 616, 608, 1039,
+ 602, 2245, 613, 2073, 2074, 604, 617, 618, 621, 625,
+ 626, 627, 623, 620, 3491, 611, 636, 3275, 1109, 960,
+ 1000, 3078, 1110, 2251, 2247, 2249, 2250, 2248, 2252, 2253,
+ 998, 2203, 1115, 1309, 1306, 1307, 1308, 1313, 1315, 1312,
+ 1336, 1311, 1500, 2658, 160, 970, 938, 932, 930, 971,
+ 2259, 1305, 2260, 3138, 2261, 2243, 1108, 2088, 971, 2691,
+ 1009, 1008, 2901, 1337, 1338, 1339, 1340, 1341, 1342, 1343,
+ 1345, 1344, 1346, 1347, 1371, 1007, 1006, 1005, 1004, 1003,
+ 1002, 997, 1640, 1309, 1306, 1307, 1308, 1313, 1315, 1312,
+ 1010, 1311, 633, 1361, 3371, 2551, 1117, 2242, 1350, 633,
+ 633, 1305, 160, 1350, 2800, 1079, 3566, 1085, 1086, 1087,
+ 1088, 961, 946, 989, 962, 1058, 944, 2166, 1353, 1354,
+ 1355, 1356, 155, 3523, 2073, 2074, 2288, 968, 1367, 3451,
+ 946, 1124, 1125, 827, 1557, 3077, 946, 827, 1661, 1093,
+ 1019, 983, 2587, 982, 961, 1773, 982, 2554, 2222, 2176,
+ 1018, 923, 2518, 2430, 2432, 2603, 2229, 2167, 2228, 1769,
+ 3451, 3450, 1291, 2820, 2165, 1027, 2994, 1001, 2584, 3559,
+ 1634, 1281, 1282, 1283, 2606, 827, 1357, 999, 1633, 1062,
+ 1632, 989, 2570, 148, 2619, 1770, 149, 931, 1630, 2302,
+ 597, 592, 3450, 2851, 2852, 2220, 1351, 1352, 2168, 1741,
+ 1740, 1742, 1743, 1744, 3041, 3042, 2855, 1763, 2164, 1761,
+ 1762, 2597, 1764, 1765, 2596, 988, 161, 3492, 85, 989,
+ 992, 982, 3132, 173, 1487, 994, 637, 2574, 3110, 995,
+ 993, 3360, 3030, 3132, 3012, 2500, 2465, 2402, 1891, 1548,
+ 1452, 1074, 989, 3482, 1026, 2460, 1660, 630, 120, 2117,
+ 2851, 2852, 1347, 2624, 1912, 1521, 1523, 1350, 931, 1381,
+ 1385, 1382, 631, 2855, 181, 2776, 1106, 2289, 1913, 1759,
+ 1298, 2573, 90, 90, 161, 906, 1120, 1501, 1782, 3385,
+ 90, 173, 1061, 988, 1503, 90, 1080, 3481, 1507, 1012,
+ 2609, 2946, 1078, 3073, 889, 2608, 3006, 961, 1504, 2240,
+ 1488, 1833, 1071, 1766, 1068, 1320, 115, 162, 167, 164,
+ 170, 171, 172, 174, 176, 177, 178, 179, 1562, 2643,
+ 2431, 988, 181, 180, 182, 183, 184, 982, 985, 986,
+ 1903, 946, 2319, 1903, 2543, 979, 983, 2626, 2609, 1851,
+ 1853, 1854, 1092, 2608, 988, 1458, 1459, 1460, 1461, 1462,
+ 982, 985, 986, 1094, 946, 989, 1505, 1506, 979, 983,
+ 100, 101, 3173, 1852, 1488, 162, 167, 164, 170, 171,
+ 172, 174, 176, 177, 178, 179, 3527, 1494, 116, 978,
+ 1481, 180, 182, 183, 184, 1319, 1320, 3048, 3557, 2219,
+ 103, 3558, 989, 3556, 1318, 1319, 3047, 1910, 2207, 1637,
+ 1638, 1639, 1670, 1669, 1667, 1659, 2217, 2636, 2635, 2634,
+ 2215, 2902, 2628, 1107, 2632, 1000, 2627, 1756, 2625, 1757,
+ 998, 2212, 1758, 2630, 1321, 1783, 2212, 2294, 2295, 2296,
+ 1522, 1702, 2629, 3519, 1705, 1077, 1707, 1064, 3567, 1653,
+ 1317, 1491, 1318, 1319, 3031, 886, 1502, 3415, 3353, 2645,
+ 2631, 2633, 901, 1376, 1525, 1778, 1724, 988, 888, 1025,
+ 2216, 1646, 1320, 1022, 2009, 2214, 1714, 1715, 2006, 3561,
+ 3531, 1520, 1720, 1721, 1665, 1320, 1675, 2008, 1676, 3283,
+ 1678, 1680, 3282, 1706, 1684, 1686, 1688, 1690, 1692, 1553,
+ 1554, 825, 3416, 3354, 988, 3273, 3107, 2346, 3108, 992,
+ 982, 1664, 638, 1320, 994, 3123, 3122, 1629, 995, 993,
+ 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1344, 1346, 1347,
+ 1644, 1317, 1642, 1318, 1319, 3568, 1656, 2080, 2081, 996,
+ 1342, 1343, 1345, 1344, 1346, 1347, 1508, 1749, 1336, 2784,
+ 2618, 3055, 3054, 1643, 3044, 2828, 1663, 1663, 2816, 2525,
+ 1710, 1340, 1341, 1342, 1343, 1345, 1344, 1346, 1347, 1784,
+ 1785, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1344,
+ 1346, 1347, 1861, 1789, 2524, 1747, 1520, 830, 831, 832,
+ 1796, 1797, 1798, 1908, 2785, 1736, 1862, 1348, 1349, 1860,
+ 1774, 2523, 1909, 2172, 1750, 1786, 1734, 1317, 2009, 1318,
+ 1319, 1748, 1790, 1635, 1792, 1793, 1794, 1795, 2787, 1320,
+ 1317, 1799, 1318, 1319, 1733, 124, 965, 964, 963, 1732,
+ 1722, 1716, 1713, 1811, 1712, 1711, 2782, 1324, 1325, 1326,
+ 1327, 1328, 1329, 1330, 1322, 1320, 1682, 1788, 1317, 1746,
+ 1318, 1319, 1336, 2798, 2799, 1332, 2878, 1333, 1285, 1735,
+ 2783, 3038, 638, 2507, 638, 1320, 1556, 1809, 3520, 1810,
+ 1320, 1334, 1348, 1349, 1331, 1337, 1338, 1339, 1340, 1341,
+ 1342, 1343, 1345, 1344, 1346, 1347, 1336, 3380, 1540, 1520,
+ 1320, 3379, 2307, 2789, 2188, 2187, 2186, 2185, 2184, 2183,
+ 3357, 1533, 40, 3529, 1520, 40, 2451, 3546, 2358, 1337,
+ 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1344, 1346, 1347,
+ 3507, 1520, 97, 1886, 1886, 1628, 1884, 1884, 1887, 3461,
+ 1520, 1320, 1864, 98, 1866, 1867, 1868, 1869, 1870, 1871,
+ 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1534, 2356, 3459,
+ 1520, 1906, 1316, 1520, 1317, 1907, 1318, 1319, 1316, 1520,
+ 3373, 2797, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1345,
+ 1344, 1346, 1347, 2800, 3457, 1520, 2451, 3444, 1487, 1857,
+ 1317, 106, 1318, 1319, 1361, 1855, 2451, 3423, 1520, 1970,
+ 1865, 3356, 105, 3355, 104, 2451, 3419, 3309, 106, 1320,
+ 1317, 1904, 1318, 1319, 3278, 1317, 3262, 1318, 1319, 105,
+ 3261, 104, 3172, 1966, 3170, 3332, 1520, 2002, 3406, 1520,
+ 99, 3091, 3370, 3286, 1520, 1317, 1952, 1318, 1319, 2451,
+ 3274, 3308, 1817, 1818, 3119, 1486, 1485, 1484, 3052, 1858,
+ 3091, 1520, 2031, 2016, 2015, 81, 3037, 1834, 81, 2451,
+ 3089, 3266, 1780, 2887, 1488, 2212, 1520, 1964, 2884, 1521,
+ 2038, 2819, 1520, 3265, 2014, 1320, 1317, 1975, 1318, 1319,
+ 1320, 1336, 2818, 2301, 1320, 3010, 1520, 3099, 1320, 1520,
+ 2561, 1863, 2534, 3330, 1520, 2367, 1520, 2539, 1537, 2786,
+ 2005, 2809, 2808, 2062, 1337, 1338, 1339, 1340, 1341, 1342,
+ 1343, 1345, 1344, 1346, 1347, 2521, 2055, 1890, 2056, 2806,
+ 2807, 2804, 2805, 1897, 1899, 1944, 1933, 1934, 1935, 1936,
+ 1946, 1937, 1938, 1939, 1951, 1947, 1940, 1941, 1948, 1949,
+ 1950, 1942, 1943, 1945, 1317, 2039, 1318, 1319, 1483, 2016,
+ 2085, 1975, 1914, 1915, 1916, 1917, 1336, 2019, 2020, 3327,
+ 1520, 2804, 2803, 2125, 1536, 2126, 1928, 2234, 3313, 1520,
+ 2014, 2098, 2981, 1520, 100, 101, 2475, 1520, 1320, 1337,
+ 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1344, 1346, 1347,
+ 1320, 2288, 2583, 3005, 100, 101, 1625, 2564, 1320, 2557,
+ 2558, 2354, 1320, 2061, 2018, 2233, 1320, 2021, 2022, 2173,
+ 1317, 2070, 1318, 1319, 2050, 1317, 2496, 1318, 1319, 1317,
+ 1813, 1318, 1319, 1317, 1779, 1318, 1319, 2451, 2450, 97,
+ 2135, 2136, 2137, 2138, 1745, 1320, 99, 2312, 1520, 902,
+ 98, 1889, 1520, 1316, 2037, 2447, 1737, 1727, 2496, 1723,
+ 2121, 2049, 2130, 99, 2131, 2132, 2133, 2134, 2120, 1719,
+ 99, 2060, 2974, 1520, 2102, 1718, 1717, 1625, 1624, 2063,
+ 2141, 2142, 2143, 2144, 2790, 1520, 1568, 1567, 2794, 2497,
+ 1535, 1118, 2971, 1520, 105, 2793, 2969, 1520, 2213, 2499,
+ 2155, 1520, 2083, 2124, 91, 1520, 2474, 2161, 937, 936,
+ 1845, 1846, 1847, 1848, 2201, 2108, 2123, 2107, 2106, 3007,
+ 2122, 2497, 1520, 1317, 2467, 1318, 1319, 3411, 2312, 2795,
+ 3521, 2288, 3384, 2451, 2791, 1317, 2475, 1318, 1319, 2792,
+ 2966, 2741, 2171, 1317, 2198, 1318, 1319, 1317, 2771, 1318,
+ 1319, 1317, 3005, 1318, 1319, 2806, 2212, 2714, 2288, 1892,
+ 1893, 2475, 2109, 1316, 1895, 2312, 919, 919, 1900, 2152,
+ 2174, 2170, 1905, 2145, 2147, 2148, 2206, 2156, 2182, 2209,
+ 1317, 2210, 1318, 1319, 2467, 1918, 1919, 1920, 1921, 1922,
+ 1923, 1924, 1925, 1926, 1927, 2367, 2475, 2226, 990, 1953,
+ 1954, 1955, 1956, 1957, 1958, 1960, 2208, 1965, 2205, 1967,
+ 1968, 1969, 2156, 1971, 1972, 1973, 2204, 1979, 1980, 1981,
+ 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991,
+ 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
+ 2227, 2003, 991, 2010, 2011, 919, 2343, 919, 919, 919,
+ 919, 919, 1663, 2342, 2212, 2195, 3005, 2023, 2024, 2025,
+ 2026, 2027, 2028, 2029, 2030, 2078, 2032, 2033, 2034, 2035,
+ 2036, 1524, 2237, 2040, 2230, 1889, 1835, 1819, 2231, 2232,
+ 2298, 2305, 2300, 1768, 3056, 2310, 1555, 1320, 2313, 1698,
+ 2314, 891, 973, 972, 1320, 2321, 3487, 3426, 1320, 2323,
+ 2324, 2325, 2016, 2015, 919, 2316, 2919, 3297, 1527, 2331,
+ 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2265,
+ 2272, 2273, 1320, 2306, 3263, 2275, 3184, 3072, 1857, 3069,
+ 2076, 2077, 3050, 2304, 2276, 3057, 3058, 3059, 2299, 2880,
+ 1699, 1700, 1701, 2929, 2831, 2347, 2348, 2349, 2350, 2351,
+ 2928, 2353, 1627, 2154, 2876, 2355, 2115, 1320, 2833, 2360,
+ 2361, 2829, 2362, 2278, 1320, 2365, 2565, 2366, 1320, 91,
+ 2531, 2369, 1520, 2151, 1320, 2373, 2326, 2286, 2315, 2378,
+ 2379, 2380, 2381, 3368, 2146, 1320, 2140, 2139, 1858, 1752,
+ 1658, 1654, 2392, 2341, 2395, 2396, 1320, 1623, 117, 1063,
+ 2297, 1320, 2398, 2400, 3298, 1320, 2934, 1520, 2530, 2403,
+ 2404, 2405, 2406, 2407, 1320, 3060, 2169, 2157, 1694, 1320,
+ 2414, 2415, 1317, 2416, 1318, 1319, 2419, 2421, 2062, 1317,
+ 2423, 1318, 1319, 1317, 2318, 1318, 1319, 2053, 3017, 3018,
+ 2435, 2917, 1520, 1886, 3541, 1815, 1884, 2436, 2422, 1520,
+ 3539, 1320, 2420, 1520, 3514, 3392, 2531, 1317, 3304, 1318,
+ 1319, 1320, 3061, 3062, 3063, 1695, 1696, 1697, 3318, 2399,
+ 1520, 3020, 2825, 2824, 1320, 2823, 2741, 2544, 1320, 2266,
+ 2382, 1520, 3388, 3023, 3022, 2374, 1520, 2760, 2309, 2352,
+ 3270, 2434, 1317, 2759, 1318, 1319, 3299, 2069, 2308, 1317,
+ 1532, 1318, 1319, 1317, 3074, 1318, 1319, 1816, 2763, 1317,
+ 1320, 1318, 1319, 2764, 904, 40, 2765, 1320, 2484, 2485,
+ 1317, 2471, 1318, 1319, 2489, 2059, 1320, 2491, 2437, 2452,
+ 2439, 1317, 1320, 1318, 1319, 3025, 1317, 3011, 1318, 1319,
+ 1317, 3163, 1318, 1319, 2719, 2976, 2718, 3352, 1320, 1317,
+ 2761, 1318, 1319, 1320, 1317, 2762, 1318, 1319, 2972, 2731,
+ 2733, 2448, 2944, 905, 2728, 1320, 2490, 3165, 2734, 1494,
+ 2461, 2998, 2424, 2802, 2480, 2483, 2484, 2485, 2481, 2997,
+ 2482, 2486, 3001, 1767, 3017, 3018, 1317, 2444, 1318, 1319,
+ 2517, 2519, 2449, 1320, 2940, 877, 1317, 1320, 1318, 1319,
+ 1488, 2926, 2535, 1320, 2464, 2514, 2556, 1029, 1320, 1317,
+ 2925, 1318, 1319, 1317, 1320, 1318, 1319, 2526, 1028, 907,
+ 2510, 2494, 1912, 97, 3154, 2533, 3153, 908, 2498, 2895,
+ 2536, 2537, 2922, 2501, 98, 2580, 1913, 2418, 1284, 97,
+ 2530, 2508, 2161, 2612, 2571, 1317, 99, 1318, 1319, 2417,
+ 98, 125, 1317, 1320, 1318, 1319, 2511, 3003, 1320, 2080,
+ 2081, 1317, 2522, 1318, 1319, 99, 1320, 1317, 3553, 1318,
+ 1319, 1320, 2578, 2821, 3152, 1320, 3466, 2413, 2269, 3369,
+ 2532, 2412, 3293, 1317, 1320, 1318, 1319, 2411, 1317, 2540,
+ 1318, 1319, 2410, 2541, 2545, 2546, 2547, 2320, 2409, 2801,
+ 1317, 2488, 1318, 1319, 2064, 915, 916, 2717, 2327, 2328,
+ 2329, 2330, 2258, 1646, 1320, 2716, 2577, 2647, 2648, 2649,
+ 2650, 2651, 2622, 2257, 2566, 2567, 2256, 1320, 1317, 2255,
+ 1318, 1319, 1317, 2254, 1318, 1319, 2656, 2408, 1317, 2983,
+ 1318, 1319, 2397, 1317, 2292, 1318, 1319, 1320, 104, 1317,
+ 2391, 1318, 1319, 1376, 106, 2390, 3340, 3339, 3321, 2389,
+ 2576, 1320, 3171, 3160, 3169, 105, 106, 104, 2388, 2637,
+ 105, 2601, 3168, 3161, 1518, 1514, 2639, 105, 2621, 2610,
+ 3070, 2652, 2611, 2620, 1320, 2604, 3002, 3000, 1317, 1515,
+ 1318, 1319, 1320, 1317, 2834, 1318, 1319, 2196, 2387, 1320,
+ 2991, 1317, 1641, 1318, 1319, 914, 1317, 2467, 1318, 1319,
+ 1317, 2386, 1318, 1319, 2057, 2058, 1517, 2640, 1516, 1317,
+ 3142, 1318, 1319, 2480, 2483, 2484, 2485, 2481, 106, 2482,
+ 2486, 2385, 3543, 3542, 2698, 2669, 2447, 2671, 2659, 105,
+ 2344, 104, 2051, 2693, 1549, 2384, 1541, 2695, 3542, 1317,
+ 99, 1318, 1319, 2682, 2683, 2684, 2685, 110, 111, 2005,
+ 1320, 2005, 1317, 2750, 1318, 1319, 3543, 3358, 2383, 3,
+ 3036, 93, 1540, 2638, 2667, 2098, 2377, 1832, 1, 1287,
+ 10, 2768, 1317, 2376, 1318, 1319, 1286, 3040, 2738, 2677,
+ 2678, 2679, 2680, 2681, 3476, 610, 1317, 2698, 1318, 1319,
+ 893, 2041, 1492, 2098, 2098, 2098, 2098, 2098, 2694, 2753,
+ 2696, 1778, 2721, 2744, 2697, 3515, 2770, 2722, 2744, 1317,
+ 2661, 1318, 1319, 2098, 3472, 1830, 2098, 1317, 9, 1318,
+ 1319, 2709, 1831, 3473, 1317, 8, 1318, 1319, 2747, 1738,
+ 1728, 3102, 2713, 1320, 2375, 1976, 2720, 3294, 2723, 2837,
+ 2202, 3068, 2735, 2736, 2159, 981, 150, 2118, 2119, 3439,
+ 114, 939, 113, 984, 1091, 2772, 2197, 895, 2773, 3092,
+ 2755, 2756, 2515, 2758, 2754, 2857, 894, 2757, 2127, 2766,
+ 1574, 1572, 2752, 1573, 1571, 1576, 100, 101, 1575, 2774,
+ 2903, 2345, 2945, 2814, 2815, 1317, 1823, 1318, 1319, 1518,
+ 1514, 645, 2487, 639, 2836, 188, 1563, 1542, 2897, 2958,
+ 2780, 1023, 600, 2810, 1515, 2813, 2812, 2811, 2235, 606,
+ 1368, 2710, 2711, 2712, 1814, 2715, 2502, 2372, 2914, 2915,
+ 2916, 934, 2918, 2920, 2864, 926, 2865, 2052, 2438, 1511,
+ 1512, 1517, 933, 1516, 3271, 2749, 2927, 2161, 2856, 2835,
+ 2995, 2931, 2932, 2933, 2935, 2936, 2937, 2938, 2727, 2729,
+ 2939, 2454, 2941, 2942, 2943, 2871, 2732, 2947, 2948, 2949,
+ 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2725, 1317, 3351,
+ 1318, 1319, 2888, 2891, 3162, 2963, 3424, 2617, 2967, 2890,
+ 2968, 2970, 2512, 2973, 2975, 2899, 2977, 2978, 2979, 2980,
+ 2898, 1538, 2965, 2317, 2986, 1901, 919, 1358, 2908, 2641,
+ 2642, 2905, 2906, 2644, 2907, 2095, 2646, 2909, 3137, 2911,
+ 1850, 2913, 668, 667, 665, 2440, 2468, 1323, 815, 2428,
+ 1550, 2479, 2477, 2476, 2267, 2960, 2653, 2654, 2655, 3008,
+ 3009, 1320, 2964, 3013, 2103, 1320, 2893, 2894, 2660, 3019,
+ 1320, 2662, 2663, 2664, 3015, 3468, 2097, 2665, 2666, 2093,
+ 2446, 1979, 2668, 766, 765, 2670, 677, 1320, 2672, 2673,
+ 2674, 2675, 669, 661, 764, 763, 2676, 1979, 1979, 1979,
+ 1979, 1979, 1320, 3033, 2863, 2984, 2985, 2989, 3452, 2586,
+ 2877, 2987, 2588, 1320, 2513, 919, 2873, 1320, 1301, 2098,
+ 2992, 1510, 2699, 2700, 2701, 2702, 2703, 2704, 2999, 655,
+ 952, 2705, 2706, 2900, 2707, 3004, 2708, 3375, 3024, 2291,
+ 3014, 2923, 1509, 1320, 1931, 2371, 1932, 3382, 3021, 2370,
+ 2845, 3086, 2826, 2562, 2368, 2189, 811, 66, 1320, 3028,
+ 3029, 3027, 3026, 3075, 3076, 44, 2864, 3346, 2865, 3090,
+ 3034, 2364, 3412, 3035, 762, 759, 3139, 3045, 3046, 3140,
+ 3141, 2739, 3051, 2689, 3053, 2690, 2363, 3096, 3097, 3395,
+ 3396, 758, 3397, 1959, 1297, 1294, 1317, 2359, 1318, 1319,
+ 1317, 2357, 1318, 1319, 3489, 1317, 2769, 1318, 1319, 1825,
+ 191, 3109, 92, 191, 3113, 35, 643, 34, 33, 32,
+ 31, 649, 1317, 25, 1318, 1319, 24, 2322, 23, 22,
+ 3079, 21, 191, 28, 3083, 3084, 3085, 1317, 20, 1318,
+ 1319, 3124, 2311, 3098, 19, 18, 2848, 191, 1317, 3510,
+ 1318, 1319, 1317, 3552, 1318, 1319, 3128, 119, 53, 50,
+ 48, 127, 126, 51, 47, 3118, 1066, 2832, 3136, 3508,
+ 45, 30, 649, 191, 649, 29, 3143, 17, 1317, 1591,
+ 1318, 1319, 16, 15, 14, 3114, 13, 12, 11, 7,
+ 6, 38, 37, 1317, 27, 1318, 1319, 36, 26, 4,
+ 2549, 2191, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 3126, 0, 3135, 0, 3157,
+ 0, 0, 0, 0, 0, 0, 0, 1886, 0, 0,
+ 1884, 3186, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 40, 0, 2921,
+ 0, 0, 0, 3178, 0, 0, 0, 3158, 0, 2744,
+ 3167, 3166, 3180, 2930, 3182, 0, 0, 0, 0, 0,
+ 3176, 3174, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2747, 0, 3285, 0, 2747,
+ 0, 0, 3272, 0, 0, 0, 3292, 0, 3190, 0,
+ 0, 0, 0, 3187, 3188, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1579, 0, 0, 3302, 3303, 0, 3305,
+ 0, 3306, 3307, 0, 0, 3267, 3310, 3311, 3312, 3268,
+ 3314, 3317, 3315, 3316, 3269, 0, 1886, 0, 3284, 1884,
+ 3319, 0, 0, 3296, 3288, 0, 3326, 3328, 3329, 3331,
+ 3333, 3334, 3336, 3279, 3280, 3281, 3276, 0, 0, 0,
+ 0, 3289, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3320, 0, 0,
+ 3366, 0, 0, 0, 0, 0, 0, 1592, 3322, 0,
+ 0, 0, 3325, 0, 3362, 0, 0, 0, 0, 0,
+ 3344, 3341, 3342, 0, 3361, 3343, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 3359, 0,
+ 0, 0, 0, 3071, 0, 0, 3350, 3365, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2747, 0, 0, 0, 1605,
+ 1608, 1609, 1610, 1611, 1612, 1613, 3095, 1614, 1615, 1617,
+ 1618, 1616, 1619, 1620, 1593, 1594, 1595, 1596, 1577, 1578,
+ 1606, 0, 1580, 3367, 1581, 1582, 1583, 1584, 1585, 1586,
+ 1587, 1588, 1589, 0, 0, 1590, 1597, 1598, 1599, 1600,
+ 0, 1601, 1602, 1603, 1604, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3391, 3386, 0, 0,
+ 0, 3115, 0, 3116, 0, 0, 3117, 0, 0, 3120,
+ 3121, 40, 3378, 0, 0, 3381, 3407, 3383, 3125, 0,
+ 0, 0, 3408, 3409, 0, 0, 0, 0, 3127, 0,
+ 0, 0, 0, 3374, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3420, 0, 0, 0, 0, 0,
+ 3144, 0, 0, 3145, 0, 3146, 3147, 0, 3148, 3401,
+ 3149, 0, 3402, 0, 0, 3150, 0, 0, 3390, 0,
+ 3446, 3447, 3410, 0, 0, 0, 3400, 3422, 0, 0,
+ 0, 0, 0, 3417, 3456, 3458, 3460, 40, 3453, 3454,
+ 3175, 3427, 2744, 3425, 3438, 0, 3430, 3435, 3432, 3431,
+ 0, 3183, 3429, 0, 3185, 3434, 3296, 3441, 3433, 3488,
+ 0, 0, 0, 0, 0, 0, 3189, 0, 0, 191,
+ 0, 191, 3464, 0, 191, 1607, 0, 0, 0, 186,
+ 3485, 3475, 3480, 3467, 3264, 0, 0, 0, 0, 0,
+ 3453, 3454, 3495, 0, 0, 0, 0, 0, 0, 3506,
+ 3493, 0, 0, 125, 0, 649, 0, 649, 649, 0,
+ 0, 0, 0, 0, 1371, 0, 168, 0, 3504, 3509,
+ 0, 0, 0, 0, 0, 0, 0, 649, 191, 0,
+ 0, 0, 3528, 3530, 3532, 0, 0, 3522, 0, 0,
+ 0, 0, 1886, 1778, 3524, 1884, 3537, 3525, 0, 0,
+ 0, 0, 3533, 0, 3536, 3534, 1363, 3540, 3538, 0,
+ 0, 0, 0, 0, 3551, 0, 0, 0, 3453, 3454,
+ 3548, 0, 165, 0, 0, 166, 0, 3554, 0, 0,
+ 3563, 3564, 3560, 0, 3562, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3349, 0, 0, 1886, 185, 0,
+ 1884, 3569, 3571, 3572, 3316, 0, 3570, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 186, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 2555, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 125, 0, 147, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 168, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1363, 158,
+ 0, 0, 0, 0, 0, 146, 0, 0, 0, 0,
+ 169, 0, 0, 0, 0, 0, 0, 0, 0, 175,
+ 0, 0, 0, 0, 165, 0, 0, 166, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 3389, 0,
+ 0, 0, 0, 0, 0, 0, 1649, 1650, 157, 156,
+ 185, 0, 0, 0, 0, 191, 0, 0, 0, 649,
+ 649, 3403, 0, 0, 3404, 0, 3405, 0, 0, 0,
+ 0, 0, 0, 0, 0, 191, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 649, 0, 0, 191,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 649, 0, 0, 0, 0, 0, 191, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 160, 0, 0, 0, 0, 0, 151,
+ 1651, 154, 0, 1648, 649, 152, 153, 0, 0, 0,
+ 3486, 0, 169, 0, 0, 0, 0, 1363, 0, 0,
+ 0, 175, 0, 649, 649, 0, 649, 0, 649, 649,
+ 0, 649, 649, 649, 649, 649, 649, 0, 0, 0,
+ 0, 3501, 0, 3502, 1363, 3503, 0, 1363, 649, 1363,
+ 191, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 191, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 186, 0, 649, 0, 191, 0, 0, 0, 0,
+ 0, 0, 1645, 0, 0, 0, 0, 0, 0, 649,
+ 0, 191, 0, 0, 0, 125, 0, 147, 0, 0,
+ 0, 0, 0, 3549, 0, 3550, 0, 191, 168, 0,
+ 0, 0, 0, 0, 191, 0, 0, 0, 0, 0,
+ 0, 0, 0, 191, 191, 191, 191, 191, 191, 191,
+ 191, 191, 649, 0, 0, 160, 0, 0, 0, 158,
+ 0, 0, 0, 0, 0, 146, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 161, 0, 1591, 0, 0,
+ 0, 0, 173, 0, 165, 0, 0, 166, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1649, 1650, 157, 156,
+ 185, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 181, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 155, 0, 0, 0, 0, 1495, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 826, 0, 0, 83, 162, 167, 164, 170,
+ 171, 172, 174, 176, 177, 178, 179, 0, 0, 0,
+ 0, 0, 180, 182, 183, 184, 0, 0, 0, 0,
+ 0, 0, 0, 0, 148, 595, 0, 149, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 151,
+ 1651, 154, 0, 1648, 880, 152, 153, 649, 649, 0,
+ 0, 1579, 169, 0, 0, 0, 0, 161, 0, 0,
+ 649, 175, 0, 0, 173, 0, 0, 0, 0, 191,
+ 0, 0, 0, 0, 890, 0, 83, 0, 0, 0,
+ 0, 0, 0, 0, 0, 947, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 890, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 181, 0, 0, 0, 0,
+ 951, 0, 0, 0, 0, 0, 0, 649, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1363, 0, 0,
+ 0, 0, 0, 0, 0, 1592, 0, 0, 649, 0,
+ 0, 0, 0, 0, 1363, 0, 0, 0, 162, 167,
+ 164, 170, 171, 172, 174, 176, 177, 178, 179, 0,
+ 0, 0, 0, 0, 180, 182, 183, 184, 0, 649,
+ 649, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 160, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1605, 1608, 1609,
+ 1610, 1611, 1612, 1613, 0, 1614, 1615, 1617, 1618, 1616,
+ 1619, 1620, 1593, 1594, 1595, 1596, 1577, 1578, 1606, 2017,
+ 1580, 0, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588,
+ 1589, 0, 0, 1590, 1597, 1598, 1599, 1600, 0, 1601,
+ 1602, 1603, 1604, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 191, 155, 0, 0, 0, 649, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 191, 0, 0, 649, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 191, 0,
+ 0, 0, 649, 0, 0, 2017, 191, 0, 191, 0,
+ 191, 191, 0, 0, 148, 0, 0, 149, 0, 0,
+ 0, 0, 0, 0, 0, 649, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 161, 0, 0,
+ 0, 0, 0, 0, 173, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1607, 0, 0, 0, 0, 0, 0,
+ 0, 0, 649, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 181, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 649,
+ 0, 0, 0, 0, 0, 649, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 162, 167,
+ 164, 170, 171, 172, 174, 176, 177, 178, 179, 0,
+ 0, 0, 0, 0, 180, 182, 183, 184, 0, 0,
+ 0, 0, 649, 0, 0, 0, 0, 649, 0, 0,
+ 0, 649, 649, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 191,
+ 0, 0, 0, 0, 0, 0, 191, 0, 0, 0,
+ 0, 0, 0, 0, 0, 191, 191, 0, 0, 191,
+ 0, 191, 0, 0, 0, 0, 0, 0, 191, 0,
+ 0, 0, 0, 0, 0, 191, 0, 0, 0, 0,
+ 0, 1067, 0, 1073, 0, 0, 1075, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 191, 0, 0, 0, 0, 0, 0, 0, 0,
+ 649, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1293, 1127, 0, 1127, 1127, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1363, 0, 2017, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 890, 1360, 1365, 1366, 0, 1369, 0, 1370, 1372,
+ 1373, 1374, 0, 1377, 1378, 1380, 1380, 0, 1380, 1384,
+ 1384, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394,
+ 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404,
+ 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414,
+ 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424,
+ 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434,
+ 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444,
+ 1445, 1446, 1447, 1448, 1449, 1450, 0, 0, 0, 0,
+ 1451, 0, 1453, 1454, 1455, 1456, 1457, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1384, 1384, 1384, 1384, 1384,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1463,
+ 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473,
+ 1474, 1475, 1476, 0, 0, 0, 0, 191, 0, 0,
+ 0, 0, 0, 0, 0, 191, 0, 0, 0, 0,
+ 1490, 0, 0, 0, 0, 0, 649, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 649,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 191, 0, 0, 0, 0, 191, 0,
+ 0, 0, 0, 1496, 0, 82, 42, 43, 84, 890,
+ 0, 0, 0, 890, 0, 0, 0, 0, 0, 890,
+ 0, 1552, 0, 0, 0, 88, 0, 0, 0, 46,
+ 73, 74, 0, 71, 75, 0, 0, 0, 0, 1569,
+ 0, 0, 72, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 649, 0, 0, 0,
+ 0, 59, 191, 0, 0, 0, 0, 0, 0, 191,
+ 0, 0, 0, 91, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 649, 0, 0, 0, 0, 0, 0,
+ 649, 0, 0, 0, 0, 0, 0, 0, 0, 649,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1708, 0, 0, 1363, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 191, 191,
+ 191, 191, 191, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1753, 0, 0,
+ 0, 0, 191, 191, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 191, 0, 0, 0, 1787,
+ 0, 0, 0, 0, 0, 0, 1791, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 649, 1802, 1803, 1804,
+ 1805, 1806, 1807, 1808, 0, 0, 0, 0, 0, 0,
+ 0, 49, 52, 55, 54, 57, 0, 70, 0, 0,
+ 79, 76, 0, 0, 0, 0, 0, 0, 1127, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 649, 58, 87, 86, 0, 0, 68,
+ 69, 56, 0, 0, 0, 0, 0, 77, 78, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 649, 0, 0, 0, 0, 0, 0, 0, 649,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 60,
+ 61, 0, 62, 63, 64, 65, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 649, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 191, 0, 0, 0, 649, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 649, 0, 0, 0, 1363, 0, 0, 649,
+ 649, 1363, 191, 191, 191, 191, 191, 0, 0, 0,
+ 0, 0, 0, 0, 191, 0, 0, 0, 0, 0,
+ 191, 0, 191, 0, 0, 191, 191, 191, 0, 0,
+ 0, 1838, 0, 1127, 1127, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 83, 0, 91,
+ 83, 0, 828, 0, 0, 0, 816, 829, 830, 831,
+ 832, 817, 0, 0, 818, 819, 0, 820, 0, 0,
+ 0, 191, 0, 0, 0, 85, 0, 0, 0, 0,
+ 0, 825, 833, 834, 649, 0, 0, 1363, 0, 0,
+ 0, 0, 649, 0, 0, 0, 0, 191, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 191, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 2866, 2867,
+ 191, 0, 0, 191, 0, 0, 0, 90, 0, 0,
+ 835, 836, 837, 838, 839, 840, 841, 842, 843, 844,
+ 845, 846, 847, 848, 849, 850, 851, 852, 853, 854,
+ 855, 856, 857, 858, 859, 860, 861, 862, 863, 864,
+ 865, 866, 867, 868, 869, 870, 871, 872, 873, 874,
+ 875, 876, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1341,
- 0, 0, 2032, 0, 0, 0, 0, 0, 0, 2520,
- 0, 0, 0, 0, 0, 0, 0, 0, 2525, 0,
- 0, 0, 1339, 0, 0, 0, 0, 637, 0, 637,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 649, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2868, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
+ 0, 0, 191, 67, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 888, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 637,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 586,
- 0, 0, 0, 0, 0, 637, 0, 0, 0, 0,
- 0, 0, 0, 1846, 0, 0, 0, 586, 637, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2082, 0, 0, 0, 0, 0, 0, 0, 2086, 0,
+ 2089, 0, 0, 1838, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 191, 0,
+ 2869, 2870, 0, 0, 0, 0, 2096, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 768, 0, 0, 0, 191, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 191, 0, 0,
+ 191, 191, 191, 0, 0, 0, 0, 0, 0, 0,
+ 649, 649, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 189, 951, 0,
+ 596, 782, 0, 0, 786, 0, 783, 784, 0, 0,
+ 0, 785, 0, 0, 0, 0, 0, 0, 0, 596,
+ 0, 0, 0, 0, 0, 0, 0, 649, 649, 649,
+ 649, 951, 0, 0, 900, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 920, 920, 0, 0, 0, 0, 0, 0, 0,
+ 596, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 91, 0, 0, 828, 0, 0, 0, 816, 829,
+ 830, 831, 832, 817, 0, 0, 818, 819, 0, 820,
+ 0, 1838, 0, 0, 0, 0, 0, 0, 2246, 0,
+ 0, 0, 0, 825, 833, 834, 0, 2263, 2264, 0,
+ 0, 2268, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2271, 0, 0, 191, 0, 0, 0, 2274, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1363, 0, 0, 0, 0, 649, 0, 649,
+ 2866, 2867, 0, 2277, 0, 0, 0, 0, 0, 0,
+ 0, 0, 835, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 860, 861, 862,
+ 863, 864, 865, 866, 867, 868, 869, 870, 871, 872,
+ 873, 874, 875, 876, 0, 0, 0, 0, 0, 649,
+ 0, 0, 2293, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 191, 0, 0, 649, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 649, 0,
+ 0, 0, 0, 0, 0, 2868, 0, 0, 0, 0,
+ 0, 2303, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 649, 0,
+ 0, 0, 649, 649, 0, 0, 0, 0, 0, 0,
+ 0, 3208, 3210, 3209, 3227, 3228, 3229, 3230, 3231, 3232,
+ 3233, 716, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 649, 2869, 2870, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 810, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 2401, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 649, 0, 0, 0, 0, 0, 0, 0, 0, 2433,
+ 0, 0, 0, 0, 0, 0, 191, 628, 0, 0,
+ 0, 0, 0, 648, 0, 0, 0, 890, 0, 0,
+ 0, 0, 0, 0, 649, 191, 0, 0, 0, 0,
+ 2493, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2472, 2473, 0, 0, 0, 0, 0, 0, 0, 2096,
+ 0, 0, 890, 2492, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 648, 0, 648, 0, 0, 0,
+ 0, 0, 0, 0, 0, 649, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1363, 0, 649, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 2542, 0, 0, 0, 0, 0,
+ 0, 649, 2017, 0, 0, 0, 596, 0, 596, 0,
+ 0, 596, 0, 0, 0, 3214, 0, 0, 0, 0,
+ 0, 0, 0, 649, 0, 0, 0, 0, 0, 0,
+ 3222, 3223, 0, 0, 0, 0, 0, 191, 649, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 2569, 0, 0, 0,
+ 2591, 2592, 2593, 2594, 2595, 596, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 649, 0, 1838, 2605, 0, 0, 0, 0,
+ 0, 0, 782, 1364, 695, 786, 697, 783, 784, 0,
+ 693, 696, 785, 0, 649, 0, 0, 2613, 0, 191,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 649, 0, 649, 0, 0, 0, 714, 715,
+ 3207, 3211, 3212, 3213, 3224, 3225, 3226, 3234, 3236, 747,
+ 3235, 3237, 3238, 3239, 3242, 3243, 3244, 3245, 3240, 3241,
+ 3246, 3191, 3195, 3192, 3193, 3194, 3206, 3196, 3197, 3198,
+ 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3247, 3248, 3249,
+ 3250, 3251, 3252, 3217, 3221, 3220, 3218, 3219, 3215, 3216,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 787, 0, 788, 0, 0, 792, 0, 0,
+ 0, 794, 793, 0, 795, 761, 760, 0, 0, 789,
+ 790, 0, 791, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1364, 0, 2686, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 767, 0, 0, 3253, 3254, 3255,
+ 3256, 3257, 3258, 3259, 3260, 0, 0, 0, 0, 0,
+ 2096, 0, 596, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 900, 2748, 0, 83, 0, 0, 2096, 2096,
+ 2096, 2096, 2096, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 596, 0, 2096, 647,
+ 0, 2096, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 596, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2822, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2860,
+ 943, 0, 950, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2872, 1364, 2854, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2862, 0, 0, 0, 0,
+ 0, 0, 2889, 0, 0, 2892, 0, 0, 0, 0,
+ 0, 1364, 0, 0, 1364, 0, 1364, 596, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 828, 0, 2009,
+ 0, 0, 829, 0, 0, 0, 0, 1725, 0, 0,
+ 0, 0, 1885, 0, 0, 0, 0, 648, 1280, 648,
+ 648, 0, 596, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1777, 648,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 596, 0, 0, 0, 0, 0,
+ 0, 596, 0, 0, 0, 0, 0, 0, 1362, 0,
+ 1800, 1801, 596, 596, 596, 596, 596, 596, 596, 0,
+ 0, 0, 0, 0, 2988, 835, 836, 837, 838, 839,
+ 840, 841, 842, 843, 844, 845, 846, 847, 848, 849,
+ 850, 851, 852, 853, 854, 855, 856, 857, 858, 859,
+ 860, 861, 862, 863, 864, 865, 866, 867, 868, 869,
+ 870, 871, 872, 873, 874, 875, 876, 828, 0, 0,
+ 0, 0, 829, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1885, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 2096, 0, 0, 0, 0, 3049,
+ 0, 0, 0, 0, 0, 0, 0, 3032, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3064,
+ 0, 0, 3065, 3066, 3067, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1362, 0, 0, 0, 0, 835, 836, 837, 838, 839,
+ 840, 841, 842, 843, 844, 845, 846, 847, 848, 849,
+ 850, 851, 852, 853, 854, 855, 856, 857, 858, 859,
+ 860, 861, 862, 863, 864, 865, 866, 867, 868, 869,
+ 870, 871, 872, 873, 874, 875, 876, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 596, 0, 0, 0,
+ 0, 648, 648, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1846, 0, 0, 0, 0, 0, 0, 1341, 637, 0,
- 0, 0, 637, 637, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1341, 0, 0, 1341, 2644, 1341,
- 586, 637, 0, 0, 0, 0, 1115, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 648, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1696, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 1645, 0, 0,
+ 0, 0, 0, 648, 1364, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1622, 0, 0, 920, 920, 0, 0,
+ 0, 1364, 0, 1631, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1748, 0, 0, 0, 2681, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 586, 0, 1115,
- 0, 0, 0, 0, 586, 0, 2708, 1645, 0, 0,
- 637, 0, 0, 1771, 1772, 586, 586, 586, 586, 586,
- 586, 586, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 648, 0, 1657, 0,
+ 0, 0, 0, 0, 0, 3151, 1666, 3155, 3156, 1362,
+ 1668, 0, 0, 1671, 1672, 648, 648, 0, 648, 0,
+ 648, 648, 0, 648, 648, 648, 648, 648, 648, 0,
+ 2748, 0, 83, 0, 2748, 0, 1362, 1703, 1704, 1362,
+ 648, 1362, 0, 1709, 0, 920, 1777, 920, 920, 920,
+ 920, 920, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 648, 0, 0, 0, 0,
+ 0, 0, 0, 1129, 0, 1129, 1129, 0, 1771, 1725,
+ 0, 648, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3277, 0, 0, 920, 1292, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 900, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 648, 596, 0, 0, 0, 0,
+ 0, 0, 1777, 596, 0, 596, 0, 596, 2105, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 930, 0, 0, 0, 0, 637, 0, 0, 2032, 0,
- 0, 0, 0, 0, 0, 1339, 0, 637, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 637, 637, 0, 0, 0, 0, 0, 0, 0,
+ 2748, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1386, 1387, 1388, 1389, 1390, 1391,
+ 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401,
+ 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414,
+ 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424,
+ 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434,
+ 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445,
+ 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472,
+ 1473, 1474, 1475, 1476, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3387, 3376, 648,
+ 648, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 648, 0, 0, 0, 83, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 637, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1498, 1499, 0,
+ 0, 0, 0, 0, 0, 0, 596, 0, 0, 0,
+ 0, 0, 0, 596, 0, 0, 0, 0, 0, 0,
+ 0, 0, 596, 596, 0, 0, 596, 0, 2270, 648,
+ 0, 0, 0, 0, 1546, 596, 0, 0, 0, 1362,
+ 0, 0, 596, 0, 0, 0, 0, 3421, 1894, 1564,
+ 648, 0, 83, 0, 0, 0, 1362, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 596, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 648, 648, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 943, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 586,
+ 0, 1674, 1674, 0, 1674, 0, 1674, 1674, 0, 1683,
+ 1674, 1674, 1674, 1674, 1674, 0, 0, 0, 0, 0,
+ 0, 648, 0, 0, 0, 0, 943, 0, 0, 0,
+ 0, 0, 0, 1364, 0, 1777, 0, 0, 0, 3505,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 637, 0, 0, 0, 0, 0, 0, 0, 2915,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
+ 0, 1751, 0, 0, 0, 0, 0, 0, 0, 648,
+ 0, 0, 0, 0, 0, 0, 0, 1775, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 637, 0, 637, 0, 0, 0, 0, 1341, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 907,
- 907, 0, 0, 0, 1341, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 648, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 648, 0, 0, 648, 0, 0,
+ 1129, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 648, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 596, 0, 0, 0, 0, 0,
+ 0, 0, 1725, 0, 648, 0, 0, 0, 0, 0,
+ 0, 2179, 2180, 2181, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 907, 1748,
- 907, 907, 907, 907, 907, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2032, 2032, 0, 0, 0,
+ 0, 648, 0, 0, 0, 0, 0, 648, 1666, 0,
+ 0, 1666, 0, 1666, 0, 0, 0, 0, 0, 2211,
+ 596, 0, 0, 0, 0, 596, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1696, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 907, 0, 0,
- 0, 0, 3059, 3060, 3061, 3062, 0, 0, 0, 0,
- 0, 888, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 586, 0, 0, 0, 0, 0,
- 0, 1748, 586, 0, 586, 0, 586, 2069, 0, 0,
+ 0, 0, 0, 0, 648, 0, 0, 0, 0, 648,
+ 0, 0, 0, 648, 648, 1129, 1129, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1826, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 596,
+ 0, 0, 0, 0, 0, 0, 2548, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1880, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1364, 0, 0, 0, 1896, 0, 0, 0,
+ 0, 0, 0, 0, 0, 596, 596, 596, 596, 596,
+ 0, 0, 648, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1929, 1930, 596,
+ 596, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 596, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 920, 1129, 1362, 0,
+ 648, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 3135, 0, 3137, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2054, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 2066, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 920, 0, 0, 0, 0,
+ 1546, 0, 0, 1129, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 943, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 596, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 2032, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1364, 0, 0, 0, 0, 1364, 596,
+ 596, 596, 596, 596, 0, 0, 0, 0, 648, 0,
+ 950, 2767, 0, 0, 0, 0, 0, 1725, 0, 596,
+ 0, 648, 596, 2775, 1777, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 943, 0, 0,
+ 0, 0, 0, 950, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 3243, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1115, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 586, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 0, 0, 0, 0, 0, 0, 586,
- 586, 0, 0, 586, 0, 2230, 0, 0, 0, 0,
- 0, 0, 586, 0, 0, 0, 0, 0, 0, 586,
- 0, 0, 0, 3293, 0, 0, 0, 3293, 3293, 0,
+ 0, 0, 0, 0, 0, 0, 2506, 0, 596, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 2032, 0, 0, 0,
+ 0, 0, 0, 0, 1364, 0, 0, 0, 0, 0,
+ 943, 0, 0, 0, 596, 1880, 0, 0, 0, 1880,
+ 1880, 0, 0, 0, 0, 0, 0, 0, 596, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 648, 0,
+ 0, 0, 0, 0, 0, 0, 0, 596, 0, 0,
+ 596, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 648, 0, 0, 0, 0,
+ 0, 0, 648, 0, 0, 0, 1666, 1666, 0, 0,
+ 0, 648, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1362, 2579, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 2280, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 596,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1341, 0, 1748, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 648, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2032, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1129, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 596, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 648, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 2032,
+ 0, 0, 0, 0, 596, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 648, 596, 0, 0, 596, 596, 596,
+ 0, 648, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 648, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 648, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 648, 0, 0, 0, 1362, 0,
+ 0, 648, 648, 1362, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 3370, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 3374, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 2441, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 2456, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1115, 1115, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
- 0, 0, 0, 0, 1696, 0, 0, 0, 3417, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 3425, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 2817, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 648, 0, 0, 1362,
+ 1725, 0, 0, 0, 648, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1364,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 0, 0, 586, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 3370, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 2032, 0,
+ 0, 0, 0, 0, 2538, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2896, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2915, 0, 3425, 0, 0,
+ 0, 2066, 0, 0, 0, 0, 0, 0, 2563, 0,
+ 0, 0, 0, 0, 0, 0, 0, 2568, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1725,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 0, 0, 0,
- 0, 0, 2505, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 648, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1341, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 586, 586, 586, 586, 586, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 586, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
+ 0, 0, 0, 0, 1880, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 907, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 3039, 0,
+ 0, 1880, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2687,
+ 0, 0, 648, 648, 0, 0, 0, 1129, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1725, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1674, 0,
+ 0, 0, 596, 0, 0, 0, 0, 0, 0, 648,
+ 648, 648, 648, 0, 0, 0, 2724, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1129, 0, 0, 0, 0, 0, 0, 2751, 1674, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 907,
+ 0, 0, 1364, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3440,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 0, 0, 0,
+ 0, 0, 943, 0, 1725, 0, 0, 0, 0, 0,
+ 2066, 0, 0, 0, 1362, 0, 0, 0, 0, 648,
+ 0, 648, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1341, 0, 0,
- 0, 0, 1341, 586, 586, 586, 586, 586, 0, 0,
- 0, 0, 0, 0, 0, 2724, 0, 0, 0, 0,
- 0, 1696, 0, 586, 0, 0, 586, 2732, 1748, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1777, 0, 0, 0,
+ 0, 648, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 648, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 586, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1341, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 586, 0, 0,
+ 648, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 586, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2959, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 586, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 648, 0, 0, 0, 648, 648, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 648, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2494,3835 +2453,3903 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 586, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 648, 0, 0, 0, 0, 0, 2066, 2066,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 648, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 3103, 3104, 3105, 3106, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 586, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 648, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1362, 0, 648,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 586, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 586,
- 586, 586, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 648, 648, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 648, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 648, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 3179, 0, 3181, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 648, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 648, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 648, 0, 648, 2066, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3287, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1129, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3337, 0, 0, 0,
+ 3337, 3337, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2066,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1696, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1341, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 2066, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 2066, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1696, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3414, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 3418, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1129,
+ 1129, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3462, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3470, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3414, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 380, 0, 0, 0, 1249, 1234, 496, 0, 1177,
- 1252, 1146, 1165, 1262, 1168, 1171, 1213, 1125, 1191, 399,
- 1162, 1118, 1150, 1120, 1157, 1121, 1148, 1179, 257, 1145,
- 1236, 1195, 1251, 350, 254, 1127, 1151, 413, 1167, 196,
- 1215, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 1258, 354, 1201, 0, 476,
- 384, 0, 0, 0, 1181, 1240, 1189, 1227, 1176, 1214,
- 1135, 1200, 1253, 1163, 1210, 1254, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 1696, 0, 3397, 627, 0,
- 0, 0, 0, 3398, 0, 0, 0, 0, 228, 0,
- 0, 235, 0, 0, 586, 335, 344, 343, 324, 325,
- 327, 329, 334, 341, 347, 1159, 1207, 1248, 1160, 1209,
- 252, 307, 259, 251, 501, 1259, 1239, 1124, 1188, 1247,
- 0, 0, 219, 1250, 1183, 0, 1212, 0, 1265, 1119,
- 1203, 0, 1122, 1126, 1261, 1243, 1154, 262, 0, 0,
- 0, 0, 0, 0, 0, 1180, 1190, 1224, 1228, 1174,
- 0, 0, 0, 0, 1341, 0, 0, 1152, 0, 1199,
- 0, 0, 0, 1131, 1123, 0, 0, 0, 0, 0,
+ 0, 0, 2066, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 383,
+ 2959, 0, 3470, 1263, 1248, 502, 0, 1191, 1266, 1160,
+ 1179, 1276, 1182, 1185, 1227, 1139, 1205, 402, 1176, 1132,
+ 1164, 1134, 1171, 1135, 1162, 1193, 260, 1159, 1250, 1209,
+ 1265, 353, 257, 1141, 1165, 416, 1181, 198, 1229, 471,
+ 244, 364, 361, 510, 272, 263, 259, 242, 306, 372,
+ 414, 492, 408, 1272, 357, 1215, 0, 481, 387, 0,
+ 0, 0, 1195, 1254, 1203, 1241, 1190, 1228, 1149, 1214,
+ 1267, 1177, 1224, 1268, 312, 240, 314, 197, 399, 482,
+ 276, 0, 0, 0, 0, 3442, 812, 0, 0, 0,
+ 0, 3443, 0, 0, 0, 0, 230, 0, 0, 237,
+ 0, 0, 0, 338, 347, 346, 327, 328, 330, 332,
+ 337, 344, 350, 1173, 1221, 1262, 1174, 1223, 255, 310,
+ 262, 254, 507, 1273, 1253, 1138, 1202, 1261, 0, 0,
+ 221, 1264, 1197, 0, 1226, 0, 1279, 1133, 1217, 0,
+ 1136, 1140, 1275, 1257, 1168, 265, 0, 0, 0, 0,
+ 0, 0, 0, 1194, 1204, 1238, 1242, 1188, 0, 0,
+ 0, 0, 0, 0, 0, 1166, 0, 1213, 0, 0,
+ 0, 1145, 1137, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1178, 0, 0,
- 0, 0, 1134, 0, 1153, 1225, 1696, 1117, 284, 1128,
- 385, 244, 0, 1232, 1242, 1175, 541, 1246, 1173, 1172,
- 1219, 1132, 1238, 1166, 349, 1130, 316, 191, 215, 0,
- 1164, 395, 441, 453, 1237, 1149, 1158, 242, 1156, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 1198,
- 1217, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
- 511, 243, 561, 218, 536, 210, 1129, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 1144, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 1233, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 1222, 1264, 408, 452, 230, 522, 475,
- 1139, 1143, 1137, 1204, 1138, 1193, 1194, 1140, 1255, 1256,
- 1257, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 1226, 1133, 0, 1141, 1142, 1235, 1244, 1245, 581, 368,
- 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
- 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
- 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
- 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
- 528, 295, 448, 447, 317, 318, 363, 431, 1197, 190,
- 211, 352, 1260, 434, 275, 559, 532, 527, 197, 213,
- 1136, 249, 1147, 1155, 0, 1161, 1169, 1170, 1182, 1184,
- 1185, 1186, 1187, 1205, 1206, 1208, 1216, 1218, 1221, 1223,
- 1230, 1241, 1263, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
- 301, 556, 557, 286, 516, 545, 0, 0, 362, 1196,
- 1202, 365, 268, 291, 306, 1211, 531, 481, 217, 446,
- 277, 240, 1229, 1231, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 1192, 1220,
- 360, 497, 498, 302, 380, 0, 0, 0, 1249, 1234,
- 496, 0, 1177, 1252, 1146, 1165, 1262, 1168, 1171, 1213,
- 1125, 1191, 399, 1162, 1118, 1150, 1120, 1157, 1121, 1148,
- 1179, 257, 1145, 1236, 1195, 1251, 350, 254, 1127, 1151,
- 413, 1167, 196, 1215, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 1258, 354,
- 1201, 0, 476, 384, 0, 0, 0, 1181, 1240, 1189,
- 1227, 1176, 1214, 1135, 1200, 1253, 1163, 1210, 1254, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 0, 188, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 1159, 1207,
- 1248, 1160, 1209, 252, 307, 259, 251, 501, 1259, 1239,
- 1124, 1188, 1247, 0, 0, 219, 1250, 1183, 0, 1212,
- 0, 1265, 1119, 1203, 0, 1122, 1126, 1261, 1243, 1154,
- 262, 0, 0, 0, 0, 0, 0, 0, 1180, 1190,
- 1224, 1228, 1174, 0, 0, 0, 0, 0, 2733, 0,
- 1152, 0, 1199, 0, 0, 0, 1131, 1123, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1178, 0, 0, 0, 0, 1134, 0, 1153, 1225, 0,
- 1117, 284, 1128, 385, 244, 0, 1232, 1242, 1175, 541,
- 1246, 1173, 1172, 1219, 1132, 1238, 1166, 349, 1130, 316,
- 191, 215, 0, 1164, 395, 441, 453, 1237, 1149, 1158,
- 242, 1156, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 1198, 1217, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 1129,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 1144, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 1233, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 1222, 1264, 408, 452,
- 230, 522, 475, 1139, 1143, 1137, 1204, 1138, 1193, 1194,
- 1140, 1255, 1256, 1257, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 1226, 1133, 0, 1141, 1142, 1235, 1244,
- 1245, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 1197, 190, 211, 352, 1260, 434, 275, 559, 532,
- 527, 197, 213, 1136, 249, 1147, 1155, 0, 1161, 1169,
- 1170, 1182, 1184, 1185, 1186, 1187, 1205, 1206, 1208, 1216,
- 1218, 1221, 1223, 1230, 1241, 1263, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 1196, 1202, 365, 268, 291, 306, 1211, 531,
- 481, 217, 446, 277, 240, 1229, 1231, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 1192, 1220, 360, 497, 498, 302, 380, 0, 0,
- 0, 1249, 1234, 496, 0, 1177, 1252, 1146, 1165, 1262,
- 1168, 1171, 1213, 1125, 1191, 399, 1162, 1118, 1150, 1120,
- 1157, 1121, 1148, 1179, 257, 1145, 1236, 1195, 1251, 350,
- 254, 1127, 1151, 413, 1167, 196, 1215, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 1258, 354, 1201, 0, 476, 384, 0, 0, 0,
- 1181, 1240, 1189, 1227, 1176, 1214, 1135, 1200, 1253, 1163,
- 1210, 1254, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 0, 0, 627, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 1159, 1207, 1248, 1160, 1209, 252, 307, 259, 251,
- 501, 1259, 1239, 1124, 1188, 1247, 0, 0, 219, 1250,
- 1183, 0, 1212, 0, 1265, 1119, 1203, 0, 1122, 1126,
- 1261, 1243, 1154, 262, 0, 0, 0, 0, 0, 0,
- 0, 1180, 1190, 1224, 1228, 1174, 0, 0, 0, 0,
- 0, 2694, 0, 1152, 0, 1199, 0, 0, 0, 1131,
- 1123, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1178, 0, 0, 0, 0, 1134, 0,
- 1153, 1225, 0, 1117, 284, 1128, 385, 244, 0, 1232,
- 1242, 1175, 541, 1246, 1173, 1172, 1219, 1132, 1238, 1166,
- 349, 1130, 316, 191, 215, 0, 1164, 395, 441, 453,
- 1237, 1149, 1158, 242, 1156, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 1198, 1217, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 1129, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 1144, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 1233, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 1222,
- 1264, 408, 452, 230, 522, 475, 1139, 1143, 1137, 1204,
- 1138, 1193, 1194, 1140, 1255, 1256, 1257, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 1226, 1133, 0, 1141,
- 1142, 1235, 1244, 1245, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 1197, 190, 211, 352, 1260, 434,
- 275, 559, 532, 527, 197, 213, 1136, 249, 1147, 1155,
- 0, 1161, 1169, 1170, 1182, 1184, 1185, 1186, 1187, 1205,
- 1206, 1208, 1216, 1218, 1221, 1223, 1230, 1241, 1263, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 1196, 1202, 365, 268, 291,
- 306, 1211, 531, 481, 217, 446, 277, 240, 1229, 1231,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 1192, 1220, 360, 497, 498, 302,
- 380, 0, 0, 0, 1249, 1234, 496, 0, 1177, 1252,
- 1146, 1165, 1262, 1168, 1171, 1213, 1125, 1191, 399, 1162,
- 1118, 1150, 1120, 1157, 1121, 1148, 1179, 257, 1145, 1236,
- 1195, 1251, 350, 254, 1127, 1151, 413, 1167, 196, 1215,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 1258, 354, 1201, 0, 476, 384,
- 0, 0, 0, 1181, 1240, 1189, 1227, 1176, 1214, 1135,
- 1200, 1253, 1163, 1210, 1254, 309, 237, 311, 195, 396,
- 477, 273, 0, 0, 0, 0, 0, 800, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
- 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
- 329, 334, 341, 347, 1159, 1207, 1248, 1160, 1209, 252,
- 307, 259, 251, 501, 1259, 1239, 1124, 1188, 1247, 0,
- 0, 219, 1250, 1183, 0, 1212, 0, 1265, 1119, 1203,
- 0, 1122, 1126, 1261, 1243, 1154, 262, 0, 0, 0,
- 0, 0, 0, 0, 1180, 1190, 1224, 1228, 1174, 0,
- 0, 0, 0, 0, 2048, 0, 1152, 0, 1199, 0,
- 0, 0, 1131, 1123, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1178, 0, 0, 0,
- 0, 1134, 0, 1153, 1225, 0, 1117, 284, 1128, 385,
- 244, 0, 1232, 1242, 1175, 541, 1246, 1173, 1172, 1219,
- 1132, 1238, 1166, 349, 1130, 316, 191, 215, 0, 1164,
- 395, 441, 453, 1237, 1149, 1158, 242, 1156, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 1198, 1217,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
- 243, 561, 218, 536, 210, 1129, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 1144, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 1233, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 1222, 1264, 408, 452, 230, 522, 475, 1139,
- 1143, 1137, 1204, 1138, 1193, 1194, 1140, 1255, 1256, 1257,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 1226,
- 1133, 0, 1141, 1142, 1235, 1244, 1245, 581, 368, 465,
- 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
- 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
- 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
- 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
- 295, 448, 447, 317, 318, 363, 431, 1197, 190, 211,
- 352, 1260, 434, 275, 559, 532, 527, 197, 213, 1136,
- 249, 1147, 1155, 0, 1161, 1169, 1170, 1182, 1184, 1185,
- 1186, 1187, 1205, 1206, 1208, 1216, 1218, 1221, 1223, 1230,
- 1241, 1263, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
- 556, 557, 286, 516, 545, 0, 0, 362, 1196, 1202,
- 365, 268, 291, 306, 1211, 531, 481, 217, 446, 277,
- 240, 1229, 1231, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 1192, 1220, 360,
- 497, 498, 302, 380, 0, 0, 0, 1249, 1234, 496,
- 0, 1177, 1252, 1146, 1165, 1262, 1168, 1171, 1213, 1125,
- 1191, 399, 1162, 1118, 1150, 1120, 1157, 1121, 1148, 1179,
- 257, 1145, 1236, 1195, 1251, 350, 254, 1127, 1151, 413,
- 1167, 196, 1215, 466, 241, 361, 358, 504, 269, 260,
- 256, 239, 303, 369, 411, 486, 405, 1258, 354, 1201,
- 0, 476, 384, 0, 0, 0, 1181, 1240, 1189, 1227,
- 1176, 1214, 1135, 1200, 1253, 1163, 1210, 1254, 309, 237,
- 311, 195, 396, 477, 273, 0, 89, 0, 0, 0,
- 627, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
- 324, 325, 327, 329, 334, 341, 347, 1159, 1207, 1248,
- 1160, 1209, 252, 307, 259, 251, 501, 1259, 1239, 1124,
- 1188, 1247, 0, 0, 219, 1250, 1183, 0, 1212, 0,
- 1265, 1119, 1203, 0, 1122, 1126, 1261, 1243, 1154, 262,
- 0, 0, 0, 0, 0, 0, 0, 1180, 1190, 1224,
- 1228, 1174, 0, 0, 0, 0, 0, 0, 0, 1152,
- 0, 1199, 0, 0, 0, 1131, 1123, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1178,
- 0, 0, 0, 0, 1134, 0, 1153, 1225, 0, 1117,
- 284, 1128, 385, 244, 0, 1232, 1242, 1175, 541, 1246,
- 1173, 1172, 1219, 1132, 1238, 1166, 349, 1130, 316, 191,
- 215, 0, 1164, 395, 441, 453, 1237, 1149, 1158, 242,
- 1156, 451, 409, 520, 223, 271, 438, 415, 449, 422,
- 274, 1198, 1217, 450, 356, 506, 432, 517, 542, 543,
- 250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
- 523, 473, 381, 502, 503, 315, 472, 282, 194, 353,
- 548, 214, 459, 355, 232, 221, 508, 526, 276, 436,
- 203, 485, 515, 229, 463, 0, 0, 560, 205, 513,
- 482, 377, 312, 313, 204, 0, 437, 255, 280, 245,
- 398, 510, 511, 243, 561, 218, 536, 210, 1129, 535,
- 391, 505, 514, 378, 367, 209, 512, 376, 366, 320,
- 339, 340, 267, 293, 429, 359, 430, 292, 294, 387,
- 386, 388, 198, 524, 0, 199, 0, 478, 525, 562,
- 224, 225, 227, 1144, 266, 270, 278, 281, 289, 290,
- 299, 351, 402, 428, 424, 433, 1233, 500, 518, 530,
- 540, 546, 547, 549, 550, 551, 552, 553, 555, 554,
- 390, 297, 474, 319, 357, 1222, 1264, 408, 452, 230,
- 522, 475, 1139, 1143, 1137, 1204, 1138, 1193, 1194, 1140,
- 1255, 1256, 1257, 563, 564, 565, 566, 567, 568, 569,
- 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
- 580, 0, 1226, 1133, 0, 1141, 1142, 1235, 1244, 1245,
- 581, 368, 465, 519, 321, 333, 336, 326, 345, 0,
- 346, 322, 323, 328, 330, 331, 332, 337, 338, 342,
- 348, 238, 201, 374, 382, 499, 298, 206, 207, 208,
- 492, 493, 494, 495, 533, 534, 538, 442, 443, 444,
- 445, 279, 528, 295, 448, 447, 317, 318, 363, 431,
- 1197, 190, 211, 352, 1260, 434, 275, 559, 532, 527,
- 197, 213, 1136, 249, 1147, 1155, 0, 1161, 1169, 1170,
- 1182, 1184, 1185, 1186, 1187, 1205, 1206, 1208, 1216, 1218,
- 1221, 1223, 1230, 1241, 1263, 192, 193, 200, 212, 222,
- 226, 233, 248, 263, 265, 272, 285, 296, 304, 305,
- 308, 314, 364, 370, 371, 372, 373, 392, 393, 394,
- 397, 400, 401, 404, 406, 407, 410, 414, 418, 419,
- 420, 421, 423, 425, 435, 440, 454, 455, 456, 457,
- 458, 461, 462, 467, 468, 469, 470, 471, 479, 480,
- 484, 507, 509, 521, 539, 544, 460, 287, 288, 426,
- 427, 300, 301, 556, 557, 286, 516, 545, 0, 0,
- 362, 1196, 1202, 365, 268, 291, 306, 1211, 531, 481,
- 217, 446, 277, 240, 1229, 1231, 202, 236, 220, 246,
- 261, 264, 310, 375, 383, 412, 417, 283, 258, 234,
- 439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
- 1192, 1220, 360, 497, 498, 302, 380, 0, 0, 0,
- 1249, 1234, 496, 0, 1177, 1252, 1146, 1165, 1262, 1168,
- 1171, 1213, 1125, 1191, 399, 1162, 1118, 1150, 1120, 1157,
- 1121, 1148, 1179, 257, 1145, 1236, 1195, 1251, 350, 254,
- 1127, 1151, 413, 1167, 196, 1215, 466, 241, 361, 358,
- 504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
- 1258, 354, 1201, 0, 476, 384, 0, 0, 0, 1181,
- 1240, 1189, 1227, 1176, 1214, 1135, 1200, 1253, 1163, 1210,
- 1254, 309, 237, 311, 195, 396, 477, 273, 0, 0,
- 0, 0, 0, 627, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 228, 0, 0, 235, 0, 0, 0,
- 335, 344, 343, 324, 325, 327, 329, 334, 341, 347,
- 1159, 1207, 1248, 1160, 1209, 252, 307, 259, 251, 501,
- 1259, 1239, 1124, 1188, 1247, 0, 0, 219, 1250, 1183,
- 0, 1212, 0, 1265, 1119, 1203, 0, 1122, 1126, 1261,
- 1243, 1154, 262, 0, 0, 0, 0, 0, 0, 0,
- 1180, 1190, 1224, 1228, 1174, 0, 0, 0, 0, 0,
- 0, 0, 1152, 0, 1199, 0, 0, 0, 1131, 1123,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1178, 0, 0, 0, 0, 1134, 0, 1153,
- 1225, 0, 1117, 284, 1128, 385, 244, 0, 1232, 1242,
- 1175, 541, 1246, 1173, 1172, 1219, 1132, 1238, 1166, 349,
- 1130, 316, 191, 215, 0, 1164, 395, 441, 453, 1237,
- 1149, 1158, 242, 1156, 451, 409, 520, 223, 271, 438,
- 415, 449, 422, 274, 1198, 1217, 450, 356, 506, 432,
- 517, 542, 543, 250, 389, 529, 490, 537, 558, 216,
- 247, 403, 483, 523, 473, 381, 502, 503, 315, 472,
- 282, 194, 353, 548, 214, 459, 355, 232, 221, 508,
- 526, 276, 436, 203, 485, 515, 229, 463, 0, 0,
- 560, 205, 513, 482, 377, 312, 313, 204, 0, 437,
- 255, 280, 245, 398, 510, 511, 243, 561, 218, 536,
- 210, 1129, 535, 391, 505, 514, 378, 367, 209, 512,
- 376, 366, 320, 339, 340, 267, 293, 429, 359, 430,
- 292, 294, 387, 386, 388, 198, 524, 0, 199, 0,
- 478, 525, 562, 224, 225, 227, 1144, 266, 270, 278,
- 281, 289, 290, 299, 351, 402, 428, 424, 433, 1233,
- 500, 518, 530, 540, 546, 547, 549, 550, 551, 552,
- 553, 555, 554, 390, 297, 474, 319, 357, 1222, 1264,
- 408, 452, 230, 522, 475, 1139, 1143, 1137, 1204, 1138,
- 1193, 1194, 1140, 1255, 1256, 1257, 563, 564, 565, 566,
- 567, 568, 569, 570, 571, 572, 573, 574, 575, 576,
- 577, 578, 579, 580, 0, 1226, 1133, 0, 1141, 1142,
- 1235, 1244, 1245, 581, 368, 465, 519, 321, 333, 336,
- 326, 345, 0, 346, 322, 323, 328, 330, 331, 332,
- 337, 338, 342, 348, 238, 201, 374, 382, 499, 298,
- 206, 207, 208, 492, 493, 494, 495, 533, 534, 538,
- 442, 443, 444, 445, 279, 528, 295, 448, 447, 317,
- 318, 363, 431, 1197, 190, 211, 352, 1260, 434, 275,
- 559, 532, 527, 197, 213, 1136, 249, 1147, 1155, 0,
- 1161, 1169, 1170, 1182, 1184, 1185, 1186, 1187, 1205, 1206,
- 1208, 1216, 1218, 1221, 1223, 1230, 1241, 1263, 192, 193,
- 200, 212, 222, 226, 233, 248, 263, 265, 272, 285,
- 296, 304, 305, 308, 314, 364, 370, 371, 372, 373,
- 392, 393, 394, 397, 400, 401, 404, 406, 407, 410,
- 414, 418, 419, 420, 421, 423, 425, 435, 440, 454,
- 455, 456, 457, 458, 461, 462, 467, 468, 469, 470,
- 471, 479, 480, 484, 507, 509, 521, 539, 544, 460,
- 287, 288, 426, 427, 300, 301, 556, 557, 286, 516,
- 545, 0, 0, 362, 1196, 1202, 365, 268, 291, 306,
- 1211, 531, 481, 217, 446, 277, 240, 1229, 1231, 202,
- 236, 220, 246, 261, 264, 310, 375, 383, 412, 417,
- 283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
- 379, 253, 416, 1192, 1220, 360, 497, 498, 302, 380,
- 0, 0, 0, 1249, 1234, 496, 0, 1177, 1252, 1146,
- 1165, 1262, 1168, 1171, 1213, 1125, 1191, 399, 1162, 1118,
- 1150, 1120, 1157, 1121, 1148, 1179, 257, 1145, 1236, 1195,
- 1251, 350, 254, 1127, 1151, 413, 1167, 196, 1215, 466,
- 241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
- 411, 486, 405, 1258, 354, 1201, 0, 476, 384, 0,
- 0, 0, 1181, 1240, 1189, 1227, 1176, 1214, 1135, 1200,
- 1253, 1163, 1210, 1254, 309, 237, 311, 195, 396, 477,
- 273, 0, 0, 0, 0, 0, 800, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 228, 0, 0, 235,
- 0, 0, 0, 335, 344, 343, 324, 325, 327, 329,
- 334, 341, 347, 1159, 1207, 1248, 1160, 1209, 252, 307,
- 259, 251, 501, 1259, 1239, 1124, 1188, 1247, 0, 0,
- 219, 1250, 1183, 0, 1212, 0, 1265, 1119, 1203, 0,
- 1122, 1126, 1261, 1243, 1154, 262, 0, 0, 0, 0,
- 0, 0, 0, 1180, 1190, 1224, 1228, 1174, 0, 0,
- 0, 0, 0, 0, 0, 1152, 0, 1199, 0, 0,
- 0, 1131, 1123, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1178, 0, 0, 0, 0,
- 1134, 0, 1153, 1225, 0, 1117, 284, 1128, 385, 244,
- 0, 1232, 1242, 1175, 541, 1246, 1173, 1172, 1219, 1132,
- 1238, 1166, 349, 1130, 316, 191, 215, 0, 1164, 395,
- 441, 453, 1237, 1149, 1158, 242, 1156, 451, 409, 520,
- 223, 271, 438, 415, 449, 422, 274, 1198, 1217, 450,
- 356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
- 537, 558, 216, 247, 403, 483, 523, 473, 381, 502,
- 503, 315, 472, 282, 194, 353, 548, 214, 459, 355,
- 232, 221, 508, 526, 276, 436, 203, 485, 515, 229,
- 463, 0, 0, 560, 205, 513, 482, 377, 312, 313,
- 204, 0, 437, 255, 280, 245, 398, 510, 511, 243,
- 561, 218, 536, 210, 1129, 535, 391, 505, 514, 378,
- 367, 209, 512, 376, 366, 320, 339, 340, 267, 293,
- 429, 359, 430, 292, 294, 387, 386, 388, 198, 524,
- 0, 199, 0, 478, 525, 562, 224, 225, 227, 1144,
- 266, 270, 278, 281, 289, 290, 299, 351, 402, 428,
- 424, 433, 1233, 500, 518, 530, 540, 546, 547, 549,
- 550, 551, 552, 553, 555, 554, 390, 297, 474, 319,
- 357, 1222, 1264, 408, 452, 230, 522, 475, 1139, 1143,
- 1137, 1204, 1138, 1193, 1194, 1140, 1255, 1256, 1257, 563,
- 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
- 574, 575, 576, 577, 578, 579, 580, 0, 1226, 1133,
- 0, 1141, 1142, 1235, 1244, 1245, 581, 368, 465, 519,
- 321, 333, 336, 326, 345, 0, 346, 322, 323, 328,
- 330, 331, 332, 337, 338, 342, 348, 238, 201, 374,
- 382, 499, 298, 206, 207, 208, 492, 493, 494, 495,
- 533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
- 448, 447, 317, 318, 363, 431, 1197, 190, 211, 352,
- 1260, 434, 275, 559, 532, 527, 197, 213, 1136, 249,
- 1147, 1155, 0, 1161, 1169, 1170, 1182, 1184, 1185, 1186,
- 1187, 1205, 1206, 1208, 1216, 1218, 1221, 1223, 1230, 1241,
- 1263, 192, 193, 200, 212, 222, 226, 233, 248, 263,
- 265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
- 371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
- 406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
- 435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
- 468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
- 539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 286, 516, 545, 0, 0, 362, 1196, 1202, 365,
- 268, 291, 306, 1211, 531, 481, 217, 446, 277, 240,
- 1229, 1231, 202, 236, 220, 246, 261, 264, 310, 375,
- 383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
- 488, 489, 491, 379, 253, 416, 1192, 1220, 360, 497,
- 498, 302, 380, 0, 0, 0, 1249, 1234, 496, 0,
- 1177, 1252, 1146, 1165, 1262, 1168, 1171, 1213, 1125, 1191,
- 399, 1162, 1118, 1150, 1120, 1157, 1121, 1148, 1179, 257,
- 1145, 1236, 1195, 1251, 350, 254, 1127, 1151, 413, 1167,
- 196, 1215, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 1258, 354, 1201, 0,
- 476, 384, 0, 0, 0, 1181, 1240, 1189, 1227, 1176,
- 1214, 1135, 1200, 1253, 1163, 1210, 1254, 309, 237, 311,
- 195, 396, 477, 273, 0, 0, 0, 0, 0, 188,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
- 0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
- 325, 327, 329, 334, 341, 347, 1159, 1207, 1248, 1160,
- 1209, 252, 307, 259, 251, 501, 1259, 1239, 1124, 1188,
- 1247, 0, 0, 219, 1250, 1183, 0, 1212, 0, 1265,
- 1119, 1203, 0, 1122, 1126, 1261, 1243, 1154, 262, 0,
- 0, 0, 0, 0, 0, 0, 1180, 1190, 1224, 1228,
- 1174, 0, 0, 0, 0, 0, 0, 0, 1152, 0,
- 1199, 0, 0, 0, 1131, 1123, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1178, 0,
- 0, 0, 0, 1134, 0, 1153, 1225, 0, 1117, 284,
- 1128, 385, 244, 0, 1232, 1242, 1175, 541, 1246, 1173,
- 1172, 1219, 1132, 1238, 1166, 349, 1130, 316, 191, 215,
- 0, 1164, 395, 441, 453, 1237, 1149, 1158, 242, 1156,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 1198, 1217, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 510, 511, 243, 561, 218, 536, 210, 1129, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 339,
- 340, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 1144, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 1233, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 1222, 1264, 408, 452, 230, 522,
- 475, 1139, 1143, 1137, 1204, 1138, 1193, 1194, 1140, 1255,
- 1256, 1257, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 1226, 1133, 0, 1141, 1142, 1235, 1244, 1245, 581,
- 368, 465, 519, 321, 333, 336, 326, 345, 0, 346,
- 322, 323, 328, 330, 331, 332, 337, 338, 342, 348,
- 238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
- 493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
- 279, 528, 295, 448, 447, 317, 318, 363, 431, 1197,
- 190, 211, 352, 1260, 434, 275, 559, 532, 527, 197,
- 213, 1136, 249, 1147, 1155, 0, 1161, 1169, 1170, 1182,
- 1184, 1185, 1186, 1187, 1205, 1206, 1208, 1216, 1218, 1221,
- 1223, 1230, 1241, 1263, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 287, 288, 426, 427,
- 300, 301, 556, 557, 286, 516, 545, 0, 0, 362,
- 1196, 1202, 365, 268, 291, 306, 1211, 531, 481, 217,
- 446, 277, 240, 1229, 1231, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 1192,
- 1220, 360, 497, 498, 302, 380, 0, 0, 0, 0,
- 0, 496, 0, 679, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 666, 0,
- 0, 0, 257, 671, 0, 0, 0, 350, 254, 0,
- 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
- 269, 260, 256, 239, 303, 369, 411, 486, 405, 678,
- 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
- 674, 675, 0, 0, 0, 0, 0, 0, 0, 0,
- 309, 237, 311, 195, 396, 477, 273, 0, 89, 0,
- 0, 816, 800, 766, 767, 804, 817, 818, 819, 820,
- 805, 0, 228, 806, 807, 235, 808, 0, 765, 706,
- 708, 707, 725, 726, 727, 728, 729, 730, 731, 704,
- 813, 821, 822, 0, 252, 307, 259, 251, 501, 0,
- 0, 1927, 1928, 1929, 0, 0, 219, 0, 0, 0,
- 0, 0, 0, 0, 648, 663, 0, 677, 0, 0,
- 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 660, 661, 0,
- 0, 0, 0, 760, 0, 662, 0, 0, 670, 823,
- 824, 825, 826, 827, 828, 829, 830, 831, 832, 833,
- 834, 835, 836, 837, 838, 839, 840, 841, 842, 843,
+ 0, 0, 0, 0, 0, 1192, 0, 0, 0, 0,
+ 1148, 0, 1167, 1239, 0, 1131, 287, 1142, 388, 247,
+ 0, 438, 1246, 1256, 1189, 549, 1260, 1187, 1186, 1233,
+ 1146, 1252, 1180, 352, 1144, 319, 193, 217, 0, 1178,
+ 398, 446, 458, 1251, 1163, 1172, 245, 1170, 456, 412,
+ 527, 225, 274, 443, 418, 454, 426, 277, 1212, 1231,
+ 455, 359, 512, 436, 524, 550, 551, 253, 392, 536,
+ 496, 544, 568, 218, 250, 406, 489, 530, 478, 384,
+ 508, 509, 318, 477, 285, 196, 356, 556, 216, 464,
+ 358, 234, 223, 514, 533, 279, 441, 563, 205, 491,
+ 522, 231, 468, 0, 0, 570, 239, 488, 207, 519,
+ 487, 380, 315, 316, 206, 0, 442, 258, 283, 0,
+ 0, 248, 401, 516, 517, 246, 571, 220, 543, 212,
+ 1143, 542, 394, 511, 520, 381, 370, 211, 518, 379,
+ 369, 323, 342, 343, 270, 296, 433, 362, 434, 295,
+ 297, 390, 389, 391, 200, 531, 0, 201, 0, 483,
+ 532, 572, 226, 227, 229, 1158, 269, 273, 281, 284,
+ 292, 293, 302, 354, 405, 432, 428, 437, 1247, 506,
+ 525, 537, 548, 554, 555, 557, 558, 559, 560, 561,
+ 564, 562, 393, 300, 479, 322, 360, 1236, 1278, 411,
+ 457, 232, 529, 480, 1153, 1157, 1151, 1218, 1152, 1207,
+ 1208, 1154, 1269, 1270, 1271, 573, 574, 575, 576, 577,
+ 578, 579, 580, 581, 582, 583, 584, 585, 586, 587,
+ 588, 589, 590, 0, 1240, 1147, 0, 1155, 1156, 1249,
+ 1258, 1259, 591, 371, 470, 526, 324, 336, 339, 329,
+ 348, 0, 349, 325, 326, 331, 333, 334, 335, 340,
+ 341, 345, 351, 241, 203, 377, 385, 505, 301, 208,
+ 209, 210, 498, 499, 500, 501, 540, 541, 545, 447,
+ 448, 449, 450, 282, 535, 298, 453, 452, 320, 321,
+ 366, 435, 1211, 192, 213, 355, 1274, 439, 278, 569,
+ 539, 534, 199, 215, 1150, 252, 1161, 1169, 0, 1175,
+ 1183, 1184, 1196, 1198, 1199, 1200, 1201, 1219, 1220, 1222,
+ 1230, 1232, 1235, 1237, 1244, 1255, 1277, 194, 195, 202,
+ 214, 224, 228, 235, 251, 266, 268, 275, 288, 299,
+ 307, 308, 311, 317, 367, 373, 374, 375, 376, 395,
+ 396, 397, 400, 403, 404, 407, 409, 410, 413, 417,
+ 421, 422, 423, 425, 427, 429, 440, 445, 459, 460,
+ 461, 462, 463, 466, 467, 472, 473, 474, 475, 476,
+ 484, 485, 490, 513, 515, 528, 546, 552, 465, 290,
+ 291, 430, 431, 303, 304, 566, 567, 289, 523, 553,
+ 521, 565, 547, 424, 365, 1210, 1216, 368, 271, 294,
+ 309, 1225, 538, 486, 219, 451, 280, 243, 1243, 1245,
+ 204, 238, 222, 249, 264, 267, 313, 378, 386, 415,
+ 420, 286, 261, 236, 444, 233, 469, 493, 494, 495,
+ 497, 382, 256, 419, 1206, 1234, 363, 503, 504, 305,
+ 383, 0, 0, 0, 1263, 1248, 502, 0, 1191, 1266,
+ 1160, 1179, 1276, 1182, 1185, 1227, 1139, 1205, 402, 1176,
+ 1132, 1164, 1134, 1171, 1135, 1162, 1193, 260, 1159, 1250,
+ 1209, 1265, 353, 257, 1141, 1165, 416, 1181, 198, 1229,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 1272, 357, 1215, 0, 481, 387,
+ 0, 0, 0, 1195, 1254, 1203, 1241, 1190, 1228, 1149,
+ 1214, 1267, 1177, 1224, 1268, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 190, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 1173, 1221, 1262, 1174, 1223, 255,
+ 310, 262, 254, 507, 1273, 1253, 1138, 1202, 1261, 0,
+ 0, 221, 1264, 1197, 0, 1226, 0, 1279, 1133, 1217,
+ 0, 1136, 1140, 1275, 1257, 1168, 265, 0, 0, 0,
+ 0, 0, 0, 0, 1194, 1204, 1238, 1242, 1188, 0,
+ 0, 0, 0, 0, 2776, 0, 1166, 0, 1213, 0,
+ 0, 0, 1145, 1137, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1192, 0, 0, 0,
+ 0, 1148, 0, 1167, 1239, 0, 1131, 287, 1142, 388,
+ 247, 0, 438, 1246, 1256, 1189, 549, 1260, 1187, 1186,
+ 1233, 1146, 1252, 1180, 352, 1144, 319, 193, 217, 0,
+ 1178, 398, 446, 458, 1251, 1163, 1172, 245, 1170, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 1212,
+ 1231, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 1143, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 1158, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 1247,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 1236, 1278,
+ 411, 457, 232, 529, 480, 1153, 1157, 1151, 1218, 1152,
+ 1207, 1208, 1154, 1269, 1270, 1271, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 1240, 1147, 0, 1155, 1156,
+ 1249, 1258, 1259, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 1211, 192, 213, 355, 1274, 439, 278,
+ 569, 539, 534, 199, 215, 1150, 252, 1161, 1169, 0,
+ 1175, 1183, 1184, 1196, 1198, 1199, 1200, 1201, 1219, 1220,
+ 1222, 1230, 1232, 1235, 1237, 1244, 1255, 1277, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 1210, 1216, 368, 271,
+ 294, 309, 1225, 538, 486, 219, 451, 280, 243, 1243,
+ 1245, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 1206, 1234, 363, 503, 504,
+ 305, 383, 0, 0, 0, 1263, 1248, 502, 0, 1191,
+ 1266, 1160, 1179, 1276, 1182, 1185, 1227, 1139, 1205, 402,
+ 1176, 1132, 1164, 1134, 1171, 1135, 1162, 1193, 260, 1159,
+ 1250, 1209, 1265, 353, 257, 1141, 1165, 416, 1181, 198,
+ 1229, 471, 244, 364, 361, 510, 272, 263, 259, 242,
+ 306, 372, 414, 492, 408, 1272, 357, 1215, 0, 481,
+ 387, 0, 0, 0, 1195, 1254, 1203, 1241, 1190, 1228,
+ 1149, 1214, 1267, 1177, 1224, 1268, 312, 240, 314, 197,
+ 399, 482, 276, 0, 0, 0, 0, 0, 638, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 230, 0,
+ 0, 237, 0, 0, 0, 338, 347, 346, 327, 328,
+ 330, 332, 337, 344, 350, 1173, 1221, 1262, 1174, 1223,
+ 255, 310, 262, 254, 507, 1273, 1253, 1138, 1202, 1261,
+ 0, 0, 221, 1264, 1197, 0, 1226, 0, 1279, 1133,
+ 1217, 0, 1136, 1140, 1275, 1257, 1168, 265, 0, 0,
+ 0, 0, 0, 0, 0, 1194, 1204, 1238, 1242, 1188,
+ 0, 0, 0, 0, 0, 2737, 0, 1166, 0, 1213,
+ 0, 0, 0, 1145, 1137, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1192, 0, 0,
+ 0, 0, 1148, 0, 1167, 1239, 0, 1131, 287, 1142,
+ 388, 247, 0, 438, 1246, 1256, 1189, 549, 1260, 1187,
+ 1186, 1233, 1146, 1252, 1180, 352, 1144, 319, 193, 217,
+ 0, 1178, 398, 446, 458, 1251, 1163, 1172, 245, 1170,
+ 456, 412, 527, 225, 274, 443, 418, 454, 426, 277,
+ 1212, 1231, 455, 359, 512, 436, 524, 550, 551, 253,
+ 392, 536, 496, 544, 568, 218, 250, 406, 489, 530,
+ 478, 384, 508, 509, 318, 477, 285, 196, 356, 556,
+ 216, 464, 358, 234, 223, 514, 533, 279, 441, 563,
+ 205, 491, 522, 231, 468, 0, 0, 570, 239, 488,
+ 207, 519, 487, 380, 315, 316, 206, 0, 442, 258,
+ 283, 0, 0, 248, 401, 516, 517, 246, 571, 220,
+ 543, 212, 1143, 542, 394, 511, 520, 381, 370, 211,
+ 518, 379, 369, 323, 342, 343, 270, 296, 433, 362,
+ 434, 295, 297, 390, 389, 391, 200, 531, 0, 201,
+ 0, 483, 532, 572, 226, 227, 229, 1158, 269, 273,
+ 281, 284, 292, 293, 302, 354, 405, 432, 428, 437,
+ 1247, 506, 525, 537, 548, 554, 555, 557, 558, 559,
+ 560, 561, 564, 562, 393, 300, 479, 322, 360, 1236,
+ 1278, 411, 457, 232, 529, 480, 1153, 1157, 1151, 1218,
+ 1152, 1207, 1208, 1154, 1269, 1270, 1271, 573, 574, 575,
+ 576, 577, 578, 579, 580, 581, 582, 583, 584, 585,
+ 586, 587, 588, 589, 590, 0, 1240, 1147, 0, 1155,
+ 1156, 1249, 1258, 1259, 591, 371, 470, 526, 324, 336,
+ 339, 329, 348, 0, 349, 325, 326, 331, 333, 334,
+ 335, 340, 341, 345, 351, 241, 203, 377, 385, 505,
+ 301, 208, 209, 210, 498, 499, 500, 501, 540, 541,
+ 545, 447, 448, 449, 450, 282, 535, 298, 453, 452,
+ 320, 321, 366, 435, 1211, 192, 213, 355, 1274, 439,
+ 278, 569, 539, 534, 199, 215, 1150, 252, 1161, 1169,
+ 0, 1175, 1183, 1184, 1196, 1198, 1199, 1200, 1201, 1219,
+ 1220, 1222, 1230, 1232, 1235, 1237, 1244, 1255, 1277, 194,
+ 195, 202, 214, 224, 228, 235, 251, 266, 268, 275,
+ 288, 299, 307, 308, 311, 317, 367, 373, 374, 375,
+ 376, 395, 396, 397, 400, 403, 404, 407, 409, 410,
+ 413, 417, 421, 422, 423, 425, 427, 429, 440, 445,
+ 459, 460, 461, 462, 463, 466, 467, 472, 473, 474,
+ 475, 476, 484, 485, 490, 513, 515, 528, 546, 552,
+ 465, 290, 291, 430, 431, 303, 304, 566, 567, 289,
+ 523, 553, 521, 565, 547, 424, 365, 1210, 1216, 368,
+ 271, 294, 309, 1225, 538, 486, 219, 451, 280, 243,
+ 1243, 1245, 204, 238, 222, 249, 264, 267, 313, 378,
+ 386, 415, 420, 286, 261, 236, 444, 233, 469, 493,
+ 494, 495, 497, 382, 256, 419, 1206, 1234, 363, 503,
+ 504, 305, 383, 0, 0, 0, 1263, 1248, 502, 0,
+ 1191, 1266, 1160, 1179, 1276, 1182, 1185, 1227, 1139, 1205,
+ 402, 1176, 1132, 1164, 1134, 1171, 1135, 1162, 1193, 260,
+ 1159, 1250, 1209, 1265, 353, 257, 1141, 1165, 416, 1181,
+ 198, 1229, 471, 244, 364, 361, 510, 272, 263, 259,
+ 242, 306, 372, 414, 492, 408, 1272, 357, 1215, 0,
+ 481, 387, 0, 0, 0, 1195, 1254, 1203, 1241, 1190,
+ 1228, 1149, 1214, 1267, 1177, 1224, 1268, 312, 240, 314,
+ 197, 399, 482, 276, 0, 0, 0, 0, 0, 812,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 230,
+ 0, 0, 237, 0, 0, 0, 338, 347, 346, 327,
+ 328, 330, 332, 337, 344, 350, 1173, 1221, 1262, 1174,
+ 1223, 255, 310, 262, 254, 507, 1273, 1253, 1138, 1202,
+ 1261, 0, 0, 221, 1264, 1197, 0, 1226, 0, 1279,
+ 1133, 1217, 0, 1136, 1140, 1275, 1257, 1168, 265, 0,
+ 0, 0, 0, 0, 0, 0, 1194, 1204, 1238, 1242,
+ 1188, 0, 0, 0, 0, 0, 2084, 0, 1166, 0,
+ 1213, 0, 0, 0, 1145, 1137, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1192, 0,
+ 0, 0, 0, 1148, 0, 1167, 1239, 0, 1131, 287,
+ 1142, 388, 247, 0, 438, 1246, 1256, 1189, 549, 1260,
+ 1187, 1186, 1233, 1146, 1252, 1180, 352, 1144, 319, 193,
+ 217, 0, 1178, 398, 446, 458, 1251, 1163, 1172, 245,
+ 1170, 456, 412, 527, 225, 274, 443, 418, 454, 426,
+ 277, 1212, 1231, 455, 359, 512, 436, 524, 550, 551,
+ 253, 392, 536, 496, 544, 568, 218, 250, 406, 489,
+ 530, 478, 384, 508, 509, 318, 477, 285, 196, 356,
+ 556, 216, 464, 358, 234, 223, 514, 533, 279, 441,
+ 563, 205, 491, 522, 231, 468, 0, 0, 570, 239,
+ 488, 207, 519, 487, 380, 315, 316, 206, 0, 442,
+ 258, 283, 0, 0, 248, 401, 516, 517, 246, 571,
+ 220, 543, 212, 1143, 542, 394, 511, 520, 381, 370,
+ 211, 518, 379, 369, 323, 342, 343, 270, 296, 433,
+ 362, 434, 295, 297, 390, 389, 391, 200, 531, 0,
+ 201, 0, 483, 532, 572, 226, 227, 229, 1158, 269,
+ 273, 281, 284, 292, 293, 302, 354, 405, 432, 428,
+ 437, 1247, 506, 525, 537, 548, 554, 555, 557, 558,
+ 559, 560, 561, 564, 562, 393, 300, 479, 322, 360,
+ 1236, 1278, 411, 457, 232, 529, 480, 1153, 1157, 1151,
+ 1218, 1152, 1207, 1208, 1154, 1269, 1270, 1271, 573, 574,
+ 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 0, 1240, 1147, 0,
+ 1155, 1156, 1249, 1258, 1259, 591, 371, 470, 526, 324,
+ 336, 339, 329, 348, 0, 349, 325, 326, 331, 333,
+ 334, 335, 340, 341, 345, 351, 241, 203, 377, 385,
+ 505, 301, 208, 209, 210, 498, 499, 500, 501, 540,
+ 541, 545, 447, 448, 449, 450, 282, 535, 298, 453,
+ 452, 320, 321, 366, 435, 1211, 192, 213, 355, 1274,
+ 439, 278, 569, 539, 534, 199, 215, 1150, 252, 1161,
+ 1169, 0, 1175, 1183, 1184, 1196, 1198, 1199, 1200, 1201,
+ 1219, 1220, 1222, 1230, 1232, 1235, 1237, 1244, 1255, 1277,
+ 194, 195, 202, 214, 224, 228, 235, 251, 266, 268,
+ 275, 288, 299, 307, 308, 311, 317, 367, 373, 374,
+ 375, 376, 395, 396, 397, 400, 403, 404, 407, 409,
+ 410, 413, 417, 421, 422, 423, 425, 427, 429, 440,
+ 445, 459, 460, 461, 462, 463, 466, 467, 472, 473,
+ 474, 475, 476, 484, 485, 490, 513, 515, 528, 546,
+ 552, 465, 290, 291, 430, 431, 303, 304, 566, 567,
+ 289, 523, 553, 521, 565, 547, 424, 365, 1210, 1216,
+ 368, 271, 294, 309, 1225, 538, 486, 219, 451, 280,
+ 243, 1243, 1245, 204, 238, 222, 249, 264, 267, 313,
+ 378, 386, 415, 420, 286, 261, 236, 444, 233, 469,
+ 493, 494, 495, 497, 382, 256, 419, 1206, 1234, 363,
+ 503, 504, 305, 383, 0, 0, 0, 1263, 1248, 502,
+ 0, 1191, 1266, 1160, 1179, 1276, 1182, 1185, 1227, 1139,
+ 1205, 402, 1176, 1132, 1164, 1134, 1171, 1135, 1162, 1193,
+ 260, 1159, 1250, 1209, 1265, 353, 257, 1141, 1165, 416,
+ 1181, 198, 1229, 471, 244, 364, 361, 510, 272, 263,
+ 259, 242, 306, 372, 414, 492, 408, 1272, 357, 1215,
+ 0, 481, 387, 0, 0, 0, 1195, 1254, 1203, 1241,
+ 1190, 1228, 1149, 1214, 1267, 1177, 1224, 1268, 312, 240,
+ 314, 197, 399, 482, 276, 0, 91, 0, 0, 0,
+ 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 230, 0, 0, 237, 0, 0, 0, 338, 347, 346,
+ 327, 328, 330, 332, 337, 344, 350, 1173, 1221, 1262,
+ 1174, 1223, 255, 310, 262, 254, 507, 1273, 1253, 1138,
+ 1202, 1261, 0, 0, 221, 1264, 1197, 0, 1226, 0,
+ 1279, 1133, 1217, 0, 1136, 1140, 1275, 1257, 1168, 265,
+ 0, 0, 0, 0, 0, 0, 0, 1194, 1204, 1238,
+ 1242, 1188, 0, 0, 0, 0, 0, 0, 0, 1166,
+ 0, 1213, 0, 0, 0, 1145, 1137, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1192,
+ 0, 0, 0, 0, 1148, 0, 1167, 1239, 0, 1131,
+ 287, 1142, 388, 247, 0, 438, 1246, 1256, 1189, 549,
+ 1260, 1187, 1186, 1233, 1146, 1252, 1180, 352, 1144, 319,
+ 193, 217, 0, 1178, 398, 446, 458, 1251, 1163, 1172,
+ 245, 1170, 456, 412, 527, 225, 274, 443, 418, 454,
+ 426, 277, 1212, 1231, 455, 359, 512, 436, 524, 550,
+ 551, 253, 392, 536, 496, 544, 568, 218, 250, 406,
+ 489, 530, 478, 384, 508, 509, 318, 477, 285, 196,
+ 356, 556, 216, 464, 358, 234, 223, 514, 533, 279,
+ 441, 563, 205, 491, 522, 231, 468, 0, 0, 570,
+ 239, 488, 207, 519, 487, 380, 315, 316, 206, 0,
+ 442, 258, 283, 0, 0, 248, 401, 516, 517, 246,
+ 571, 220, 543, 212, 1143, 542, 394, 511, 520, 381,
+ 370, 211, 518, 379, 369, 323, 342, 343, 270, 296,
+ 433, 362, 434, 295, 297, 390, 389, 391, 200, 531,
+ 0, 201, 0, 483, 532, 572, 226, 227, 229, 1158,
+ 269, 273, 281, 284, 292, 293, 302, 354, 405, 432,
+ 428, 437, 1247, 506, 525, 537, 548, 554, 555, 557,
+ 558, 559, 560, 561, 564, 562, 393, 300, 479, 322,
+ 360, 1236, 1278, 411, 457, 232, 529, 480, 1153, 1157,
+ 1151, 1218, 1152, 1207, 1208, 1154, 1269, 1270, 1271, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
+ 584, 585, 586, 587, 588, 589, 590, 0, 1240, 1147,
+ 0, 1155, 1156, 1249, 1258, 1259, 591, 371, 470, 526,
+ 324, 336, 339, 329, 348, 0, 349, 325, 326, 331,
+ 333, 334, 335, 340, 341, 345, 351, 241, 203, 377,
+ 385, 505, 301, 208, 209, 210, 498, 499, 500, 501,
+ 540, 541, 545, 447, 448, 449, 450, 282, 535, 298,
+ 453, 452, 320, 321, 366, 435, 1211, 192, 213, 355,
+ 1274, 439, 278, 569, 539, 534, 199, 215, 1150, 252,
+ 1161, 1169, 0, 1175, 1183, 1184, 1196, 1198, 1199, 1200,
+ 1201, 1219, 1220, 1222, 1230, 1232, 1235, 1237, 1244, 1255,
+ 1277, 194, 195, 202, 214, 224, 228, 235, 251, 266,
+ 268, 275, 288, 299, 307, 308, 311, 317, 367, 373,
+ 374, 375, 376, 395, 396, 397, 400, 403, 404, 407,
+ 409, 410, 413, 417, 421, 422, 423, 425, 427, 429,
+ 440, 445, 459, 460, 461, 462, 463, 466, 467, 472,
+ 473, 474, 475, 476, 484, 485, 490, 513, 515, 528,
+ 546, 552, 465, 290, 291, 430, 431, 303, 304, 566,
+ 567, 289, 523, 553, 521, 565, 547, 424, 365, 1210,
+ 1216, 368, 271, 294, 309, 1225, 538, 486, 219, 451,
+ 280, 243, 1243, 1245, 204, 238, 222, 249, 264, 267,
+ 313, 378, 386, 415, 420, 286, 261, 236, 444, 233,
+ 469, 493, 494, 495, 497, 382, 256, 419, 1206, 1234,
+ 363, 503, 504, 305, 383, 0, 0, 0, 1263, 1248,
+ 502, 0, 1191, 1266, 1160, 1179, 1276, 1182, 1185, 1227,
+ 1139, 1205, 402, 1176, 1132, 1164, 1134, 1171, 1135, 1162,
+ 1193, 260, 1159, 1250, 1209, 1265, 353, 257, 1141, 1165,
+ 416, 1181, 198, 1229, 471, 244, 364, 361, 510, 272,
+ 263, 259, 242, 306, 372, 414, 492, 408, 1272, 357,
+ 1215, 0, 481, 387, 0, 0, 0, 1195, 1254, 1203,
+ 1241, 1190, 1228, 1149, 1214, 1267, 1177, 1224, 1268, 312,
+ 240, 314, 197, 399, 482, 276, 0, 0, 0, 0,
+ 0, 190, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 230, 0, 0, 237, 0, 0, 0, 338, 347,
+ 346, 327, 328, 330, 332, 337, 344, 350, 1173, 1221,
+ 1262, 1174, 1223, 255, 310, 262, 254, 507, 1273, 1253,
+ 1138, 1202, 1261, 0, 0, 221, 1264, 1197, 0, 1226,
+ 0, 1279, 1133, 1217, 0, 1136, 1140, 1275, 1257, 1168,
+ 265, 0, 0, 0, 0, 0, 0, 0, 1194, 1204,
+ 1238, 1242, 1188, 0, 0, 0, 0, 0, 0, 0,
+ 1166, 0, 1213, 0, 0, 0, 1145, 1137, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1192, 0, 0, 0, 0, 1148, 0, 1167, 1239, 0,
+ 1131, 287, 1142, 388, 247, 0, 438, 1246, 1256, 1189,
+ 549, 1260, 1187, 1186, 1233, 1146, 1252, 1180, 352, 1144,
+ 319, 193, 217, 0, 1178, 398, 446, 458, 1251, 1163,
+ 1172, 245, 1170, 456, 412, 527, 225, 274, 443, 418,
+ 454, 426, 277, 1212, 1231, 455, 359, 512, 436, 524,
+ 550, 551, 253, 392, 536, 496, 544, 568, 218, 250,
+ 406, 489, 530, 478, 384, 508, 509, 318, 477, 285,
+ 196, 356, 556, 216, 464, 358, 234, 223, 514, 533,
+ 279, 441, 563, 205, 491, 522, 231, 468, 0, 0,
+ 570, 239, 488, 207, 519, 487, 380, 315, 316, 206,
+ 0, 442, 258, 283, 0, 0, 248, 401, 516, 517,
+ 246, 571, 220, 543, 212, 1143, 542, 394, 511, 520,
+ 381, 370, 211, 518, 379, 369, 323, 342, 343, 270,
+ 296, 433, 362, 434, 295, 297, 390, 389, 391, 200,
+ 531, 0, 201, 0, 483, 532, 572, 226, 227, 229,
+ 1158, 269, 273, 281, 284, 292, 293, 302, 354, 405,
+ 432, 428, 437, 1247, 506, 525, 537, 548, 554, 555,
+ 557, 558, 559, 560, 561, 564, 562, 393, 300, 479,
+ 322, 360, 1236, 1278, 411, 457, 232, 529, 480, 1153,
+ 1157, 1151, 1218, 1152, 1207, 1208, 1154, 1269, 1270, 1271,
+ 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 583, 584, 585, 586, 587, 588, 589, 590, 0, 1240,
+ 1147, 0, 1155, 1156, 1249, 1258, 1259, 591, 371, 470,
+ 526, 324, 336, 339, 329, 348, 0, 349, 325, 326,
+ 331, 333, 334, 335, 340, 341, 345, 351, 241, 203,
+ 377, 385, 505, 301, 208, 209, 210, 498, 499, 500,
+ 501, 540, 541, 545, 447, 448, 449, 450, 282, 535,
+ 298, 453, 452, 320, 321, 366, 435, 1211, 192, 213,
+ 355, 1274, 439, 278, 569, 539, 534, 199, 215, 1150,
+ 252, 1161, 1169, 0, 1175, 1183, 1184, 1196, 1198, 1199,
+ 1200, 1201, 1219, 1220, 1222, 1230, 1232, 1235, 1237, 1244,
+ 1255, 1277, 194, 195, 202, 214, 224, 228, 235, 251,
+ 266, 268, 275, 288, 299, 307, 308, 311, 317, 367,
+ 373, 374, 375, 376, 395, 396, 397, 400, 403, 404,
+ 407, 409, 410, 413, 417, 421, 422, 423, 425, 427,
+ 429, 440, 445, 459, 460, 461, 462, 463, 466, 467,
+ 472, 473, 474, 475, 476, 484, 485, 490, 513, 515,
+ 528, 546, 552, 465, 290, 291, 430, 431, 303, 304,
+ 566, 567, 289, 523, 553, 521, 565, 547, 424, 365,
+ 1210, 1216, 368, 271, 294, 309, 1225, 538, 486, 219,
+ 451, 280, 243, 1243, 1245, 204, 238, 222, 249, 264,
+ 267, 313, 378, 386, 415, 420, 286, 261, 236, 444,
+ 233, 469, 493, 494, 495, 497, 382, 256, 419, 1206,
+ 1234, 363, 503, 504, 305, 383, 0, 0, 0, 1263,
+ 1248, 502, 0, 1191, 1266, 1160, 1179, 1276, 1182, 1185,
+ 1227, 1139, 1205, 402, 1176, 1132, 1164, 1134, 1171, 1135,
+ 1162, 1193, 260, 1159, 1250, 1209, 1265, 353, 257, 1141,
+ 1165, 416, 1181, 198, 1229, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 1272,
+ 357, 1215, 0, 481, 387, 0, 0, 0, 1195, 1254,
+ 1203, 1241, 1190, 1228, 1149, 1214, 1267, 1177, 1224, 1268,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 1173,
+ 1221, 1262, 1174, 1223, 255, 310, 262, 254, 507, 1273,
+ 1253, 1138, 1202, 1261, 0, 0, 221, 1264, 1197, 0,
+ 1226, 0, 1279, 1133, 1217, 0, 1136, 1140, 1275, 1257,
+ 1168, 265, 0, 0, 0, 0, 0, 0, 0, 1194,
+ 1204, 1238, 1242, 1188, 0, 0, 0, 0, 0, 0,
+ 0, 1166, 0, 1213, 0, 0, 0, 1145, 1137, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1192, 0, 0, 0, 0, 1148, 0, 1167, 1239,
+ 0, 1131, 287, 1142, 388, 247, 0, 438, 1246, 1256,
+ 1189, 549, 1260, 1187, 1186, 1233, 1146, 1252, 1180, 352,
+ 1144, 319, 193, 217, 0, 1178, 398, 446, 458, 1251,
+ 1163, 1172, 245, 1170, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 1212, 1231, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 1143, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 1158, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 1247, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 1236, 1278, 411, 457, 232, 529, 480,
+ 1153, 1157, 1151, 1218, 1152, 1207, 1208, 1154, 1269, 1270,
+ 1271, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 1240, 1147, 0, 1155, 1156, 1249, 1258, 1259, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 1211, 192,
+ 213, 355, 1274, 439, 278, 569, 539, 534, 199, 215,
+ 1150, 252, 1161, 1169, 0, 1175, 1183, 1184, 1196, 1198,
+ 1199, 1200, 1201, 1219, 1220, 1222, 1230, 1232, 1235, 1237,
+ 1244, 1255, 1277, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 1210, 1216, 368, 271, 294, 309, 1225, 538, 486,
+ 219, 451, 280, 243, 1243, 1245, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 1206, 1234, 363, 503, 504, 305, 383, 0, 0, 0,
+ 1263, 1248, 502, 0, 1191, 1266, 1160, 1179, 1276, 1182,
+ 1185, 1227, 1139, 1205, 402, 1176, 1132, 1164, 1134, 1171,
+ 1135, 1162, 1193, 260, 1159, 1250, 1209, 1265, 353, 257,
+ 1141, 1165, 416, 1181, 198, 1229, 471, 244, 364, 361,
+ 510, 272, 263, 259, 242, 306, 372, 414, 492, 408,
+ 1272, 357, 1215, 0, 481, 387, 0, 0, 0, 1195,
+ 1254, 1203, 1241, 1190, 1228, 1149, 1214, 1267, 1177, 1224,
+ 1268, 312, 240, 314, 197, 399, 482, 276, 0, 0,
+ 0, 0, 0, 812, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 230, 0, 0, 237, 0, 0, 0,
+ 338, 347, 346, 327, 328, 330, 332, 337, 344, 350,
+ 1173, 1221, 1262, 1174, 1223, 255, 310, 262, 254, 507,
+ 1273, 1253, 1138, 1202, 1261, 0, 0, 221, 1264, 1197,
+ 0, 1226, 0, 1279, 1133, 1217, 0, 1136, 1140, 1275,
+ 1257, 1168, 265, 0, 0, 0, 0, 0, 0, 0,
+ 1194, 1204, 1238, 1242, 1188, 0, 0, 0, 0, 0,
+ 0, 0, 1166, 0, 1213, 0, 0, 0, 1145, 1137,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1192, 0, 0, 0, 0, 1148, 0, 1167,
+ 1239, 0, 1131, 287, 1142, 388, 247, 0, 438, 1246,
+ 1256, 1189, 549, 1260, 1187, 1186, 1233, 1146, 1252, 1180,
+ 352, 1144, 319, 193, 217, 0, 1178, 398, 446, 458,
+ 1251, 1163, 1172, 245, 1170, 456, 412, 527, 225, 274,
+ 443, 418, 454, 426, 277, 1212, 1231, 455, 359, 512,
+ 436, 524, 550, 551, 253, 392, 536, 496, 544, 568,
+ 218, 250, 406, 489, 530, 478, 384, 508, 509, 318,
+ 477, 285, 196, 356, 556, 216, 464, 358, 234, 223,
+ 514, 533, 279, 441, 563, 205, 491, 522, 231, 468,
+ 0, 0, 570, 239, 488, 207, 519, 487, 380, 315,
+ 316, 206, 0, 442, 258, 283, 0, 0, 248, 401,
+ 516, 517, 246, 571, 220, 543, 212, 1143, 542, 394,
+ 511, 520, 381, 370, 211, 518, 379, 369, 323, 342,
+ 343, 270, 296, 433, 362, 434, 295, 297, 390, 389,
+ 391, 200, 531, 0, 201, 0, 483, 532, 572, 226,
+ 227, 229, 1158, 269, 273, 281, 284, 292, 293, 302,
+ 354, 405, 432, 428, 437, 1247, 506, 525, 537, 548,
+ 554, 555, 557, 558, 559, 560, 561, 564, 562, 393,
+ 300, 479, 322, 360, 1236, 1278, 411, 457, 232, 529,
+ 480, 1153, 1157, 1151, 1218, 1152, 1207, 1208, 1154, 1269,
+ 1270, 1271, 573, 574, 575, 576, 577, 578, 579, 580,
+ 581, 582, 583, 584, 585, 586, 587, 588, 589, 590,
+ 0, 1240, 1147, 0, 1155, 1156, 1249, 1258, 1259, 591,
+ 371, 470, 526, 324, 336, 339, 329, 348, 0, 349,
+ 325, 326, 331, 333, 334, 335, 340, 341, 345, 351,
+ 241, 203, 377, 385, 505, 301, 208, 209, 210, 498,
+ 499, 500, 501, 540, 541, 545, 447, 448, 449, 450,
+ 282, 535, 298, 453, 452, 320, 321, 366, 435, 1211,
+ 192, 213, 355, 1274, 439, 278, 569, 539, 534, 199,
+ 215, 1150, 252, 1161, 1169, 0, 1175, 1183, 1184, 1196,
+ 1198, 1199, 1200, 1201, 1219, 1220, 1222, 1230, 1232, 1235,
+ 1237, 1244, 1255, 1277, 194, 195, 202, 214, 224, 228,
+ 235, 251, 266, 268, 275, 288, 299, 307, 308, 311,
+ 317, 367, 373, 374, 375, 376, 395, 396, 397, 400,
+ 403, 404, 407, 409, 410, 413, 417, 421, 422, 423,
+ 425, 427, 429, 440, 445, 459, 460, 461, 462, 463,
+ 466, 467, 472, 473, 474, 475, 476, 484, 485, 490,
+ 513, 515, 528, 546, 552, 465, 290, 291, 430, 431,
+ 303, 304, 566, 567, 289, 523, 553, 521, 565, 547,
+ 424, 365, 1210, 1216, 368, 271, 294, 309, 1225, 538,
+ 486, 219, 451, 280, 243, 1243, 1245, 204, 238, 222,
+ 249, 264, 267, 313, 378, 386, 415, 420, 286, 261,
+ 236, 444, 233, 469, 493, 494, 495, 497, 382, 256,
+ 419, 1206, 1234, 363, 503, 504, 305, 383, 0, 0,
+ 0, 0, 0, 502, 0, 691, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 402, 0, 0, 0, 0,
+ 678, 0, 0, 0, 260, 683, 0, 0, 0, 353,
+ 257, 0, 0, 416, 0, 198, 0, 471, 244, 364,
+ 361, 510, 272, 263, 259, 242, 306, 372, 414, 492,
+ 408, 690, 357, 0, 0, 481, 387, 0, 0, 0,
+ 0, 0, 686, 687, 0, 0, 0, 0, 0, 0,
+ 0, 0, 312, 240, 314, 197, 399, 482, 276, 0,
+ 91, 0, 0, 828, 812, 778, 779, 816, 829, 830,
+ 831, 832, 817, 0, 230, 818, 819, 237, 820, 0,
+ 777, 718, 720, 719, 737, 738, 739, 740, 741, 742,
+ 743, 716, 825, 833, 834, 0, 255, 310, 262, 254,
+ 507, 0, 0, 1961, 1962, 1963, 0, 0, 221, 0,
+ 0, 0, 0, 0, 0, 0, 660, 675, 0, 689,
+ 0, 0, 0, 265, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 672,
+ 673, 0, 0, 0, 0, 772, 0, 674, 0, 0,
+ 682, 835, 836, 837, 838, 839, 840, 841, 842, 843,
844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
854, 855, 856, 857, 858, 859, 860, 861, 862, 863,
- 864, 673, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 284, 0, 385, 244, 0, 759, 0, 0,
- 541, 0, 0, 757, 0, 0, 0, 0, 349, 0,
- 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
- 0, 810, 0, 451, 409, 520, 223, 271, 438, 415,
- 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
- 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
- 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
- 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
- 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
- 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
- 280, 245, 398, 811, 812, 243, 561, 712, 536, 210,
- 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
- 366, 320, 720, 721, 267, 293, 429, 359, 430, 292,
- 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
- 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
- 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
- 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
- 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
- 452, 230, 522, 475, 770, 758, 683, 774, 685, 771,
- 772, 680, 681, 684, 773, 563, 564, 565, 566, 567,
- 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 580, 0, 761, 669, 668, 0, 676, 0,
- 702, 703, 705, 709, 710, 711, 722, 723, 724, 732,
- 734, 735, 733, 736, 737, 738, 741, 742, 743, 744,
- 739, 740, 745, 686, 690, 687, 688, 689, 701, 691,
- 692, 693, 694, 695, 696, 697, 698, 699, 700, 784,
- 785, 786, 787, 788, 789, 715, 719, 718, 716, 717,
- 713, 714, 667, 190, 211, 352, 0, 434, 275, 559,
- 532, 527, 197, 213, 775, 249, 776, 0, 0, 780,
- 0, 0, 0, 782, 781, 0, 783, 749, 748, 0,
- 0, 777, 778, 0, 779, 0, 0, 192, 193, 200,
- 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
- 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
- 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
- 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
- 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
- 479, 480, 484, 507, 509, 521, 539, 544, 460, 790,
- 791, 792, 793, 794, 795, 796, 797, 286, 516, 545,
- 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
- 531, 481, 217, 446, 277, 240, 815, 0, 202, 236,
- 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
- 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
- 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
- 679, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 666, 0, 0, 0, 257,
- 671, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 678, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 674, 675, 0,
- 0, 0, 0, 0, 0, 2077, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 89, 0, 0, 816, 800,
- 766, 767, 804, 817, 818, 819, 820, 805, 0, 228,
- 806, 807, 235, 808, 0, 765, 706, 708, 707, 725,
- 726, 727, 728, 729, 730, 731, 704, 813, 821, 822,
- 2078, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 0, 0, 0, 0, 0,
- 0, 648, 663, 0, 677, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 660, 661, 0, 0, 0, 0,
- 760, 0, 662, 0, 0, 670, 823, 824, 825, 826,
- 827, 828, 829, 830, 831, 832, 833, 834, 835, 836,
- 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
- 847, 848, 849, 850, 851, 852, 853, 854, 855, 856,
- 857, 858, 859, 860, 861, 862, 863, 864, 673, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 759, 0, 0, 541, 0, 0,
- 757, 0, 0, 0, 0, 349, 0, 316, 191, 215,
- 0, 0, 395, 441, 453, 0, 0, 0, 810, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 811, 812, 243, 561, 712, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 720,
- 721, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 770, 758, 683, 774, 685, 771, 772, 680, 681,
- 684, 773, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 761, 669, 668, 0, 676, 0, 702, 703, 705,
- 709, 710, 711, 722, 723, 724, 732, 734, 735, 733,
- 736, 737, 738, 741, 742, 743, 744, 739, 740, 745,
- 686, 690, 687, 688, 689, 701, 691, 692, 693, 694,
- 695, 696, 697, 698, 699, 700, 784, 785, 786, 787,
- 788, 789, 715, 719, 718, 716, 717, 713, 714, 667,
- 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
- 213, 775, 249, 776, 0, 0, 780, 0, 0, 0,
- 782, 781, 0, 783, 749, 748, 0, 0, 777, 778,
- 0, 779, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 790, 791, 792, 793,
- 794, 795, 796, 797, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 815, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 0,
- 380, 360, 497, 498, 302, 80, 496, 0, 679, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 0, 0, 0, 666, 0, 0, 0, 257, 671, 0,
- 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 678, 354, 0, 0, 476, 384,
- 0, 0, 0, 0, 0, 674, 675, 0, 0, 0,
- 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 89, 0, 0, 816, 800, 766, 767,
- 804, 817, 818, 819, 820, 805, 0, 228, 806, 807,
- 235, 808, 0, 765, 706, 708, 707, 725, 726, 727,
- 728, 729, 730, 731, 704, 813, 821, 822, 0, 252,
- 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
- 0, 219, 0, 0, 0, 0, 0, 0, 0, 648,
- 663, 0, 677, 0, 0, 0, 262, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 660, 661, 0, 0, 0, 0, 760, 0,
- 662, 0, 0, 670, 823, 824, 825, 826, 827, 828,
- 829, 830, 831, 832, 833, 834, 835, 836, 837, 838,
+ 864, 865, 866, 867, 868, 869, 870, 871, 872, 873,
+ 874, 875, 876, 685, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 287, 0, 388, 247, 0, 438,
+ 771, 0, 0, 549, 0, 0, 769, 0, 0, 0,
+ 0, 352, 0, 319, 193, 217, 0, 0, 398, 446,
+ 458, 0, 0, 0, 822, 0, 456, 412, 527, 225,
+ 274, 443, 418, 454, 426, 277, 0, 0, 455, 359,
+ 512, 436, 524, 550, 551, 253, 392, 536, 496, 544,
+ 568, 218, 250, 406, 489, 530, 478, 384, 508, 509,
+ 318, 477, 285, 196, 356, 556, 216, 464, 358, 234,
+ 223, 514, 533, 279, 441, 563, 205, 491, 522, 231,
+ 468, 0, 0, 570, 239, 488, 207, 519, 487, 380,
+ 315, 316, 206, 0, 442, 258, 283, 0, 0, 248,
+ 401, 823, 824, 246, 571, 724, 543, 212, 0, 542,
+ 394, 511, 520, 381, 370, 211, 518, 379, 369, 323,
+ 732, 733, 270, 296, 433, 362, 434, 295, 297, 390,
+ 389, 391, 200, 531, 0, 201, 0, 483, 532, 572,
+ 226, 227, 229, 0, 269, 273, 281, 284, 292, 293,
+ 302, 354, 405, 432, 428, 437, 0, 506, 525, 537,
+ 548, 554, 555, 557, 558, 559, 560, 561, 564, 562,
+ 393, 300, 479, 322, 360, 0, 0, 411, 457, 232,
+ 529, 480, 782, 770, 695, 786, 697, 783, 784, 692,
+ 693, 696, 785, 573, 574, 575, 576, 577, 578, 579,
+ 580, 581, 582, 583, 584, 585, 586, 587, 588, 589,
+ 590, 0, 773, 681, 680, 0, 688, 0, 714, 715,
+ 717, 721, 722, 723, 734, 735, 736, 744, 746, 747,
+ 745, 748, 749, 750, 753, 754, 755, 756, 751, 752,
+ 757, 698, 702, 699, 700, 701, 713, 703, 704, 705,
+ 706, 707, 708, 709, 710, 711, 712, 796, 797, 798,
+ 799, 800, 801, 727, 731, 730, 728, 729, 725, 726,
+ 679, 192, 213, 355, 0, 439, 278, 569, 539, 534,
+ 199, 215, 787, 252, 788, 0, 0, 792, 0, 0,
+ 0, 794, 793, 0, 795, 761, 760, 0, 0, 789,
+ 790, 0, 791, 0, 0, 194, 195, 202, 214, 224,
+ 228, 235, 251, 266, 268, 275, 288, 299, 307, 308,
+ 311, 317, 367, 373, 374, 375, 376, 395, 396, 397,
+ 400, 403, 404, 407, 409, 410, 413, 417, 421, 422,
+ 423, 425, 427, 429, 440, 445, 459, 460, 461, 462,
+ 463, 466, 467, 472, 473, 474, 475, 476, 484, 485,
+ 490, 513, 515, 528, 546, 552, 465, 802, 803, 804,
+ 805, 806, 807, 808, 809, 289, 523, 553, 521, 565,
+ 547, 424, 365, 0, 0, 368, 271, 294, 309, 0,
+ 538, 486, 219, 451, 280, 243, 827, 0, 204, 238,
+ 222, 249, 264, 267, 313, 378, 386, 415, 420, 286,
+ 261, 236, 444, 233, 469, 493, 494, 495, 497, 382,
+ 256, 419, 383, 0, 363, 503, 504, 305, 502, 0,
+ 691, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 402, 0, 0, 0, 0, 678, 0, 0, 0, 260,
+ 683, 0, 0, 0, 353, 257, 0, 0, 416, 0,
+ 198, 0, 471, 244, 364, 361, 510, 272, 263, 259,
+ 242, 306, 372, 414, 492, 408, 690, 357, 0, 0,
+ 481, 387, 0, 0, 0, 0, 0, 686, 687, 0,
+ 0, 0, 0, 0, 0, 2113, 0, 312, 240, 314,
+ 197, 399, 482, 276, 0, 91, 0, 0, 828, 812,
+ 778, 779, 816, 829, 830, 831, 832, 817, 0, 230,
+ 818, 819, 237, 820, 0, 777, 718, 720, 719, 737,
+ 738, 739, 740, 741, 742, 743, 716, 825, 833, 834,
+ 2114, 255, 310, 262, 254, 507, 0, 0, 0, 0,
+ 0, 0, 0, 221, 0, 0, 0, 0, 0, 0,
+ 0, 660, 675, 0, 689, 0, 0, 0, 265, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 672, 673, 0, 0, 0, 0,
+ 772, 0, 674, 0, 0, 682, 835, 836, 837, 838,
839, 840, 841, 842, 843, 844, 845, 846, 847, 848,
849, 850, 851, 852, 853, 854, 855, 856, 857, 858,
- 859, 860, 861, 862, 863, 864, 673, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
- 244, 0, 759, 0, 0, 541, 0, 0, 757, 0,
- 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
- 395, 441, 453, 0, 0, 0, 810, 0, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 811, 812,
- 243, 561, 712, 536, 210, 0, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 720, 721, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 0, 0, 408, 452, 230, 522, 475, 770,
- 758, 683, 774, 685, 771, 772, 680, 681, 684, 773,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 761,
- 669, 668, 0, 676, 0, 702, 703, 705, 709, 710,
- 711, 722, 723, 724, 732, 734, 735, 733, 736, 737,
- 738, 741, 742, 743, 744, 739, 740, 745, 686, 690,
- 687, 688, 689, 701, 691, 692, 693, 694, 695, 696,
- 697, 698, 699, 700, 784, 785, 786, 787, 788, 789,
- 715, 719, 718, 716, 717, 713, 714, 667, 190, 211,
- 352, 88, 434, 275, 559, 532, 527, 197, 213, 775,
- 249, 776, 0, 0, 780, 0, 0, 0, 782, 781,
- 0, 783, 749, 748, 0, 0, 777, 778, 0, 779,
- 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 790, 791, 792, 793, 794, 795,
- 796, 797, 286, 516, 545, 0, 0, 362, 0, 0,
- 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
- 240, 815, 0, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 679, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 666, 0, 0, 0, 257, 671, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 678, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 674, 675, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 89, 0, 0, 816, 800, 766, 767, 804, 817, 818,
- 819, 820, 805, 0, 228, 806, 807, 235, 808, 0,
- 765, 706, 708, 707, 725, 726, 727, 728, 729, 730,
- 731, 704, 813, 821, 822, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 648, 663, 0, 677,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 660,
- 661, 0, 0, 0, 0, 760, 0, 662, 0, 0,
- 670, 823, 824, 825, 826, 827, 828, 829, 830, 831,
- 832, 833, 834, 835, 836, 837, 838, 839, 840, 841,
- 842, 843, 844, 845, 846, 847, 848, 849, 850, 851,
- 852, 853, 854, 855, 856, 857, 858, 859, 860, 861,
- 862, 863, 864, 673, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 759,
- 0, 0, 541, 0, 0, 757, 0, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 810, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 3384, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 811, 812, 243, 561, 712,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 720, 721, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 770, 758, 683, 774,
- 685, 771, 772, 680, 681, 684, 773, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 761, 669, 668, 0,
- 676, 0, 702, 703, 705, 709, 710, 711, 722, 723,
- 724, 732, 734, 735, 733, 736, 737, 738, 741, 742,
- 743, 744, 739, 740, 745, 686, 690, 687, 688, 689,
- 701, 691, 692, 693, 694, 695, 696, 697, 698, 699,
- 700, 784, 785, 786, 787, 788, 789, 715, 719, 718,
- 716, 717, 713, 714, 667, 190, 211, 352, 0, 434,
- 275, 559, 532, 527, 197, 213, 775, 249, 776, 0,
- 0, 780, 0, 0, 0, 782, 781, 0, 783, 749,
- 748, 0, 0, 777, 778, 0, 779, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 790, 791, 792, 793, 794, 795, 796, 797, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 815, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
- 496, 0, 679, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 0, 0, 0, 666, 0, 0,
- 0, 257, 671, 0, 0, 0, 350, 254, 0, 0,
- 413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 678, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 674,
- 675, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 89, 0, 1497,
- 816, 800, 766, 767, 804, 817, 818, 819, 820, 805,
- 0, 228, 806, 807, 235, 808, 0, 765, 706, 708,
- 707, 725, 726, 727, 728, 729, 730, 731, 704, 813,
- 821, 822, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
- 0, 0, 0, 648, 663, 0, 677, 0, 0, 0,
- 262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 660, 661, 0, 0,
- 0, 0, 760, 0, 662, 0, 0, 670, 823, 824,
- 825, 826, 827, 828, 829, 830, 831, 832, 833, 834,
- 835, 836, 837, 838, 839, 840, 841, 842, 843, 844,
- 845, 846, 847, 848, 849, 850, 851, 852, 853, 854,
- 855, 856, 857, 858, 859, 860, 861, 862, 863, 864,
- 673, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 759, 0, 0, 541,
- 0, 0, 757, 0, 0, 0, 0, 349, 0, 316,
- 191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 810, 0, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 811, 812, 243, 561, 712, 536, 210, 0,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 720, 721, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 770, 758, 683, 774, 685, 771, 772,
- 680, 681, 684, 773, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 761, 669, 668, 0, 676, 0, 702,
- 703, 705, 709, 710, 711, 722, 723, 724, 732, 734,
- 735, 733, 736, 737, 738, 741, 742, 743, 744, 739,
- 740, 745, 686, 690, 687, 688, 689, 701, 691, 692,
- 693, 694, 695, 696, 697, 698, 699, 700, 784, 785,
- 786, 787, 788, 789, 715, 719, 718, 716, 717, 713,
- 714, 667, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 775, 249, 776, 0, 0, 780, 0,
- 0, 0, 782, 781, 0, 783, 749, 748, 0, 0,
- 777, 778, 0, 779, 0, 0, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 790, 791,
- 792, 793, 794, 795, 796, 797, 286, 516, 545, 0,
- 0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 815, 0, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 380, 0, 360, 497, 498, 302, 496, 0, 679,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 0, 0, 0, 666, 0, 0, 0, 257, 671,
- 0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
- 0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 678, 354, 0, 0, 476,
- 384, 0, 0, 0, 0, 0, 674, 675, 0, 0,
- 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 89, 0, 0, 816, 800, 766,
- 767, 804, 817, 818, 819, 820, 805, 0, 228, 806,
- 807, 235, 808, 0, 765, 706, 708, 707, 725, 726,
- 727, 728, 729, 730, 731, 704, 813, 821, 822, 0,
- 252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
- 0, 0, 219, 0, 0, 0, 0, 0, 0, 0,
- 648, 663, 0, 677, 0, 0, 0, 262, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 660, 661, 905, 0, 0, 0, 760,
- 0, 662, 0, 0, 670, 823, 824, 825, 826, 827,
- 828, 829, 830, 831, 832, 833, 834, 835, 836, 837,
+ 859, 860, 861, 862, 863, 864, 865, 866, 867, 868,
+ 869, 870, 871, 872, 873, 874, 875, 876, 685, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 287,
+ 0, 388, 247, 0, 438, 771, 0, 0, 549, 0,
+ 0, 769, 0, 0, 0, 0, 352, 0, 319, 193,
+ 217, 0, 0, 398, 446, 458, 0, 0, 0, 822,
+ 0, 456, 412, 527, 225, 274, 443, 418, 454, 426,
+ 277, 0, 0, 455, 359, 512, 436, 524, 550, 551,
+ 253, 392, 536, 496, 544, 568, 218, 250, 406, 489,
+ 530, 478, 384, 508, 509, 318, 477, 285, 196, 356,
+ 556, 216, 464, 358, 234, 223, 514, 533, 279, 441,
+ 563, 205, 491, 522, 231, 468, 0, 0, 570, 239,
+ 488, 207, 519, 487, 380, 315, 316, 206, 0, 442,
+ 258, 283, 0, 0, 248, 401, 823, 824, 246, 571,
+ 724, 543, 212, 0, 542, 394, 511, 520, 381, 370,
+ 211, 518, 379, 369, 323, 732, 733, 270, 296, 433,
+ 362, 434, 295, 297, 390, 389, 391, 200, 531, 0,
+ 201, 0, 483, 532, 572, 226, 227, 229, 0, 269,
+ 273, 281, 284, 292, 293, 302, 354, 405, 432, 428,
+ 437, 0, 506, 525, 537, 548, 554, 555, 557, 558,
+ 559, 560, 561, 564, 562, 393, 300, 479, 322, 360,
+ 0, 0, 411, 457, 232, 529, 480, 782, 770, 695,
+ 786, 697, 783, 784, 692, 693, 696, 785, 573, 574,
+ 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 0, 773, 681, 680,
+ 0, 688, 0, 714, 715, 717, 721, 722, 723, 734,
+ 735, 736, 744, 746, 747, 745, 748, 749, 750, 753,
+ 754, 755, 756, 751, 752, 757, 698, 702, 699, 700,
+ 701, 713, 703, 704, 705, 706, 707, 708, 709, 710,
+ 711, 712, 796, 797, 798, 799, 800, 801, 727, 731,
+ 730, 728, 729, 725, 726, 679, 192, 213, 355, 0,
+ 439, 278, 569, 539, 534, 199, 215, 787, 252, 788,
+ 0, 0, 792, 0, 0, 0, 794, 793, 0, 795,
+ 761, 760, 0, 0, 789, 790, 0, 791, 0, 0,
+ 194, 195, 202, 214, 224, 228, 235, 251, 266, 268,
+ 275, 288, 299, 307, 308, 311, 317, 367, 373, 374,
+ 375, 376, 395, 396, 397, 400, 403, 404, 407, 409,
+ 410, 413, 417, 421, 422, 423, 425, 427, 429, 440,
+ 445, 459, 460, 461, 462, 463, 466, 467, 472, 473,
+ 474, 475, 476, 484, 485, 490, 513, 515, 528, 546,
+ 552, 465, 802, 803, 804, 805, 806, 807, 808, 809,
+ 289, 523, 553, 521, 565, 547, 424, 365, 0, 0,
+ 368, 271, 294, 309, 0, 538, 486, 219, 451, 280,
+ 243, 827, 0, 204, 238, 222, 249, 264, 267, 313,
+ 378, 386, 415, 420, 286, 261, 236, 444, 233, 469,
+ 493, 494, 495, 497, 382, 256, 419, 0, 383, 363,
+ 503, 504, 305, 82, 502, 0, 691, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 402, 0, 0, 0,
+ 0, 678, 0, 0, 0, 260, 683, 0, 0, 0,
+ 353, 257, 0, 0, 416, 0, 198, 0, 471, 244,
+ 364, 361, 510, 272, 263, 259, 242, 306, 372, 414,
+ 492, 408, 690, 357, 0, 0, 481, 387, 0, 0,
+ 0, 0, 0, 686, 687, 0, 0, 0, 0, 0,
+ 0, 0, 0, 312, 240, 314, 197, 399, 482, 276,
+ 0, 91, 0, 0, 828, 812, 778, 779, 816, 829,
+ 830, 831, 832, 817, 0, 230, 818, 819, 237, 820,
+ 0, 777, 718, 720, 719, 737, 738, 739, 740, 741,
+ 742, 743, 716, 825, 833, 834, 0, 255, 310, 262,
+ 254, 507, 0, 0, 0, 0, 0, 0, 0, 221,
+ 0, 0, 0, 0, 0, 0, 0, 660, 675, 0,
+ 689, 0, 0, 0, 265, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 672, 673, 0, 0, 0, 0, 772, 0, 674, 0,
+ 0, 682, 835, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 860, 861, 862,
+ 863, 864, 865, 866, 867, 868, 869, 870, 871, 872,
+ 873, 874, 875, 876, 685, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 287, 0, 388, 247, 0,
+ 438, 771, 0, 0, 549, 0, 0, 769, 0, 0,
+ 0, 0, 352, 0, 319, 193, 217, 0, 0, 398,
+ 446, 458, 0, 0, 0, 822, 0, 456, 412, 527,
+ 225, 274, 443, 418, 454, 426, 277, 0, 0, 455,
+ 359, 512, 436, 524, 550, 551, 253, 392, 536, 496,
+ 544, 568, 218, 250, 406, 489, 530, 478, 384, 508,
+ 509, 318, 477, 285, 196, 356, 556, 216, 464, 358,
+ 234, 223, 514, 533, 279, 441, 563, 205, 491, 522,
+ 231, 468, 0, 0, 570, 239, 488, 207, 519, 487,
+ 380, 315, 316, 206, 0, 442, 258, 283, 0, 0,
+ 248, 401, 823, 824, 246, 571, 724, 543, 212, 0,
+ 542, 394, 511, 520, 381, 370, 211, 518, 379, 369,
+ 323, 732, 733, 270, 296, 433, 362, 434, 295, 297,
+ 390, 389, 391, 200, 531, 0, 201, 0, 483, 532,
+ 572, 226, 227, 229, 0, 269, 273, 281, 284, 292,
+ 293, 302, 354, 405, 432, 428, 437, 0, 506, 525,
+ 537, 548, 554, 555, 557, 558, 559, 560, 561, 564,
+ 562, 393, 300, 479, 322, 360, 0, 0, 411, 457,
+ 232, 529, 480, 782, 770, 695, 786, 697, 783, 784,
+ 692, 693, 696, 785, 573, 574, 575, 576, 577, 578,
+ 579, 580, 581, 582, 583, 584, 585, 586, 587, 588,
+ 589, 590, 0, 773, 681, 680, 0, 688, 0, 714,
+ 715, 717, 721, 722, 723, 734, 735, 736, 744, 746,
+ 747, 745, 748, 749, 750, 753, 754, 755, 756, 751,
+ 752, 757, 698, 702, 699, 700, 701, 713, 703, 704,
+ 705, 706, 707, 708, 709, 710, 711, 712, 796, 797,
+ 798, 799, 800, 801, 727, 731, 730, 728, 729, 725,
+ 726, 679, 192, 213, 355, 90, 439, 278, 569, 539,
+ 534, 199, 215, 787, 252, 788, 0, 0, 792, 0,
+ 0, 0, 794, 793, 0, 795, 761, 760, 0, 0,
+ 789, 790, 0, 791, 0, 0, 194, 195, 202, 214,
+ 224, 228, 235, 251, 266, 268, 275, 288, 299, 307,
+ 308, 311, 317, 367, 373, 374, 375, 376, 395, 396,
+ 397, 400, 403, 404, 407, 409, 410, 413, 417, 421,
+ 422, 423, 425, 427, 429, 440, 445, 459, 460, 461,
+ 462, 463, 466, 467, 472, 473, 474, 475, 476, 484,
+ 485, 490, 513, 515, 528, 546, 552, 465, 802, 803,
+ 804, 805, 806, 807, 808, 809, 289, 523, 553, 521,
+ 565, 547, 424, 365, 0, 0, 368, 271, 294, 309,
+ 0, 538, 486, 219, 451, 280, 243, 827, 0, 204,
+ 238, 222, 249, 264, 267, 313, 378, 386, 415, 420,
+ 286, 261, 236, 444, 233, 469, 493, 494, 495, 497,
+ 382, 256, 419, 383, 0, 363, 503, 504, 305, 502,
+ 0, 691, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 402, 0, 0, 0, 0, 678, 0, 0, 0,
+ 260, 683, 0, 0, 0, 353, 257, 0, 0, 416,
+ 0, 198, 0, 471, 244, 364, 361, 510, 272, 263,
+ 259, 242, 306, 372, 414, 492, 408, 690, 357, 0,
+ 0, 481, 387, 0, 0, 0, 0, 0, 686, 687,
+ 0, 0, 0, 0, 0, 0, 0, 0, 312, 240,
+ 314, 197, 399, 482, 276, 0, 91, 0, 0, 828,
+ 812, 778, 779, 816, 829, 830, 831, 832, 817, 0,
+ 230, 818, 819, 237, 820, 0, 777, 718, 720, 719,
+ 737, 738, 739, 740, 741, 742, 743, 716, 825, 833,
+ 834, 0, 255, 310, 262, 254, 507, 0, 0, 0,
+ 0, 0, 0, 0, 221, 0, 0, 0, 0, 0,
+ 0, 0, 660, 675, 0, 689, 0, 0, 0, 265,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 672, 673, 0, 0, 0,
+ 0, 772, 0, 674, 0, 0, 682, 835, 836, 837,
838, 839, 840, 841, 842, 843, 844, 845, 846, 847,
848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
- 858, 859, 860, 861, 862, 863, 864, 673, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 284, 0,
- 385, 244, 0, 759, 0, 0, 541, 0, 0, 757,
- 0, 0, 0, 0, 349, 0, 316, 191, 215, 0,
- 0, 395, 441, 453, 0, 0, 0, 810, 0, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 0,
- 0, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 811,
- 812, 243, 561, 712, 536, 210, 0, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 720, 721,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 0, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 0, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 0, 0, 408, 452, 230, 522, 475,
- 770, 758, 683, 774, 685, 771, 772, 680, 681, 684,
- 773, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 761, 669, 668, 0, 676, 0, 702, 703, 705, 709,
- 710, 711, 722, 723, 724, 732, 734, 735, 733, 736,
- 737, 738, 741, 742, 743, 744, 739, 740, 745, 686,
- 690, 687, 688, 689, 701, 691, 692, 693, 694, 695,
- 696, 697, 698, 699, 700, 784, 785, 786, 787, 788,
- 789, 715, 719, 718, 716, 717, 713, 714, 667, 190,
- 211, 352, 0, 434, 275, 559, 532, 527, 197, 213,
- 775, 249, 776, 0, 0, 780, 0, 0, 0, 782,
- 781, 0, 783, 749, 748, 0, 0, 777, 778, 0,
- 779, 0, 0, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 790, 791, 792, 793, 794,
- 795, 796, 797, 286, 516, 545, 0, 0, 362, 0,
- 0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
- 277, 240, 815, 0, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 380, 0,
- 360, 497, 498, 302, 496, 0, 679, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
- 0, 666, 0, 0, 0, 257, 671, 0, 0, 0,
- 350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
- 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
- 486, 405, 678, 354, 0, 0, 476, 384, 0, 0,
- 0, 0, 0, 674, 675, 0, 0, 0, 0, 0,
- 0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 89, 0, 0, 816, 800, 766, 767, 804, 817,
- 818, 819, 820, 805, 0, 228, 806, 807, 235, 808,
- 0, 765, 706, 708, 707, 725, 726, 727, 728, 729,
- 730, 731, 704, 813, 821, 822, 0, 252, 307, 259,
- 251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
- 0, 0, 0, 0, 0, 0, 0, 648, 663, 0,
- 677, 0, 0, 0, 262, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 660, 661, 0, 0, 0, 0, 760, 0, 662, 0,
- 0, 670, 823, 824, 825, 826, 827, 828, 829, 830,
- 831, 832, 833, 834, 835, 836, 837, 838, 839, 840,
- 841, 842, 843, 844, 845, 846, 847, 848, 849, 850,
- 851, 852, 853, 854, 855, 856, 857, 858, 859, 860,
- 861, 862, 863, 864, 673, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
- 759, 0, 0, 541, 0, 0, 757, 0, 0, 0,
- 0, 349, 0, 316, 191, 215, 0, 0, 395, 441,
- 453, 0, 0, 0, 810, 0, 451, 409, 520, 223,
- 271, 438, 415, 449, 422, 274, 0, 0, 450, 356,
- 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
- 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
- 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
- 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
- 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
- 0, 437, 255, 280, 245, 398, 811, 812, 243, 561,
- 712, 536, 210, 0, 535, 391, 505, 514, 378, 367,
- 209, 512, 376, 366, 320, 720, 721, 267, 293, 429,
- 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
- 199, 0, 478, 525, 562, 224, 225, 227, 0, 266,
- 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
- 433, 0, 500, 518, 530, 540, 546, 547, 549, 550,
- 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
- 0, 0, 408, 452, 230, 522, 475, 770, 758, 683,
- 774, 685, 771, 772, 680, 681, 684, 773, 563, 564,
- 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
- 575, 576, 577, 578, 579, 580, 0, 761, 669, 668,
- 0, 676, 0, 702, 703, 705, 709, 710, 711, 722,
- 723, 724, 732, 734, 735, 733, 736, 737, 738, 741,
- 742, 743, 744, 739, 740, 745, 686, 690, 687, 688,
- 689, 701, 691, 692, 693, 694, 695, 696, 697, 698,
- 699, 700, 784, 785, 786, 787, 788, 789, 715, 719,
- 718, 716, 717, 713, 714, 667, 190, 211, 352, 0,
- 434, 275, 559, 532, 527, 197, 213, 775, 249, 776,
- 0, 0, 780, 0, 0, 0, 782, 781, 0, 783,
- 749, 748, 0, 0, 777, 778, 0, 779, 0, 0,
- 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
- 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
- 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
- 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
- 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
- 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
- 544, 460, 790, 791, 792, 793, 794, 795, 796, 797,
- 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
- 291, 306, 0, 531, 481, 217, 446, 277, 240, 815,
- 0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
- 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
- 489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
- 302, 496, 0, 679, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 666, 0,
- 0, 0, 257, 671, 0, 0, 0, 350, 254, 0,
- 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
- 269, 260, 256, 239, 303, 369, 411, 486, 405, 678,
- 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
- 674, 675, 0, 0, 0, 0, 0, 0, 0, 0,
- 309, 237, 311, 195, 396, 477, 273, 0, 89, 0,
- 0, 816, 800, 766, 767, 804, 817, 818, 819, 820,
- 805, 0, 228, 806, 807, 235, 808, 0, 765, 706,
- 708, 707, 725, 726, 727, 728, 729, 730, 731, 704,
- 813, 821, 822, 0, 252, 307, 259, 251, 501, 0,
- 0, 0, 0, 0, 0, 0, 219, 0, 0, 0,
- 0, 0, 0, 0, 0, 663, 0, 677, 0, 0,
- 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 660, 661, 0,
- 0, 0, 0, 760, 0, 662, 0, 0, 670, 823,
- 824, 825, 826, 827, 828, 829, 830, 831, 832, 833,
- 834, 835, 836, 837, 838, 839, 840, 841, 842, 843,
- 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
- 854, 855, 856, 857, 858, 859, 860, 861, 862, 863,
- 864, 673, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 284, 0, 385, 244, 0, 759, 0, 0,
- 541, 0, 0, 757, 0, 0, 0, 0, 349, 0,
- 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
- 0, 810, 0, 451, 409, 520, 223, 271, 438, 415,
- 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
- 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
- 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
- 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
- 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
- 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
- 280, 245, 398, 811, 812, 243, 561, 712, 536, 210,
- 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
- 366, 320, 720, 721, 267, 293, 429, 359, 430, 292,
- 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
- 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
- 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
- 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
- 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
- 452, 230, 522, 475, 770, 758, 683, 774, 685, 771,
- 772, 680, 681, 684, 773, 563, 564, 565, 566, 567,
- 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 580, 0, 761, 669, 668, 0, 676, 0,
- 702, 703, 705, 709, 710, 711, 722, 723, 724, 732,
- 734, 735, 733, 736, 737, 738, 741, 742, 743, 744,
- 739, 740, 745, 686, 690, 687, 688, 689, 701, 691,
- 692, 693, 694, 695, 696, 697, 698, 699, 700, 784,
- 785, 786, 787, 788, 789, 715, 719, 718, 716, 717,
- 713, 714, 667, 190, 211, 352, 0, 434, 275, 559,
- 532, 527, 197, 213, 775, 249, 776, 0, 0, 780,
- 0, 0, 0, 782, 781, 0, 783, 749, 748, 0,
- 0, 777, 778, 0, 779, 0, 0, 192, 193, 200,
- 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
- 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
- 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
- 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
- 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
- 479, 480, 484, 507, 509, 521, 539, 544, 460, 790,
- 791, 792, 793, 794, 795, 796, 797, 286, 516, 545,
- 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
- 531, 481, 217, 446, 277, 240, 815, 0, 202, 236,
- 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
- 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
- 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 0, 0, 0, 0, 257,
- 0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 0, 0, 0, 0, 627,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
- 0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
- 325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
- 0, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 976, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 0, 0, 975, 541, 0, 0,
- 0, 0, 0, 972, 973, 349, 933, 316, 191, 215,
- 966, 970, 395, 441, 453, 0, 0, 0, 242, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 510, 511, 243, 561, 218, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 339,
- 340, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 581,
- 368, 465, 519, 321, 333, 336, 326, 345, 0, 346,
- 322, 323, 328, 330, 331, 332, 337, 338, 342, 348,
- 238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
- 493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
- 279, 528, 295, 448, 447, 317, 318, 363, 431, 0,
- 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
- 213, 0, 249, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 287, 288, 426, 427,
- 300, 301, 556, 557, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 0, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 380,
- 0, 360, 497, 498, 302, 496, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 399, 0, 0,
- 0, 0, 0, 0, 0, 0, 257, 0, 0, 0,
- 0, 350, 254, 0, 0, 413, 0, 196, 0, 466,
- 241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
- 411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 0, 0, 0, 1460, 800, 0, 0, 1457,
- 0, 0, 0, 0, 1455, 0, 228, 1456, 1454, 235,
- 1459, 0, 765, 335, 344, 343, 324, 325, 327, 329,
- 334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
- 259, 251, 501, 0, 0, 0, 0, 0, 0, 0,
- 219, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 262, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
- 0, 0, 0, 0, 541, 0, 0, 0, 0, 0,
- 0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
- 441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
- 223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
- 356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
- 537, 558, 216, 247, 403, 483, 523, 473, 381, 502,
- 503, 315, 472, 282, 194, 353, 548, 214, 459, 355,
- 232, 221, 508, 526, 276, 436, 203, 485, 515, 229,
- 463, 0, 0, 560, 205, 513, 482, 377, 312, 313,
- 204, 0, 437, 255, 280, 245, 398, 510, 511, 243,
- 561, 218, 536, 210, 0, 535, 391, 505, 514, 378,
- 367, 209, 512, 376, 366, 320, 339, 340, 267, 293,
- 429, 359, 430, 292, 294, 387, 386, 388, 198, 524,
- 0, 199, 0, 478, 525, 562, 224, 225, 227, 0,
- 266, 270, 278, 281, 289, 290, 299, 351, 402, 428,
- 424, 433, 0, 500, 518, 530, 540, 546, 547, 549,
- 550, 551, 552, 553, 555, 554, 390, 297, 474, 319,
- 357, 0, 0, 408, 452, 230, 522, 475, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 563,
- 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
- 574, 575, 576, 577, 578, 579, 580, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 581, 368, 465, 519,
- 321, 333, 336, 326, 345, 0, 346, 322, 323, 328,
- 330, 331, 332, 337, 338, 342, 348, 238, 201, 374,
- 382, 499, 298, 206, 207, 208, 492, 493, 494, 495,
- 533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
- 448, 447, 317, 318, 363, 431, 0, 190, 211, 352,
- 0, 434, 275, 559, 532, 527, 197, 213, 0, 249,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
- 265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
- 371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
- 406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
- 435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
- 468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
- 539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 286, 516, 545, 0, 0, 362, 0, 0, 365,
- 268, 291, 306, 0, 531, 481, 217, 446, 277, 240,
- 0, 0, 202, 236, 220, 246, 261, 264, 310, 375,
- 383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
- 488, 489, 491, 379, 253, 416, 0, 380, 360, 497,
- 498, 302, 80, 496, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 89, 0, 0, 0, 188, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 88, 434,
- 275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
- 0, 0, 0, 0, 2064, 0, 0, 2063, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 0, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 1516, 0, 360, 497, 498, 302,
- 496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 0, 0, 1518, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
- 413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 1520, 627, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
- 0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 1294,
- 0, 1295, 1296, 0, 0, 0, 0, 0, 0, 0,
- 262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 0, 0, 0, 541,
- 0, 0, 0, 0, 0, 0, 0, 349, 0, 316,
- 191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 242, 0, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 0,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 0, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 0, 249, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 0, 0, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 0, 380, 360, 497, 498, 302, 80, 496, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 0, 0, 0, 0, 257,
- 0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 89, 0, 1497, 0, 627,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
- 0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
- 325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
- 0, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 0, 0, 0, 541, 0, 0,
- 0, 0, 0, 0, 0, 349, 0, 316, 191, 215,
- 0, 0, 395, 441, 453, 0, 0, 0, 242, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 510, 511, 243, 561, 218, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 339,
- 340, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 581,
- 368, 465, 519, 321, 333, 336, 326, 345, 0, 346,
- 322, 323, 328, 330, 331, 332, 337, 338, 342, 348,
- 238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
- 493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
- 279, 528, 295, 448, 447, 317, 318, 363, 431, 0,
- 190, 211, 352, 88, 434, 275, 559, 532, 527, 197,
- 213, 0, 249, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 287, 288, 426, 427,
- 300, 301, 556, 557, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 0, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 380,
- 0, 360, 497, 498, 302, 496, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 399, 0, 0,
- 0, 0, 0, 0, 0, 0, 257, 0, 0, 0,
- 0, 350, 254, 0, 0, 413, 0, 196, 0, 466,
- 241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
- 411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 89, 0, 0, 0, 188, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 228, 0, 0, 235,
- 0, 0, 0, 335, 344, 343, 324, 325, 327, 329,
- 334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
- 259, 251, 501, 0, 0, 0, 0, 0, 0, 0,
- 219, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 262, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
- 0, 0, 0, 0, 541, 0, 0, 0, 0, 0,
- 0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
- 441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
- 223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
- 356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
- 537, 558, 216, 247, 403, 483, 523, 473, 381, 502,
- 503, 315, 472, 282, 194, 353, 548, 214, 459, 355,
- 232, 221, 508, 526, 276, 436, 203, 485, 515, 229,
- 463, 0, 0, 560, 205, 513, 482, 377, 312, 313,
- 204, 0, 437, 255, 280, 245, 398, 510, 511, 243,
- 561, 218, 536, 210, 0, 535, 391, 505, 514, 378,
- 367, 209, 512, 376, 366, 320, 339, 340, 267, 293,
- 429, 359, 430, 292, 294, 387, 386, 388, 198, 524,
- 0, 199, 0, 478, 525, 562, 224, 225, 227, 0,
- 266, 270, 278, 281, 289, 290, 299, 351, 402, 428,
- 424, 433, 0, 500, 518, 530, 540, 546, 547, 549,
- 550, 551, 552, 553, 555, 554, 390, 297, 474, 319,
- 357, 0, 0, 408, 452, 230, 522, 475, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 563,
- 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
- 574, 575, 576, 577, 578, 579, 580, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 581, 368, 465, 519,
- 321, 333, 336, 326, 345, 0, 346, 322, 323, 328,
- 330, 331, 332, 337, 338, 342, 348, 238, 201, 374,
- 382, 499, 298, 206, 207, 208, 492, 493, 494, 495,
- 533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
- 448, 447, 317, 318, 363, 431, 0, 190, 211, 352,
- 0, 434, 275, 559, 532, 527, 197, 213, 0, 249,
- 0, 0, 0, 0, 0, 0, 2064, 0, 0, 2063,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
- 265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
- 371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
- 406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
- 435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
- 468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
- 539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 286, 516, 545, 0, 0, 362, 0, 0, 365,
- 268, 291, 306, 0, 531, 481, 217, 446, 277, 240,
- 0, 0, 202, 236, 220, 246, 261, 264, 310, 375,
- 383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
- 488, 489, 491, 379, 253, 416, 380, 0, 360, 497,
- 498, 302, 496, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 399, 0, 0, 0, 2014, 0,
- 0, 0, 0, 257, 0, 0, 0, 0, 350, 254,
- 0, 0, 413, 0, 196, 0, 466, 241, 361, 358,
- 504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
- 0, 354, 0, 0, 476, 384, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 309, 237, 311, 195, 396, 477, 273, 0, 0,
- 0, 0, 1697, 188, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 228, 0, 0, 235, 0, 0, 0,
- 335, 344, 343, 324, 325, 327, 329, 334, 341, 347,
- 0, 0, 0, 0, 0, 252, 307, 259, 251, 501,
- 0, 0, 0, 0, 0, 0, 0, 219, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 262, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 284, 0, 385, 244, 0, 0, 0,
- 0, 541, 0, 0, 0, 0, 0, 0, 0, 349,
- 0, 316, 191, 215, 0, 0, 395, 441, 453, 0,
- 0, 0, 242, 0, 451, 409, 520, 223, 271, 438,
- 415, 449, 422, 274, 0, 2012, 450, 356, 506, 432,
- 517, 542, 543, 250, 389, 529, 490, 537, 558, 216,
- 247, 403, 483, 523, 473, 381, 502, 503, 315, 472,
- 282, 194, 353, 548, 214, 459, 355, 232, 221, 508,
- 526, 276, 436, 203, 485, 515, 229, 463, 0, 0,
- 560, 205, 513, 482, 377, 312, 313, 204, 0, 437,
- 255, 280, 245, 398, 510, 511, 243, 561, 218, 536,
- 210, 0, 535, 391, 505, 514, 378, 367, 209, 512,
- 376, 366, 320, 339, 340, 267, 293, 429, 359, 430,
- 292, 294, 387, 386, 388, 198, 524, 0, 199, 0,
- 478, 525, 562, 224, 225, 227, 0, 266, 270, 278,
- 281, 289, 290, 299, 351, 402, 428, 424, 433, 0,
- 500, 518, 530, 540, 546, 547, 549, 550, 551, 552,
- 553, 555, 554, 390, 297, 474, 319, 357, 0, 0,
- 408, 452, 230, 522, 475, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 563, 564, 565, 566,
- 567, 568, 569, 570, 571, 572, 573, 574, 575, 576,
- 577, 578, 579, 580, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 581, 368, 465, 519, 321, 333, 336,
- 326, 345, 0, 346, 322, 323, 328, 330, 331, 332,
- 337, 338, 342, 348, 238, 201, 374, 382, 499, 298,
- 206, 207, 208, 492, 493, 494, 495, 533, 534, 538,
- 442, 443, 444, 445, 279, 528, 295, 448, 447, 317,
- 318, 363, 431, 0, 190, 211, 352, 0, 434, 275,
- 559, 532, 527, 197, 213, 0, 249, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 192, 193,
- 200, 212, 222, 226, 233, 248, 263, 265, 272, 285,
- 296, 304, 305, 308, 314, 364, 370, 371, 372, 373,
- 392, 393, 394, 397, 400, 401, 404, 406, 407, 410,
- 414, 418, 419, 420, 421, 423, 425, 435, 440, 454,
- 455, 456, 457, 458, 461, 462, 467, 468, 469, 470,
- 471, 479, 480, 484, 507, 509, 521, 539, 544, 460,
- 287, 288, 426, 427, 300, 301, 556, 557, 286, 516,
- 545, 0, 0, 362, 0, 0, 365, 268, 291, 306,
- 0, 531, 481, 217, 446, 277, 240, 0, 0, 202,
- 236, 220, 246, 261, 264, 310, 375, 383, 412, 417,
- 283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
- 379, 253, 416, 380, 0, 360, 497, 498, 302, 496,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 399, 0, 0, 0, 0, 0, 0, 0, 0,
- 257, 0, 0, 0, 0, 350, 254, 0, 0, 413,
- 0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
- 256, 239, 303, 369, 411, 486, 405, 0, 354, 0,
- 0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
- 311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
- 627, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
- 324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
- 0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
- 0, 0, 0, 0, 219, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 262,
- 0, 0, 0, 0, 0, 0, 0, 0, 927, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 284, 0, 385, 244, 0, 0, 0, 0, 541, 0,
- 0, 0, 0, 0, 0, 0, 349, 933, 316, 191,
- 215, 931, 0, 395, 441, 453, 0, 0, 0, 242,
- 0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
- 274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
- 250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
- 523, 473, 381, 502, 503, 315, 472, 282, 194, 353,
- 548, 214, 459, 355, 232, 221, 508, 526, 276, 436,
- 203, 485, 515, 229, 463, 0, 0, 560, 205, 513,
- 482, 377, 312, 313, 204, 0, 437, 255, 280, 245,
- 398, 510, 511, 243, 561, 218, 536, 210, 0, 535,
- 391, 505, 514, 378, 367, 209, 512, 376, 366, 320,
- 339, 340, 267, 293, 429, 359, 430, 292, 294, 387,
- 386, 388, 198, 524, 0, 199, 0, 478, 525, 562,
- 224, 225, 227, 0, 266, 270, 278, 281, 289, 290,
- 299, 351, 402, 428, 424, 433, 0, 500, 518, 530,
- 540, 546, 547, 549, 550, 551, 552, 553, 555, 554,
- 390, 297, 474, 319, 357, 0, 0, 408, 452, 230,
- 522, 475, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 563, 564, 565, 566, 567, 568, 569,
- 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
- 580, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 581, 368, 465, 519, 321, 333, 336, 326, 345, 0,
- 346, 322, 323, 328, 330, 331, 332, 337, 338, 342,
- 348, 238, 201, 374, 382, 499, 298, 206, 207, 208,
- 492, 493, 494, 495, 533, 534, 538, 442, 443, 444,
- 445, 279, 528, 295, 448, 447, 317, 318, 363, 431,
- 0, 190, 211, 352, 0, 434, 275, 559, 532, 527,
- 197, 213, 0, 249, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 192, 193, 200, 212, 222,
- 226, 233, 248, 263, 265, 272, 285, 296, 304, 305,
- 308, 314, 364, 370, 371, 372, 373, 392, 393, 394,
- 397, 400, 401, 404, 406, 407, 410, 414, 418, 419,
- 420, 421, 423, 425, 435, 440, 454, 455, 456, 457,
- 458, 461, 462, 467, 468, 469, 470, 471, 479, 480,
- 484, 507, 509, 521, 539, 544, 460, 287, 288, 426,
- 427, 300, 301, 556, 557, 286, 516, 545, 0, 0,
- 362, 0, 0, 365, 268, 291, 306, 0, 531, 481,
- 217, 446, 277, 240, 0, 0, 202, 236, 220, 246,
- 261, 264, 310, 375, 383, 412, 417, 283, 258, 234,
- 439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
- 380, 0, 360, 497, 498, 302, 496, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 0, 0, 2014, 0, 0, 0, 0, 257, 0, 0,
- 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 0, 0, 0, 1697, 188, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
- 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
- 329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
- 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
- 0, 219, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 262, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
- 244, 0, 0, 0, 0, 541, 0, 0, 0, 0,
- 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
- 395, 441, 453, 0, 0, 0, 242, 0, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
- 243, 561, 218, 536, 210, 0, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 0, 0, 408, 452, 230, 522, 475, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 581, 368, 465,
- 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
- 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
- 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
- 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
- 295, 448, 447, 317, 318, 363, 431, 0, 190, 211,
- 352, 0, 434, 275, 559, 532, 527, 197, 213, 0,
- 249, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
- 556, 557, 286, 516, 545, 0, 0, 362, 0, 0,
- 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
- 240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 1497, 0, 627, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 3294, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 0, 434,
- 275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 0, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
- 496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 0, 0, 0, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
- 413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 1847, 627, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
- 0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1848, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 0, 0, 0, 541,
- 0, 0, 0, 0, 0, 0, 0, 349, 0, 316,
- 191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 242, 0, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 0,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 0, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 0, 249, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 0, 0, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
- 0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
- 0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
- 384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 0, 0, 2415, 627, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
- 0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
- 327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
- 252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
- 0, 0, 219, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 2416, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 284, 0,
- 385, 244, 0, 0, 0, 0, 541, 0, 0, 0,
- 0, 0, 0, 0, 349, 0, 316, 191, 215, 0,
- 0, 395, 441, 453, 0, 0, 0, 242, 0, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 0,
- 0, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
- 511, 243, 561, 218, 536, 210, 0, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 0, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 0, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 0, 0, 408, 452, 230, 522, 475,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 581, 368,
- 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
- 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
- 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
- 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
- 528, 295, 448, 447, 317, 318, 363, 431, 0, 190,
- 211, 352, 0, 434, 275, 559, 532, 527, 197, 213,
- 0, 249, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
- 301, 556, 557, 286, 516, 545, 0, 0, 362, 0,
- 0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
- 277, 240, 0, 0, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 380, 0,
- 360, 497, 498, 302, 496, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
- 0, 0, 0, 0, 0, 257, 0, 0, 0, 0,
- 350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
- 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
- 486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 0, 0, 0, 0, 627, 0, 0, 0, 0,
- 2400, 0, 0, 0, 0, 228, 0, 0, 235, 2401,
- 0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
- 341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
- 251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 262, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
- 0, 0, 0, 541, 0, 0, 0, 0, 0, 0,
- 0, 349, 0, 316, 191, 215, 0, 0, 395, 441,
- 453, 0, 0, 0, 242, 0, 451, 409, 520, 223,
- 271, 438, 415, 449, 422, 274, 0, 0, 450, 356,
- 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
- 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
- 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
- 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
- 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
- 0, 437, 255, 280, 245, 398, 510, 511, 243, 561,
- 218, 536, 210, 0, 535, 391, 505, 514, 378, 367,
- 209, 512, 376, 366, 320, 339, 340, 267, 293, 429,
- 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
- 199, 0, 478, 525, 562, 224, 225, 227, 0, 266,
- 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
- 433, 0, 500, 518, 530, 540, 546, 547, 549, 550,
- 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
- 0, 0, 408, 452, 230, 522, 475, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 563, 564,
- 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
- 575, 576, 577, 578, 579, 580, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 581, 368, 465, 519, 321,
- 333, 336, 326, 345, 0, 346, 322, 323, 328, 330,
- 331, 332, 337, 338, 342, 348, 238, 201, 374, 382,
- 499, 298, 206, 207, 208, 492, 493, 494, 495, 533,
- 534, 538, 442, 443, 444, 445, 279, 528, 295, 448,
- 447, 317, 318, 363, 431, 0, 190, 211, 352, 0,
- 434, 275, 559, 532, 527, 197, 213, 0, 249, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
- 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
- 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
- 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
- 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
- 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
- 544, 460, 287, 288, 426, 427, 300, 301, 556, 557,
- 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
- 291, 306, 0, 531, 481, 217, 446, 277, 240, 0,
- 0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
- 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
- 489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
- 302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
- 0, 0, 257, 1539, 0, 0, 0, 350, 254, 0,
- 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
- 269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
- 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 309, 237, 311, 195, 396, 477, 273, 0, 0, 0,
- 0, 1538, 627, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 228, 0, 0, 235, 0, 0, 0, 335,
- 344, 343, 324, 325, 327, 329, 334, 341, 347, 0,
- 0, 0, 0, 0, 252, 307, 259, 251, 501, 0,
- 0, 0, 0, 0, 0, 0, 219, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 284, 0, 385, 244, 0, 0, 0, 0,
- 541, 0, 0, 0, 0, 0, 0, 0, 349, 0,
- 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
- 0, 242, 0, 451, 409, 520, 223, 271, 438, 415,
- 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
- 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
- 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
- 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
- 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
- 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
- 280, 245, 398, 510, 511, 243, 561, 218, 536, 210,
- 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
- 366, 320, 339, 340, 267, 293, 429, 359, 430, 292,
- 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
- 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
- 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
- 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
- 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
- 452, 230, 522, 475, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 563, 564, 565, 566, 567,
- 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 580, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 581, 368, 465, 519, 321, 333, 336, 326,
- 345, 0, 346, 322, 323, 328, 330, 331, 332, 337,
- 338, 342, 348, 238, 201, 374, 382, 499, 298, 206,
- 207, 208, 492, 493, 494, 495, 533, 534, 538, 442,
- 443, 444, 445, 279, 528, 295, 448, 447, 317, 318,
- 363, 431, 0, 190, 211, 352, 0, 434, 275, 559,
- 532, 527, 197, 213, 0, 249, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 192, 193, 200,
- 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
- 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
- 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
- 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
- 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
- 479, 480, 484, 507, 509, 521, 539, 544, 460, 287,
- 288, 426, 427, 300, 301, 556, 557, 286, 516, 545,
- 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
- 531, 481, 217, 446, 277, 240, 0, 0, 202, 236,
- 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
- 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
- 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 0, 0, 0, 0, 257,
- 0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 0, 0, 0, 0, 629,
- 630, 631, 0, 0, 0, 0, 0, 0, 0, 228,
- 0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
- 325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
- 0, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 0, 0, 0, 541, 0, 0,
- 0, 0, 0, 0, 0, 349, 0, 316, 191, 215,
- 0, 0, 395, 441, 453, 0, 0, 0, 242, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 510, 511, 243, 561, 218, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 339,
- 340, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 581,
- 368, 465, 519, 321, 333, 336, 326, 345, 0, 346,
- 322, 323, 328, 330, 331, 332, 337, 338, 342, 348,
- 238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
- 493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
- 279, 528, 295, 448, 447, 317, 318, 363, 431, 0,
- 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
- 213, 0, 249, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 287, 288, 426, 427,
- 300, 301, 556, 557, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 0, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 380,
- 0, 360, 497, 498, 302, 496, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 399, 0, 0,
- 0, 0, 0, 0, 0, 0, 257, 0, 0, 0,
- 0, 350, 254, 0, 0, 413, 0, 196, 0, 466,
- 241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
- 411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 0, 0, 0, 0, 627, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 228, 0, 0, 235,
- 0, 0, 0, 335, 344, 343, 324, 325, 327, 329,
- 334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
- 259, 251, 501, 0, 0, 0, 0, 0, 0, 0,
- 219, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 262, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
- 0, 0, 0, 0, 541, 0, 0, 0, 3418, 0,
- 0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
- 441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
- 223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
- 356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
- 537, 558, 216, 247, 403, 483, 523, 473, 381, 502,
- 503, 315, 472, 282, 194, 353, 548, 214, 459, 355,
- 232, 221, 508, 526, 276, 436, 203, 485, 515, 229,
- 463, 0, 0, 560, 205, 513, 482, 377, 312, 313,
- 204, 0, 437, 255, 280, 245, 398, 510, 511, 243,
- 561, 218, 536, 210, 0, 535, 391, 505, 514, 378,
- 367, 209, 512, 376, 366, 320, 339, 340, 267, 293,
- 429, 359, 430, 292, 294, 387, 386, 388, 198, 524,
- 0, 199, 0, 478, 525, 562, 224, 225, 227, 0,
- 266, 270, 278, 281, 289, 290, 299, 351, 402, 428,
- 424, 433, 0, 500, 518, 530, 540, 546, 547, 549,
- 550, 551, 552, 553, 555, 554, 390, 297, 474, 319,
- 357, 0, 0, 408, 452, 230, 522, 475, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 563,
- 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
- 574, 575, 576, 577, 578, 579, 580, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 581, 368, 465, 519,
- 321, 333, 336, 326, 345, 0, 346, 322, 323, 328,
- 330, 331, 332, 337, 338, 342, 348, 238, 201, 374,
- 382, 499, 298, 206, 207, 208, 492, 493, 494, 495,
- 533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
- 448, 447, 317, 318, 363, 431, 0, 190, 211, 352,
- 0, 434, 275, 559, 532, 527, 197, 213, 0, 249,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
- 265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
- 371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
- 406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
- 435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
- 468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
- 539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 286, 516, 545, 0, 0, 362, 0, 0, 365,
- 268, 291, 306, 0, 531, 481, 217, 446, 277, 240,
- 0, 0, 202, 236, 220, 246, 261, 264, 310, 375,
- 383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
- 488, 489, 491, 379, 253, 416, 380, 0, 360, 497,
- 498, 302, 496, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 399, 0, 0, 0, 0, 0,
- 0, 0, 0, 257, 0, 0, 0, 0, 350, 254,
- 0, 0, 413, 0, 196, 0, 466, 241, 361, 358,
- 504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
- 0, 354, 0, 0, 476, 384, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 309, 237, 311, 195, 396, 477, 273, 0, 0,
- 0, 0, 1697, 188, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 228, 0, 0, 235, 0, 0, 0,
- 335, 344, 343, 324, 325, 327, 329, 334, 341, 347,
- 0, 0, 0, 0, 0, 252, 307, 259, 251, 501,
- 0, 0, 0, 0, 0, 0, 0, 219, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 262, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 284, 0, 385, 244, 0, 0, 0,
- 0, 541, 0, 0, 0, 0, 0, 0, 0, 349,
- 0, 316, 191, 215, 0, 0, 395, 441, 453, 0,
- 0, 0, 242, 0, 451, 409, 520, 223, 271, 438,
- 415, 449, 422, 274, 0, 0, 450, 356, 506, 432,
- 517, 542, 543, 250, 389, 529, 490, 537, 558, 216,
- 247, 403, 483, 523, 473, 381, 502, 503, 315, 472,
- 282, 194, 353, 548, 214, 459, 355, 232, 221, 508,
- 526, 276, 436, 203, 485, 515, 229, 463, 0, 0,
- 560, 205, 513, 482, 377, 312, 313, 204, 0, 437,
- 255, 280, 245, 398, 510, 511, 243, 561, 218, 536,
- 210, 0, 535, 391, 505, 514, 378, 367, 209, 512,
- 376, 366, 320, 339, 340, 267, 293, 429, 359, 430,
- 292, 294, 387, 386, 388, 198, 524, 0, 199, 0,
- 478, 525, 562, 224, 225, 227, 0, 266, 270, 278,
- 281, 289, 290, 299, 351, 402, 428, 424, 433, 0,
- 500, 518, 530, 540, 546, 547, 549, 550, 551, 552,
- 553, 555, 554, 390, 297, 474, 319, 357, 0, 0,
- 408, 452, 230, 522, 475, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 563, 564, 565, 566,
- 567, 568, 569, 570, 571, 572, 573, 574, 575, 576,
- 577, 578, 579, 580, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 581, 368, 465, 519, 321, 333, 336,
- 326, 345, 0, 346, 322, 323, 328, 330, 331, 332,
- 337, 338, 342, 348, 238, 201, 374, 382, 499, 298,
- 206, 207, 208, 492, 493, 494, 495, 533, 534, 538,
- 442, 443, 444, 445, 279, 528, 295, 448, 447, 317,
- 318, 363, 431, 0, 190, 211, 352, 0, 434, 275,
- 559, 532, 527, 197, 213, 0, 249, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 192, 193,
- 200, 212, 222, 226, 233, 248, 263, 265, 272, 285,
- 296, 304, 305, 308, 314, 364, 370, 371, 372, 373,
- 392, 393, 394, 397, 400, 401, 404, 406, 407, 410,
- 414, 418, 419, 420, 421, 423, 425, 435, 440, 454,
- 455, 456, 457, 458, 461, 462, 467, 468, 469, 470,
- 471, 479, 480, 484, 507, 509, 521, 539, 544, 460,
- 287, 288, 426, 427, 300, 301, 556, 557, 286, 516,
- 545, 0, 0, 362, 0, 0, 365, 268, 291, 306,
- 0, 531, 481, 217, 446, 277, 240, 0, 0, 202,
- 236, 220, 246, 261, 264, 310, 375, 383, 412, 417,
- 283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
- 379, 253, 416, 380, 0, 360, 497, 498, 302, 496,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 399, 0, 0, 0, 0, 0, 0, 0, 0,
- 257, 0, 0, 0, 0, 350, 254, 0, 0, 413,
- 0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
- 256, 239, 303, 369, 411, 486, 405, 0, 354, 0,
- 0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
- 311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
- 627, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
- 324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
- 0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
- 0, 0, 0, 0, 219, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 262,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 284, 0, 385, 244, 0, 0, 0, 0, 541, 0,
- 0, 0, 3294, 0, 0, 0, 349, 0, 316, 191,
- 215, 0, 0, 395, 441, 453, 0, 0, 0, 242,
- 0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
- 274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
- 250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
- 523, 473, 381, 502, 503, 315, 472, 282, 194, 353,
- 548, 214, 459, 355, 232, 221, 508, 526, 276, 436,
- 203, 485, 515, 229, 463, 0, 0, 560, 205, 513,
- 482, 377, 312, 313, 204, 0, 437, 255, 280, 245,
- 398, 510, 511, 243, 561, 218, 536, 210, 0, 535,
- 391, 505, 514, 378, 367, 209, 512, 376, 366, 320,
- 339, 340, 267, 293, 429, 359, 430, 292, 294, 387,
- 386, 388, 198, 524, 0, 199, 0, 478, 525, 562,
- 224, 225, 227, 0, 266, 270, 278, 281, 289, 290,
- 299, 351, 402, 428, 424, 433, 0, 500, 518, 530,
- 540, 546, 547, 549, 550, 551, 552, 553, 555, 554,
- 390, 297, 474, 319, 357, 0, 0, 408, 452, 230,
- 522, 475, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 563, 564, 565, 566, 567, 568, 569,
- 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
- 580, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 581, 368, 465, 519, 321, 333, 336, 326, 345, 0,
- 346, 322, 323, 328, 330, 331, 332, 337, 338, 342,
- 348, 238, 201, 374, 382, 499, 298, 206, 207, 208,
- 492, 493, 494, 495, 533, 534, 538, 442, 443, 444,
- 445, 279, 528, 295, 448, 447, 317, 318, 363, 431,
- 0, 190, 211, 352, 0, 434, 275, 559, 532, 527,
- 197, 213, 0, 249, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 192, 193, 200, 212, 222,
- 226, 233, 248, 263, 265, 272, 285, 296, 304, 305,
- 308, 314, 364, 370, 371, 372, 373, 392, 393, 394,
- 397, 400, 401, 404, 406, 407, 410, 414, 418, 419,
- 420, 421, 423, 425, 435, 440, 454, 455, 456, 457,
- 458, 461, 462, 467, 468, 469, 470, 471, 479, 480,
- 484, 507, 509, 521, 539, 544, 460, 287, 288, 426,
- 427, 300, 301, 556, 557, 286, 516, 545, 0, 0,
- 362, 0, 0, 365, 268, 291, 306, 0, 531, 481,
- 217, 446, 277, 240, 0, 0, 202, 236, 220, 246,
- 261, 264, 310, 375, 383, 412, 417, 283, 258, 234,
- 439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
- 380, 0, 360, 497, 498, 302, 496, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 0, 0, 0, 0, 0, 0, 0, 257, 0, 0,
- 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 89, 0, 0, 0, 627, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
- 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
- 329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
- 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
- 0, 219, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 262, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
- 244, 0, 0, 0, 0, 541, 0, 0, 0, 0,
- 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
- 395, 441, 453, 0, 0, 0, 242, 0, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
- 243, 561, 218, 536, 210, 0, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 0, 0, 408, 452, 230, 522, 475, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 581, 368, 465,
- 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
- 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
- 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
- 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
- 295, 448, 447, 317, 318, 363, 431, 0, 190, 211,
- 352, 0, 434, 275, 559, 532, 527, 197, 213, 0,
- 249, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
- 556, 557, 286, 516, 545, 0, 0, 362, 0, 0,
- 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
- 240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 0, 0, 0, 2065, 0,
- 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 0, 0, 188, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 0, 434,
- 275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 0, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
- 496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 0, 0, 0, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
- 413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 1520, 627, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
- 0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 0, 0, 0, 541,
- 0, 0, 0, 0, 0, 0, 0, 349, 0, 316,
- 191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 242, 0, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 0,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 0, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 0, 249, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 0, 0, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
- 0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
- 0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
- 384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 0, 0, 0, 627, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
- 0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
- 327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
- 252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
- 0, 0, 219, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1327, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 284, 0,
- 385, 244, 0, 0, 0, 0, 541, 0, 0, 0,
- 0, 0, 0, 0, 349, 0, 316, 191, 215, 0,
- 0, 395, 441, 453, 0, 0, 0, 242, 0, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 0,
- 0, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
- 511, 243, 561, 218, 536, 210, 0, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 0, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 0, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 0, 0, 408, 452, 230, 522, 475,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 581, 368,
- 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
- 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
- 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
- 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
- 528, 295, 448, 447, 317, 318, 363, 431, 0, 190,
- 211, 352, 0, 434, 275, 559, 532, 527, 197, 213,
- 0, 249, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
- 301, 556, 557, 286, 516, 545, 0, 0, 362, 0,
- 0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
- 277, 240, 0, 0, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 380, 0,
- 360, 497, 498, 302, 496, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
- 0, 0, 0, 0, 0, 257, 0, 0, 0, 0,
- 350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
- 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
- 486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 0, 0, 0, 0, 188, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
- 0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
- 341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
- 251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 262, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
- 0, 0, 0, 541, 0, 0, 0, 0, 0, 0,
- 0, 349, 0, 316, 191, 215, 0, 0, 395, 441,
- 453, 0, 0, 0, 242, 0, 451, 409, 520, 223,
- 271, 438, 415, 449, 422, 274, 0, 0, 450, 356,
- 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
- 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
- 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
- 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
- 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
- 0, 437, 255, 280, 245, 398, 510, 511, 243, 561,
- 218, 536, 210, 0, 535, 391, 505, 514, 378, 367,
- 209, 512, 376, 366, 320, 339, 340, 267, 293, 429,
- 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
- 199, 0, 478, 525, 562, 224, 225, 227, 0, 266,
- 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
- 433, 0, 500, 518, 530, 540, 546, 547, 549, 550,
- 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
- 0, 0, 408, 452, 230, 522, 475, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 563, 564,
- 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
- 575, 576, 577, 578, 579, 580, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 581, 368, 465, 519, 321,
- 333, 336, 326, 345, 0, 346, 322, 323, 328, 330,
- 331, 332, 337, 338, 342, 348, 238, 201, 374, 382,
- 499, 298, 206, 207, 208, 492, 493, 494, 495, 533,
- 534, 538, 442, 443, 444, 445, 279, 528, 295, 448,
- 447, 317, 318, 363, 431, 0, 190, 211, 352, 1802,
- 434, 275, 559, 532, 527, 197, 213, 0, 249, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
- 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
- 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
- 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
- 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
- 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
- 544, 460, 287, 288, 426, 427, 300, 301, 556, 557,
- 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
- 291, 306, 0, 531, 481, 217, 446, 277, 240, 0,
- 0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
- 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
- 489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
- 302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
- 0, 0, 257, 0, 0, 0, 0, 350, 254, 0,
- 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
- 269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
- 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 309, 237, 311, 195, 396, 477, 273, 0, 0, 0,
- 0, 1794, 627, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 228, 0, 0, 235, 0, 0, 0, 335,
- 344, 343, 324, 325, 327, 329, 334, 341, 347, 0,
- 0, 0, 0, 0, 252, 307, 259, 251, 501, 0,
- 0, 0, 0, 0, 0, 0, 219, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 284, 0, 385, 244, 0, 0, 0, 0,
- 541, 0, 0, 0, 0, 0, 0, 0, 349, 0,
- 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
- 0, 242, 0, 451, 409, 520, 223, 271, 438, 415,
- 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
- 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
- 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
- 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
- 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
- 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
- 280, 245, 398, 510, 511, 243, 561, 218, 536, 210,
- 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
- 366, 320, 339, 340, 267, 293, 429, 359, 430, 292,
- 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
- 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
- 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
- 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
- 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
- 452, 230, 522, 475, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 563, 564, 565, 566, 567,
- 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 580, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 581, 368, 465, 519, 321, 333, 336, 326,
- 345, 0, 346, 322, 323, 328, 330, 331, 332, 337,
- 338, 342, 348, 238, 201, 374, 382, 499, 298, 206,
- 207, 208, 492, 493, 494, 495, 533, 534, 538, 442,
- 443, 444, 445, 279, 528, 295, 448, 447, 317, 318,
- 363, 431, 0, 190, 211, 352, 0, 434, 275, 559,
- 532, 527, 197, 213, 0, 249, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 192, 193, 200,
- 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
- 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
- 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
- 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
- 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
- 479, 480, 484, 507, 509, 521, 539, 544, 460, 287,
- 288, 426, 427, 300, 301, 556, 557, 286, 516, 545,
- 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
- 531, 481, 217, 446, 277, 240, 0, 0, 202, 236,
- 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
- 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
- 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 1664, 0, 0, 0, 0, 0, 0, 257,
- 0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 0, 0, 0, 0, 627,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
- 0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
- 325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
- 0, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 0, 0, 0, 541, 0, 0,
- 0, 0, 0, 0, 0, 349, 0, 316, 191, 215,
- 0, 0, 395, 441, 453, 0, 0, 0, 242, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 510, 511, 243, 561, 218, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 339,
- 340, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 581,
- 368, 465, 519, 321, 333, 336, 326, 345, 0, 346,
- 322, 323, 328, 330, 331, 332, 337, 338, 342, 348,
- 238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
- 493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
- 279, 528, 295, 448, 447, 317, 318, 363, 431, 0,
- 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
- 213, 0, 249, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 287, 288, 426, 427,
- 300, 301, 556, 557, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 0, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 380,
- 0, 360, 497, 498, 302, 496, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 399, 0, 1662,
- 0, 0, 0, 0, 0, 0, 257, 0, 0, 0,
- 0, 350, 254, 0, 0, 413, 0, 196, 0, 466,
- 241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
- 411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 0, 0, 0, 0, 627, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 228, 0, 0, 235,
- 0, 0, 0, 335, 344, 343, 324, 325, 327, 329,
- 334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
- 259, 251, 501, 0, 0, 0, 0, 0, 0, 0,
- 219, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 262, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
- 0, 0, 0, 0, 541, 0, 0, 0, 0, 0,
- 0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
- 441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
- 223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
- 356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
- 537, 558, 216, 247, 403, 483, 523, 473, 381, 502,
- 503, 315, 472, 282, 194, 353, 548, 214, 459, 355,
- 232, 221, 508, 526, 276, 436, 203, 485, 515, 229,
- 463, 0, 0, 560, 205, 513, 482, 377, 312, 313,
- 204, 0, 437, 255, 280, 245, 398, 510, 511, 243,
- 561, 218, 536, 210, 0, 535, 391, 505, 514, 378,
- 367, 209, 512, 376, 366, 320, 339, 340, 267, 293,
- 429, 359, 430, 292, 294, 387, 386, 388, 198, 524,
- 0, 199, 0, 478, 525, 562, 224, 225, 227, 0,
- 266, 270, 278, 281, 289, 290, 299, 351, 402, 428,
- 424, 433, 0, 500, 518, 530, 540, 546, 547, 549,
- 550, 551, 552, 553, 555, 554, 390, 297, 474, 319,
- 357, 0, 0, 408, 452, 230, 522, 475, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 563,
- 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
- 574, 575, 576, 577, 578, 579, 580, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 581, 368, 465, 519,
- 321, 333, 336, 326, 345, 0, 346, 322, 323, 328,
- 330, 331, 332, 337, 338, 342, 348, 238, 201, 374,
- 382, 499, 298, 206, 207, 208, 492, 493, 494, 495,
- 533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
- 448, 447, 317, 318, 363, 431, 0, 190, 211, 352,
- 0, 434, 275, 559, 532, 527, 197, 213, 0, 249,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
- 265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
- 371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
- 406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
- 435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
- 468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
- 539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 286, 516, 545, 0, 0, 362, 0, 0, 365,
- 268, 291, 306, 0, 531, 481, 217, 446, 277, 240,
- 0, 0, 202, 236, 220, 246, 261, 264, 310, 375,
- 383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
- 488, 489, 491, 379, 253, 416, 380, 0, 360, 497,
- 498, 302, 496, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 399, 0, 1660, 0, 0, 0,
- 0, 0, 0, 257, 0, 0, 0, 0, 350, 254,
- 0, 0, 413, 0, 196, 0, 466, 241, 361, 358,
- 504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
- 0, 354, 0, 0, 476, 384, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 309, 237, 311, 195, 396, 477, 273, 0, 0,
- 0, 0, 0, 627, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 228, 0, 0, 235, 0, 0, 0,
- 335, 344, 343, 324, 325, 327, 329, 334, 341, 347,
- 0, 0, 0, 0, 0, 252, 307, 259, 251, 501,
- 0, 0, 0, 0, 0, 0, 0, 219, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 262, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 284, 0, 385, 244, 0, 0, 0,
- 0, 541, 0, 0, 0, 0, 0, 0, 0, 349,
- 0, 316, 191, 215, 0, 0, 395, 441, 453, 0,
- 0, 0, 242, 0, 451, 409, 520, 223, 271, 438,
- 415, 449, 422, 274, 0, 0, 450, 356, 506, 432,
- 517, 542, 543, 250, 389, 529, 490, 537, 558, 216,
- 247, 403, 483, 523, 473, 381, 502, 503, 315, 472,
- 282, 194, 353, 548, 214, 459, 355, 232, 221, 508,
- 526, 276, 436, 203, 485, 515, 229, 463, 0, 0,
- 560, 205, 513, 482, 377, 312, 313, 204, 0, 437,
- 255, 280, 245, 398, 510, 511, 243, 561, 218, 536,
- 210, 0, 535, 391, 505, 514, 378, 367, 209, 512,
- 376, 366, 320, 339, 340, 267, 293, 429, 359, 430,
- 292, 294, 387, 386, 388, 198, 524, 0, 199, 0,
- 478, 525, 562, 224, 225, 227, 0, 266, 270, 278,
- 281, 289, 290, 299, 351, 402, 428, 424, 433, 0,
- 500, 518, 530, 540, 546, 547, 549, 550, 551, 552,
- 553, 555, 554, 390, 297, 474, 319, 357, 0, 0,
- 408, 452, 230, 522, 475, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 563, 564, 565, 566,
- 567, 568, 569, 570, 571, 572, 573, 574, 575, 576,
- 577, 578, 579, 580, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 581, 368, 465, 519, 321, 333, 336,
- 326, 345, 0, 346, 322, 323, 328, 330, 331, 332,
- 337, 338, 342, 348, 238, 201, 374, 382, 499, 298,
- 206, 207, 208, 492, 493, 494, 495, 533, 534, 538,
- 442, 443, 444, 445, 279, 528, 295, 448, 447, 317,
- 318, 363, 431, 0, 190, 211, 352, 0, 434, 275,
- 559, 532, 527, 197, 213, 0, 249, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 192, 193,
- 200, 212, 222, 226, 233, 248, 263, 265, 272, 285,
- 296, 304, 305, 308, 314, 364, 370, 371, 372, 373,
- 392, 393, 394, 397, 400, 401, 404, 406, 407, 410,
- 414, 418, 419, 420, 421, 423, 425, 435, 440, 454,
- 455, 456, 457, 458, 461, 462, 467, 468, 469, 470,
- 471, 479, 480, 484, 507, 509, 521, 539, 544, 460,
- 287, 288, 426, 427, 300, 301, 556, 557, 286, 516,
- 545, 0, 0, 362, 0, 0, 365, 268, 291, 306,
- 0, 531, 481, 217, 446, 277, 240, 0, 0, 202,
- 236, 220, 246, 261, 264, 310, 375, 383, 412, 417,
- 283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
- 379, 253, 416, 380, 0, 360, 497, 498, 302, 496,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 399, 0, 1658, 0, 0, 0, 0, 0, 0,
- 257, 0, 0, 0, 0, 350, 254, 0, 0, 413,
- 0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
- 256, 239, 303, 369, 411, 486, 405, 0, 354, 0,
- 0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
- 311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
- 627, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
- 324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
- 0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
- 0, 0, 0, 0, 219, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 262,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 284, 0, 385, 244, 0, 0, 0, 0, 541, 0,
- 0, 0, 0, 0, 0, 0, 349, 0, 316, 191,
- 215, 0, 0, 395, 441, 453, 0, 0, 0, 242,
- 0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
- 274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
- 250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
- 523, 473, 381, 502, 503, 315, 472, 282, 194, 353,
- 548, 214, 459, 355, 232, 221, 508, 526, 276, 436,
- 203, 485, 515, 229, 463, 0, 0, 560, 205, 513,
- 482, 377, 312, 313, 204, 0, 437, 255, 280, 245,
- 398, 510, 511, 243, 561, 218, 536, 210, 0, 535,
- 391, 505, 514, 378, 367, 209, 512, 376, 366, 320,
- 339, 340, 267, 293, 429, 359, 430, 292, 294, 387,
- 386, 388, 198, 524, 0, 199, 0, 478, 525, 562,
- 224, 225, 227, 0, 266, 270, 278, 281, 289, 290,
- 299, 351, 402, 428, 424, 433, 0, 500, 518, 530,
- 540, 546, 547, 549, 550, 551, 552, 553, 555, 554,
- 390, 297, 474, 319, 357, 0, 0, 408, 452, 230,
- 522, 475, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 563, 564, 565, 566, 567, 568, 569,
- 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
- 580, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 581, 368, 465, 519, 321, 333, 336, 326, 345, 0,
- 346, 322, 323, 328, 330, 331, 332, 337, 338, 342,
- 348, 238, 201, 374, 382, 499, 298, 206, 207, 208,
- 492, 493, 494, 495, 533, 534, 538, 442, 443, 444,
- 445, 279, 528, 295, 448, 447, 317, 318, 363, 431,
- 0, 190, 211, 352, 0, 434, 275, 559, 532, 527,
- 197, 213, 0, 249, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 192, 193, 200, 212, 222,
- 226, 233, 248, 263, 265, 272, 285, 296, 304, 305,
- 308, 314, 364, 370, 371, 372, 373, 392, 393, 394,
- 397, 400, 401, 404, 406, 407, 410, 414, 418, 419,
- 420, 421, 423, 425, 435, 440, 454, 455, 456, 457,
- 458, 461, 462, 467, 468, 469, 470, 471, 479, 480,
- 484, 507, 509, 521, 539, 544, 460, 287, 288, 426,
- 427, 300, 301, 556, 557, 286, 516, 545, 0, 0,
- 362, 0, 0, 365, 268, 291, 306, 0, 531, 481,
- 217, 446, 277, 240, 0, 0, 202, 236, 220, 246,
- 261, 264, 310, 375, 383, 412, 417, 283, 258, 234,
- 439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
- 380, 0, 360, 497, 498, 302, 496, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 1656, 0, 0, 0, 0, 0, 0, 257, 0, 0,
- 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 0, 0, 0, 0, 627, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
- 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
- 329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
- 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
- 0, 219, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 262, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
- 244, 0, 0, 0, 0, 541, 0, 0, 0, 0,
- 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
- 395, 441, 453, 0, 0, 0, 242, 0, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
- 243, 561, 218, 536, 210, 0, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 0, 0, 408, 452, 230, 522, 475, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 581, 368, 465,
- 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
- 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
- 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
- 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
- 295, 448, 447, 317, 318, 363, 431, 0, 190, 211,
- 352, 0, 434, 275, 559, 532, 527, 197, 213, 0,
- 249, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
- 556, 557, 286, 516, 545, 0, 0, 362, 0, 0,
- 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
- 240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 1652, 0, 0,
- 0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 0, 0, 627, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 0, 434,
- 275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 0, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
- 496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 1650, 0, 0, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
- 413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 0, 627, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
- 0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 0, 0, 0, 541,
- 0, 0, 0, 0, 0, 0, 0, 349, 0, 316,
- 191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 242, 0, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 0,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 0, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 0, 249, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 0, 0, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 1648, 0, 0, 0, 0, 0, 0, 257, 0,
- 0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
- 0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
- 384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 0, 0, 0, 627, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
- 0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
- 327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
- 252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
- 0, 0, 219, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 284, 0,
- 385, 244, 0, 0, 0, 0, 541, 0, 0, 0,
- 0, 0, 0, 0, 349, 0, 316, 191, 215, 0,
- 0, 395, 441, 453, 0, 0, 0, 242, 0, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 0,
- 0, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
- 511, 243, 561, 218, 536, 210, 0, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 0, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 0, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 0, 0, 408, 452, 230, 522, 475,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 581, 368,
- 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
- 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
- 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
- 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
- 528, 295, 448, 447, 317, 318, 363, 431, 0, 190,
- 211, 352, 0, 434, 275, 559, 532, 527, 197, 213,
- 0, 249, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
- 301, 556, 557, 286, 516, 545, 0, 0, 362, 0,
- 0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
- 277, 240, 0, 0, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 380, 0,
- 360, 497, 498, 302, 496, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
- 0, 0, 0, 0, 0, 257, 0, 0, 0, 0,
- 350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
- 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
- 486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 1623, 0, 0, 0, 627, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
- 0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
- 341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
- 251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 262, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
- 0, 0, 0, 541, 0, 0, 0, 0, 0, 0,
- 0, 349, 0, 316, 191, 215, 0, 0, 395, 441,
- 453, 0, 0, 0, 242, 0, 451, 409, 520, 223,
- 271, 438, 415, 449, 422, 274, 0, 0, 450, 356,
- 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
- 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
- 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
- 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
- 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
- 0, 437, 255, 280, 245, 398, 510, 511, 243, 561,
- 218, 536, 210, 0, 535, 391, 505, 514, 378, 367,
- 209, 512, 376, 366, 320, 339, 340, 267, 293, 429,
- 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
- 199, 0, 478, 525, 562, 224, 225, 227, 0, 266,
- 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
- 433, 0, 500, 518, 530, 540, 546, 547, 549, 550,
- 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
- 0, 0, 408, 452, 230, 522, 475, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 563, 564,
- 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
- 575, 576, 577, 578, 579, 580, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 581, 368, 465, 519, 321,
- 333, 336, 326, 345, 0, 346, 322, 323, 328, 330,
- 331, 332, 337, 338, 342, 348, 238, 201, 374, 382,
- 499, 298, 206, 207, 208, 492, 493, 494, 495, 533,
- 534, 538, 442, 443, 444, 445, 279, 528, 295, 448,
- 447, 317, 318, 363, 431, 0, 190, 211, 352, 0,
- 434, 275, 559, 532, 527, 197, 213, 0, 249, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
- 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
- 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
- 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
- 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
- 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
- 544, 460, 287, 288, 426, 427, 300, 301, 556, 557,
- 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
- 291, 306, 0, 531, 481, 217, 446, 277, 240, 0,
- 0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
- 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
- 489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
- 302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
- 0, 1524, 257, 0, 0, 0, 0, 350, 254, 0,
- 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
- 269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
- 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 309, 237, 311, 195, 396, 477, 273, 0, 0, 0,
- 0, 0, 188, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 228, 0, 0, 235, 0, 0, 0, 335,
- 344, 343, 324, 325, 327, 329, 334, 341, 347, 0,
- 0, 0, 0, 0, 252, 307, 259, 251, 501, 0,
- 0, 0, 0, 0, 0, 0, 219, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 284, 0, 385, 244, 0, 0, 0, 0,
- 541, 0, 0, 0, 0, 0, 0, 0, 349, 0,
- 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
- 0, 242, 0, 451, 409, 520, 223, 271, 438, 415,
- 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
- 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
- 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
- 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
- 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
- 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
- 280, 245, 398, 510, 511, 243, 561, 218, 536, 210,
- 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
- 366, 320, 339, 340, 267, 293, 429, 359, 430, 292,
- 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
- 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
- 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
- 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
- 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
- 452, 230, 522, 475, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 563, 564, 565, 566, 567,
- 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 580, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 581, 368, 465, 519, 321, 333, 336, 326,
- 345, 0, 346, 322, 323, 328, 330, 331, 332, 337,
- 338, 342, 348, 238, 201, 374, 382, 499, 298, 206,
- 207, 208, 492, 493, 494, 495, 533, 534, 538, 442,
- 443, 444, 445, 279, 528, 295, 448, 447, 317, 318,
- 363, 431, 0, 190, 211, 352, 0, 434, 275, 559,
- 532, 527, 197, 213, 0, 249, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 192, 193, 200,
- 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
- 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
- 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
- 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
- 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
- 479, 480, 484, 507, 509, 521, 539, 544, 460, 287,
- 288, 426, 427, 300, 301, 556, 557, 286, 516, 545,
- 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
- 531, 481, 217, 446, 277, 240, 0, 0, 202, 236,
- 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
- 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
- 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 0, 0, 0, 0, 257,
- 0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 89, 0, 0, 0, 800,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
- 0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
- 325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
- 0, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 0, 0, 0, 541, 0, 0,
- 0, 0, 0, 0, 0, 349, 0, 316, 191, 215,
- 0, 0, 395, 441, 453, 0, 0, 0, 242, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 510, 511, 243, 561, 218, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 339,
- 340, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 581,
- 368, 465, 519, 321, 333, 336, 326, 345, 0, 346,
- 322, 323, 328, 330, 331, 332, 337, 338, 342, 348,
- 238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
- 493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
- 279, 528, 295, 448, 447, 317, 318, 363, 431, 0,
- 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
- 213, 0, 249, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 287, 288, 426, 427,
- 300, 301, 556, 557, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 0, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 380,
- 0, 360, 497, 498, 302, 496, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 399, 0, 0,
- 0, 0, 0, 0, 0, 0, 257, 0, 0, 0,
- 0, 350, 254, 0, 0, 413, 0, 196, 0, 466,
- 241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
- 411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 0, 0, 0, 0, 188, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 228, 0, 0, 235,
- 0, 0, 0, 335, 344, 343, 324, 325, 327, 329,
- 334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
- 259, 251, 501, 0, 0, 0, 0, 0, 0, 0,
- 219, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 262, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1277, 0, 284, 0, 385, 244,
- 0, 0, 0, 0, 541, 0, 0, 0, 0, 0,
- 0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
- 441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
- 223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
- 356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
- 537, 558, 216, 247, 403, 483, 523, 473, 381, 502,
- 503, 315, 472, 282, 194, 353, 548, 214, 459, 355,
- 232, 221, 508, 526, 276, 436, 203, 485, 515, 229,
- 463, 0, 0, 560, 205, 513, 482, 377, 312, 313,
- 204, 0, 437, 255, 280, 245, 398, 510, 511, 243,
- 561, 218, 536, 210, 0, 535, 391, 505, 514, 378,
- 367, 209, 512, 376, 366, 320, 339, 340, 267, 293,
- 429, 359, 430, 292, 294, 387, 386, 388, 198, 524,
- 0, 199, 0, 478, 525, 562, 224, 225, 227, 0,
- 266, 270, 278, 281, 289, 290, 299, 351, 402, 428,
- 424, 433, 0, 500, 518, 530, 540, 546, 547, 549,
- 550, 551, 552, 553, 555, 554, 390, 297, 474, 319,
- 357, 0, 0, 408, 452, 230, 522, 475, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 563,
- 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
- 574, 575, 576, 577, 578, 579, 580, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 581, 368, 465, 519,
- 321, 333, 336, 326, 345, 0, 346, 322, 323, 328,
- 330, 331, 332, 337, 338, 342, 348, 238, 201, 374,
- 382, 499, 298, 206, 207, 208, 492, 493, 494, 495,
- 533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
- 448, 447, 317, 318, 363, 431, 0, 190, 211, 352,
- 0, 434, 275, 559, 532, 527, 197, 213, 0, 249,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
- 265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
- 371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
- 406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
- 435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
- 468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
- 539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 1276, 516, 545, 0, 0, 362, 0, 0, 365,
- 268, 291, 306, 0, 531, 481, 217, 446, 277, 240,
- 0, 0, 202, 236, 220, 246, 261, 264, 310, 375,
- 383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
- 488, 489, 491, 379, 253, 416, 380, 0, 360, 497,
- 498, 302, 496, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 399, 0, 0, 0, 0, 0,
- 0, 0, 0, 257, 0, 0, 0, 0, 350, 254,
- 0, 0, 413, 0, 196, 0, 466, 241, 361, 358,
- 504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
- 0, 354, 0, 0, 476, 384, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 309, 237, 311, 195, 396, 477, 273, 0, 0,
- 0, 0, 0, 188, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 228, 0, 0, 235, 0, 0, 0,
- 335, 344, 343, 324, 325, 327, 329, 334, 341, 347,
- 0, 0, 0, 0, 0, 252, 307, 259, 251, 501,
- 0, 0, 0, 0, 0, 0, 0, 219, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 262, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 284, 0, 385, 244, 0, 0, 0,
- 0, 541, 0, 0, 0, 0, 0, 0, 0, 349,
- 0, 316, 191, 215, 0, 0, 395, 441, 453, 0,
- 0, 0, 242, 0, 451, 409, 520, 223, 271, 438,
- 415, 449, 422, 274, 0, 0, 450, 356, 506, 432,
- 517, 542, 543, 250, 389, 529, 490, 537, 558, 216,
- 247, 403, 483, 523, 473, 381, 502, 503, 315, 472,
- 282, 194, 353, 548, 214, 459, 355, 232, 221, 508,
- 526, 276, 436, 203, 485, 515, 229, 463, 0, 0,
- 560, 205, 513, 482, 377, 312, 313, 204, 0, 437,
- 255, 280, 245, 398, 510, 511, 243, 561, 218, 536,
- 210, 0, 535, 391, 505, 514, 378, 367, 209, 512,
- 376, 366, 320, 339, 340, 267, 293, 429, 359, 430,
- 292, 294, 387, 386, 388, 198, 524, 0, 199, 0,
- 478, 525, 562, 224, 225, 227, 0, 266, 270, 278,
- 281, 289, 290, 299, 351, 402, 428, 424, 433, 0,
- 500, 518, 530, 540, 546, 547, 549, 550, 551, 552,
- 553, 555, 554, 390, 297, 474, 319, 357, 0, 0,
- 408, 452, 230, 522, 475, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 563, 564, 565, 566,
- 567, 568, 569, 570, 571, 572, 573, 574, 575, 576,
- 577, 578, 579, 580, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 581, 368, 465, 519, 321, 333, 336,
- 326, 345, 0, 346, 322, 323, 328, 330, 331, 332,
- 337, 338, 342, 348, 238, 201, 374, 382, 499, 298,
- 206, 207, 208, 492, 493, 494, 495, 533, 534, 538,
- 442, 443, 444, 445, 279, 528, 295, 448, 447, 317,
- 318, 363, 431, 0, 190, 211, 352, 0, 434, 275,
- 559, 532, 527, 197, 213, 0, 249, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 886, 0, 0, 0, 192, 193,
- 200, 212, 222, 226, 233, 248, 263, 265, 272, 285,
- 296, 304, 305, 308, 314, 364, 370, 371, 372, 373,
- 392, 393, 394, 397, 400, 401, 404, 406, 407, 410,
- 414, 418, 419, 420, 421, 423, 425, 435, 440, 454,
- 455, 456, 457, 458, 461, 462, 467, 468, 469, 470,
- 471, 479, 480, 484, 507, 509, 521, 539, 544, 460,
- 287, 288, 426, 427, 300, 301, 556, 557, 286, 516,
- 545, 0, 0, 362, 0, 0, 365, 268, 291, 306,
- 0, 531, 481, 217, 446, 277, 240, 0, 0, 202,
- 236, 220, 246, 261, 264, 310, 375, 383, 412, 417,
- 283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
- 379, 253, 416, 380, 0, 360, 497, 498, 302, 496,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 399, 0, 0, 0, 0, 0, 0, 0, 0,
- 257, 0, 0, 0, 0, 350, 254, 0, 0, 413,
- 0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
- 256, 239, 303, 369, 411, 486, 405, 0, 354, 0,
- 0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
- 311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
- 188, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
- 324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
- 0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
- 0, 0, 0, 0, 219, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 262,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 284, 0, 385, 244, 0, 0, 584, 0, 541, 0,
- 0, 0, 0, 0, 0, 0, 349, 0, 316, 191,
- 215, 0, 0, 395, 441, 453, 0, 0, 0, 242,
- 0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
- 274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
- 250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
- 523, 473, 381, 502, 503, 315, 472, 282, 194, 353,
- 548, 214, 459, 355, 232, 221, 508, 526, 276, 436,
- 203, 485, 515, 229, 463, 0, 0, 560, 205, 513,
- 482, 377, 312, 313, 204, 0, 437, 255, 280, 245,
- 398, 510, 511, 243, 561, 218, 536, 210, 0, 535,
- 391, 505, 514, 378, 367, 209, 512, 376, 366, 320,
- 339, 340, 267, 293, 429, 359, 430, 292, 294, 387,
- 386, 388, 198, 524, 0, 199, 0, 478, 525, 562,
- 224, 225, 227, 0, 266, 270, 278, 281, 289, 290,
- 299, 351, 402, 428, 424, 433, 0, 500, 518, 530,
- 540, 546, 547, 549, 550, 551, 552, 553, 555, 554,
- 390, 297, 474, 319, 357, 0, 0, 408, 452, 230,
- 522, 475, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 563, 564, 565, 566, 567, 568, 569,
- 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
- 580, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 581, 368, 465, 519, 321, 333, 336, 326, 345, 0,
- 346, 322, 323, 328, 330, 331, 332, 337, 338, 342,
- 348, 238, 201, 374, 382, 499, 298, 206, 207, 208,
- 492, 493, 494, 495, 533, 534, 538, 442, 443, 444,
- 445, 279, 528, 295, 448, 447, 317, 318, 363, 431,
- 0, 190, 211, 352, 0, 434, 275, 559, 532, 527,
- 197, 213, 0, 249, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 192, 193, 200, 212, 222,
- 226, 233, 248, 263, 265, 272, 285, 296, 304, 305,
- 308, 314, 364, 370, 371, 372, 373, 392, 393, 394,
- 397, 400, 401, 404, 406, 407, 410, 414, 418, 419,
- 420, 421, 423, 425, 435, 440, 454, 455, 456, 457,
- 458, 461, 462, 467, 468, 469, 470, 471, 479, 480,
- 484, 507, 509, 521, 539, 544, 460, 287, 288, 426,
- 427, 300, 301, 556, 557, 286, 516, 545, 0, 0,
- 362, 0, 0, 365, 268, 291, 306, 0, 531, 481,
- 217, 446, 277, 240, 0, 0, 202, 236, 220, 246,
- 261, 264, 310, 375, 383, 412, 417, 283, 258, 234,
- 439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
- 380, 0, 360, 497, 498, 302, 496, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 0, 0, 0, 0, 0, 0, 0, 257, 0, 0,
- 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 0, 0, 0, 0, 627, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
- 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
- 329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
- 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
- 0, 219, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 262, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
- 244, 0, 0, 0, 0, 541, 0, 0, 0, 0,
- 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
- 395, 441, 453, 0, 0, 0, 242, 0, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
- 243, 561, 218, 536, 210, 0, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 0, 0, 408, 452, 230, 522, 475, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 581, 368, 465,
- 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
- 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
- 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
- 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
- 295, 448, 447, 317, 318, 363, 431, 0, 190, 211,
- 352, 0, 434, 275, 559, 532, 527, 197, 213, 0,
- 249, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 3426, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
- 556, 557, 286, 516, 545, 0, 0, 362, 0, 0,
- 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
- 240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 0, 0, 627, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 0, 434,
- 275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 0, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
- 496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 0, 0, 0, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
- 413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 0, 800, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
- 0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 0, 0, 0, 541,
- 0, 0, 0, 0, 0, 0, 0, 349, 0, 316,
- 191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 242, 0, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 0,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 0, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 0, 249, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 0, 0, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
- 0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
- 0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
- 384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 0, 0, 0, 188, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
- 0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
- 327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
- 252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
- 0, 0, 219, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 284, 0,
- 385, 244, 0, 0, 0, 0, 541, 0, 0, 0,
- 0, 0, 0, 0, 349, 0, 316, 191, 215, 0,
- 0, 395, 441, 453, 0, 0, 0, 242, 0, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 0,
- 0, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
- 511, 243, 561, 218, 536, 210, 0, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 0, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 0, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 0, 0, 408, 452, 230, 522, 475,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 581, 368,
- 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
- 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
- 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
- 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
- 528, 295, 448, 447, 317, 318, 363, 431, 0, 190,
- 211, 352, 0, 434, 275, 559, 532, 527, 197, 213,
- 0, 249, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
- 301, 556, 557, 286, 516, 545, 0, 0, 362, 0,
- 0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
- 277, 240, 0, 0, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 0, 0,
- 360, 497, 498, 302,
+ 858, 859, 860, 861, 862, 863, 864, 865, 866, 867,
+ 868, 869, 870, 871, 872, 873, 874, 875, 876, 685,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 287, 0, 388, 247, 0, 438, 771, 0, 0, 549,
+ 0, 0, 769, 0, 0, 0, 0, 352, 0, 319,
+ 193, 217, 0, 0, 398, 446, 458, 0, 0, 0,
+ 822, 0, 456, 412, 527, 225, 274, 443, 418, 454,
+ 426, 277, 3428, 0, 455, 359, 512, 436, 524, 550,
+ 551, 253, 392, 536, 496, 544, 568, 218, 250, 406,
+ 489, 530, 478, 384, 508, 509, 318, 477, 285, 196,
+ 356, 556, 216, 464, 358, 234, 223, 514, 533, 279,
+ 441, 563, 205, 491, 522, 231, 468, 0, 0, 570,
+ 239, 488, 207, 519, 487, 380, 315, 316, 206, 0,
+ 442, 258, 283, 0, 0, 248, 401, 823, 824, 246,
+ 571, 724, 543, 212, 0, 542, 394, 511, 520, 381,
+ 370, 211, 518, 379, 369, 323, 732, 733, 270, 296,
+ 433, 362, 434, 295, 297, 390, 389, 391, 200, 531,
+ 0, 201, 0, 483, 532, 572, 226, 227, 229, 0,
+ 269, 273, 281, 284, 292, 293, 302, 354, 405, 432,
+ 428, 437, 0, 506, 525, 537, 548, 554, 555, 557,
+ 558, 559, 560, 561, 564, 562, 393, 300, 479, 322,
+ 360, 0, 0, 411, 457, 232, 529, 480, 782, 770,
+ 695, 786, 697, 783, 784, 692, 693, 696, 785, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
+ 584, 585, 586, 587, 588, 589, 590, 0, 773, 681,
+ 680, 0, 688, 0, 714, 715, 717, 721, 722, 723,
+ 734, 735, 736, 744, 746, 747, 745, 748, 749, 750,
+ 753, 754, 755, 756, 751, 752, 757, 698, 702, 699,
+ 700, 701, 713, 703, 704, 705, 706, 707, 708, 709,
+ 710, 711, 712, 796, 797, 798, 799, 800, 801, 727,
+ 731, 730, 728, 729, 725, 726, 679, 192, 213, 355,
+ 0, 439, 278, 569, 539, 534, 199, 215, 787, 252,
+ 788, 0, 0, 792, 0, 0, 0, 794, 793, 0,
+ 795, 761, 760, 0, 0, 789, 790, 0, 791, 0,
+ 0, 194, 195, 202, 214, 224, 228, 235, 251, 266,
+ 268, 275, 288, 299, 307, 308, 311, 317, 367, 373,
+ 374, 375, 376, 395, 396, 397, 400, 403, 404, 407,
+ 409, 410, 413, 417, 421, 422, 423, 425, 427, 429,
+ 440, 445, 459, 460, 461, 462, 463, 466, 467, 472,
+ 473, 474, 475, 476, 484, 485, 490, 513, 515, 528,
+ 546, 552, 465, 802, 803, 804, 805, 806, 807, 808,
+ 809, 289, 523, 553, 521, 565, 547, 424, 365, 0,
+ 0, 368, 271, 294, 309, 0, 538, 486, 219, 451,
+ 280, 243, 827, 0, 204, 238, 222, 249, 264, 267,
+ 313, 378, 386, 415, 420, 286, 261, 236, 444, 233,
+ 469, 493, 494, 495, 497, 382, 256, 419, 383, 0,
+ 363, 503, 504, 305, 502, 0, 691, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 402, 0, 0, 0,
+ 0, 678, 0, 0, 0, 260, 683, 0, 0, 0,
+ 353, 257, 0, 0, 416, 0, 198, 0, 471, 244,
+ 364, 361, 510, 272, 263, 259, 242, 306, 372, 414,
+ 492, 408, 690, 357, 0, 0, 481, 387, 0, 0,
+ 0, 0, 0, 686, 687, 0, 0, 0, 0, 0,
+ 0, 0, 0, 312, 240, 314, 197, 399, 482, 276,
+ 0, 91, 0, 1520, 828, 812, 778, 779, 816, 829,
+ 830, 831, 832, 817, 0, 230, 818, 819, 237, 820,
+ 0, 777, 718, 720, 719, 737, 738, 739, 740, 741,
+ 742, 743, 716, 825, 833, 834, 0, 255, 310, 262,
+ 254, 507, 0, 0, 0, 0, 0, 0, 0, 221,
+ 0, 0, 0, 0, 0, 0, 0, 660, 675, 0,
+ 689, 0, 0, 0, 265, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 672, 673, 0, 0, 0, 0, 772, 0, 674, 0,
+ 0, 682, 835, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 860, 861, 862,
+ 863, 864, 865, 866, 867, 868, 869, 870, 871, 872,
+ 873, 874, 875, 876, 685, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 287, 0, 388, 247, 0,
+ 438, 771, 0, 0, 549, 0, 0, 769, 0, 0,
+ 0, 0, 352, 0, 319, 193, 217, 0, 0, 398,
+ 446, 458, 0, 0, 0, 822, 0, 456, 412, 527,
+ 225, 274, 443, 418, 454, 426, 277, 0, 0, 455,
+ 359, 512, 436, 524, 550, 551, 253, 392, 536, 496,
+ 544, 568, 218, 250, 406, 489, 530, 478, 384, 508,
+ 509, 318, 477, 285, 196, 356, 556, 216, 464, 358,
+ 234, 223, 514, 533, 279, 441, 563, 205, 491, 522,
+ 231, 468, 0, 0, 570, 239, 488, 207, 519, 487,
+ 380, 315, 316, 206, 0, 442, 258, 283, 0, 0,
+ 248, 401, 823, 824, 246, 571, 724, 543, 212, 0,
+ 542, 394, 511, 520, 381, 370, 211, 518, 379, 369,
+ 323, 732, 733, 270, 296, 433, 362, 434, 295, 297,
+ 390, 389, 391, 200, 531, 0, 201, 0, 483, 532,
+ 572, 226, 227, 229, 0, 269, 273, 281, 284, 292,
+ 293, 302, 354, 405, 432, 428, 437, 0, 506, 525,
+ 537, 548, 554, 555, 557, 558, 559, 560, 561, 564,
+ 562, 393, 300, 479, 322, 360, 0, 0, 411, 457,
+ 232, 529, 480, 782, 770, 695, 786, 697, 783, 784,
+ 692, 693, 696, 785, 573, 574, 575, 576, 577, 578,
+ 579, 580, 581, 582, 583, 584, 585, 586, 587, 588,
+ 589, 590, 0, 773, 681, 680, 0, 688, 0, 714,
+ 715, 717, 721, 722, 723, 734, 735, 736, 744, 746,
+ 747, 745, 748, 749, 750, 753, 754, 755, 756, 751,
+ 752, 757, 698, 702, 699, 700, 701, 713, 703, 704,
+ 705, 706, 707, 708, 709, 710, 711, 712, 796, 797,
+ 798, 799, 800, 801, 727, 731, 730, 728, 729, 725,
+ 726, 679, 192, 213, 355, 0, 439, 278, 569, 539,
+ 534, 199, 215, 787, 252, 788, 0, 0, 792, 0,
+ 0, 0, 794, 793, 0, 795, 761, 760, 0, 0,
+ 789, 790, 0, 791, 0, 0, 194, 195, 202, 214,
+ 224, 228, 235, 251, 266, 268, 275, 288, 299, 307,
+ 308, 311, 317, 367, 373, 374, 375, 376, 395, 396,
+ 397, 400, 403, 404, 407, 409, 410, 413, 417, 421,
+ 422, 423, 425, 427, 429, 440, 445, 459, 460, 461,
+ 462, 463, 466, 467, 472, 473, 474, 475, 476, 484,
+ 485, 490, 513, 515, 528, 546, 552, 465, 802, 803,
+ 804, 805, 806, 807, 808, 809, 289, 523, 553, 521,
+ 565, 547, 424, 365, 0, 0, 368, 271, 294, 309,
+ 0, 538, 486, 219, 451, 280, 243, 827, 0, 204,
+ 238, 222, 249, 264, 267, 313, 378, 386, 415, 420,
+ 286, 261, 236, 444, 233, 469, 493, 494, 495, 497,
+ 382, 256, 419, 383, 0, 363, 503, 504, 305, 502,
+ 0, 691, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 402, 0, 0, 0, 0, 678, 0, 0, 0,
+ 260, 683, 0, 0, 0, 353, 257, 0, 0, 416,
+ 0, 198, 0, 471, 244, 364, 361, 510, 272, 263,
+ 259, 242, 306, 372, 414, 492, 408, 690, 357, 0,
+ 0, 481, 387, 0, 0, 0, 0, 0, 686, 687,
+ 0, 0, 0, 0, 0, 0, 0, 0, 312, 240,
+ 314, 197, 399, 482, 276, 0, 91, 0, 0, 828,
+ 812, 778, 779, 816, 829, 830, 831, 832, 817, 0,
+ 230, 818, 819, 237, 820, 0, 777, 718, 720, 719,
+ 737, 738, 739, 740, 741, 742, 743, 716, 825, 833,
+ 834, 0, 255, 310, 262, 254, 507, 0, 0, 0,
+ 0, 0, 0, 0, 221, 0, 0, 0, 0, 0,
+ 0, 0, 660, 675, 0, 689, 0, 0, 0, 265,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 672, 673, 918, 0, 0,
+ 0, 772, 0, 674, 0, 0, 682, 835, 836, 837,
+ 838, 839, 840, 841, 842, 843, 844, 845, 846, 847,
+ 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
+ 858, 859, 860, 861, 862, 863, 864, 865, 866, 867,
+ 868, 869, 870, 871, 872, 873, 874, 875, 876, 685,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 287, 0, 388, 247, 0, 438, 771, 0, 0, 549,
+ 0, 0, 769, 0, 0, 0, 0, 352, 0, 319,
+ 193, 217, 0, 0, 398, 446, 458, 0, 0, 0,
+ 822, 0, 456, 412, 527, 225, 274, 443, 418, 454,
+ 426, 277, 0, 0, 455, 359, 512, 436, 524, 550,
+ 551, 253, 392, 536, 496, 544, 568, 218, 250, 406,
+ 489, 530, 478, 384, 508, 509, 318, 477, 285, 196,
+ 356, 556, 216, 464, 358, 234, 223, 514, 533, 279,
+ 441, 563, 205, 491, 522, 231, 468, 0, 0, 570,
+ 239, 488, 207, 519, 487, 380, 315, 316, 206, 0,
+ 442, 258, 283, 0, 0, 248, 401, 823, 824, 246,
+ 571, 724, 543, 212, 0, 542, 394, 511, 520, 381,
+ 370, 211, 518, 379, 369, 323, 732, 733, 270, 296,
+ 433, 362, 434, 295, 297, 390, 389, 391, 200, 531,
+ 0, 201, 0, 483, 532, 572, 226, 227, 229, 0,
+ 269, 273, 281, 284, 292, 293, 302, 354, 405, 432,
+ 428, 437, 0, 506, 525, 537, 548, 554, 555, 557,
+ 558, 559, 560, 561, 564, 562, 393, 300, 479, 322,
+ 360, 0, 0, 411, 457, 232, 529, 480, 782, 770,
+ 695, 786, 697, 783, 784, 692, 693, 696, 785, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
+ 584, 585, 586, 587, 588, 589, 590, 0, 773, 681,
+ 680, 0, 688, 0, 714, 715, 717, 721, 722, 723,
+ 734, 735, 736, 744, 746, 747, 745, 748, 749, 750,
+ 753, 754, 755, 756, 751, 752, 757, 698, 702, 699,
+ 700, 701, 713, 703, 704, 705, 706, 707, 708, 709,
+ 710, 711, 712, 796, 797, 798, 799, 800, 801, 727,
+ 731, 730, 728, 729, 725, 726, 679, 192, 213, 355,
+ 0, 439, 278, 569, 539, 534, 199, 215, 787, 252,
+ 788, 0, 0, 792, 0, 0, 0, 794, 793, 0,
+ 795, 761, 760, 0, 0, 789, 790, 0, 791, 0,
+ 0, 194, 195, 202, 214, 224, 228, 235, 251, 266,
+ 268, 275, 288, 299, 307, 308, 311, 317, 367, 373,
+ 374, 375, 376, 395, 396, 397, 400, 403, 404, 407,
+ 409, 410, 413, 417, 421, 422, 423, 425, 427, 429,
+ 440, 445, 459, 460, 461, 462, 463, 466, 467, 472,
+ 473, 474, 475, 476, 484, 485, 490, 513, 515, 528,
+ 546, 552, 465, 802, 803, 804, 805, 806, 807, 808,
+ 809, 289, 523, 553, 521, 565, 547, 424, 365, 0,
+ 0, 368, 271, 294, 309, 0, 538, 486, 219, 451,
+ 280, 243, 827, 0, 204, 238, 222, 249, 264, 267,
+ 313, 378, 386, 415, 420, 286, 261, 236, 444, 233,
+ 469, 493, 494, 495, 497, 382, 256, 419, 383, 0,
+ 363, 503, 504, 305, 502, 0, 691, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 402, 0, 0, 0,
+ 0, 678, 0, 0, 0, 260, 683, 0, 0, 0,
+ 353, 257, 0, 0, 416, 0, 198, 0, 471, 244,
+ 364, 361, 510, 272, 263, 259, 242, 306, 372, 414,
+ 492, 408, 690, 357, 0, 0, 481, 387, 0, 0,
+ 0, 0, 0, 686, 687, 0, 0, 0, 0, 0,
+ 0, 0, 0, 312, 240, 314, 197, 399, 482, 276,
+ 0, 91, 0, 0, 828, 812, 778, 779, 816, 829,
+ 830, 831, 832, 817, 0, 230, 818, 819, 237, 820,
+ 0, 777, 718, 720, 719, 737, 738, 739, 740, 741,
+ 742, 743, 716, 825, 833, 834, 0, 255, 310, 262,
+ 254, 507, 0, 0, 0, 0, 0, 0, 0, 221,
+ 0, 0, 0, 0, 0, 0, 0, 660, 675, 0,
+ 689, 0, 0, 0, 265, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 672, 673, 0, 0, 0, 0, 772, 0, 674, 0,
+ 0, 682, 835, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 860, 861, 862,
+ 863, 864, 865, 866, 867, 868, 869, 870, 871, 872,
+ 873, 874, 875, 876, 685, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 287, 0, 388, 247, 0,
+ 438, 771, 0, 0, 549, 0, 0, 769, 0, 0,
+ 0, 0, 352, 0, 319, 193, 217, 0, 0, 398,
+ 446, 458, 0, 0, 0, 822, 0, 456, 412, 527,
+ 225, 274, 443, 418, 454, 426, 277, 0, 0, 455,
+ 359, 512, 436, 524, 550, 551, 253, 392, 536, 496,
+ 544, 568, 218, 250, 406, 489, 530, 478, 384, 508,
+ 509, 318, 477, 285, 196, 356, 556, 216, 464, 358,
+ 234, 223, 514, 533, 279, 441, 563, 205, 491, 522,
+ 231, 468, 0, 0, 570, 239, 488, 207, 519, 487,
+ 380, 315, 316, 206, 0, 442, 258, 283, 0, 0,
+ 248, 401, 823, 824, 246, 571, 724, 543, 212, 0,
+ 542, 394, 511, 520, 381, 370, 211, 518, 379, 369,
+ 323, 732, 733, 270, 296, 433, 362, 434, 295, 297,
+ 390, 389, 391, 200, 531, 0, 201, 0, 483, 532,
+ 572, 226, 227, 229, 0, 269, 273, 281, 284, 292,
+ 293, 302, 354, 405, 432, 428, 437, 0, 506, 525,
+ 537, 548, 554, 555, 557, 558, 559, 560, 561, 564,
+ 562, 393, 300, 479, 322, 360, 0, 0, 411, 457,
+ 232, 529, 480, 782, 770, 695, 786, 697, 783, 784,
+ 692, 693, 696, 785, 573, 574, 575, 576, 577, 578,
+ 579, 580, 581, 582, 583, 584, 585, 586, 587, 588,
+ 589, 590, 0, 773, 681, 680, 0, 688, 0, 714,
+ 715, 717, 721, 722, 723, 734, 735, 736, 744, 746,
+ 747, 745, 748, 749, 750, 753, 754, 755, 756, 751,
+ 752, 757, 698, 702, 699, 700, 701, 713, 703, 704,
+ 705, 706, 707, 708, 709, 710, 711, 712, 796, 797,
+ 798, 799, 800, 801, 727, 731, 730, 728, 729, 725,
+ 726, 679, 192, 213, 355, 0, 439, 278, 569, 539,
+ 534, 199, 215, 787, 252, 788, 0, 0, 792, 0,
+ 0, 0, 794, 793, 0, 795, 761, 760, 0, 0,
+ 789, 790, 0, 791, 0, 0, 194, 195, 202, 214,
+ 224, 228, 235, 251, 266, 268, 275, 288, 299, 307,
+ 308, 311, 317, 367, 373, 374, 375, 376, 395, 396,
+ 397, 400, 403, 404, 407, 409, 410, 413, 417, 421,
+ 422, 423, 425, 427, 429, 440, 445, 459, 460, 461,
+ 462, 463, 466, 467, 472, 473, 474, 475, 476, 484,
+ 485, 490, 513, 515, 528, 546, 552, 465, 802, 803,
+ 804, 805, 806, 807, 808, 809, 289, 523, 553, 521,
+ 565, 547, 424, 365, 0, 0, 368, 271, 294, 309,
+ 0, 538, 486, 219, 451, 280, 243, 827, 0, 204,
+ 238, 222, 249, 264, 267, 313, 378, 386, 415, 420,
+ 286, 261, 236, 444, 233, 469, 493, 494, 495, 497,
+ 382, 256, 419, 383, 0, 363, 503, 504, 305, 502,
+ 0, 691, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 402, 0, 0, 0, 0, 678, 0, 0, 0,
+ 260, 683, 0, 0, 0, 353, 257, 0, 0, 416,
+ 0, 198, 0, 471, 244, 364, 361, 510, 272, 263,
+ 259, 242, 306, 372, 414, 492, 408, 690, 357, 0,
+ 0, 481, 387, 0, 0, 0, 0, 0, 686, 687,
+ 0, 0, 0, 0, 0, 0, 0, 0, 312, 240,
+ 314, 197, 399, 482, 276, 0, 91, 0, 0, 828,
+ 812, 778, 779, 816, 829, 830, 831, 832, 817, 0,
+ 230, 818, 819, 237, 820, 0, 777, 718, 720, 719,
+ 737, 738, 739, 740, 741, 742, 743, 716, 825, 833,
+ 834, 0, 255, 310, 262, 254, 507, 0, 0, 0,
+ 0, 0, 0, 0, 221, 0, 0, 0, 0, 0,
+ 0, 0, 0, 675, 0, 689, 0, 0, 0, 265,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 672, 673, 0, 0, 0,
+ 0, 772, 0, 674, 0, 0, 682, 835, 836, 837,
+ 838, 839, 840, 841, 842, 843, 844, 845, 846, 847,
+ 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
+ 858, 859, 860, 861, 862, 863, 864, 865, 866, 867,
+ 868, 869, 870, 871, 872, 873, 874, 875, 876, 685,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 287, 0, 388, 247, 0, 438, 771, 0, 0, 549,
+ 0, 0, 769, 0, 0, 0, 0, 352, 0, 319,
+ 193, 217, 0, 0, 398, 446, 458, 0, 0, 0,
+ 822, 0, 456, 412, 527, 225, 274, 443, 418, 454,
+ 426, 277, 0, 0, 455, 359, 512, 436, 524, 550,
+ 551, 253, 392, 536, 496, 544, 568, 218, 250, 406,
+ 489, 530, 478, 384, 508, 509, 318, 477, 285, 196,
+ 356, 556, 216, 464, 358, 234, 223, 514, 533, 279,
+ 441, 563, 205, 491, 522, 231, 468, 0, 0, 570,
+ 239, 488, 207, 519, 487, 380, 315, 316, 206, 0,
+ 442, 258, 283, 0, 0, 248, 401, 823, 824, 246,
+ 571, 724, 543, 212, 0, 542, 394, 511, 520, 381,
+ 370, 211, 518, 379, 369, 323, 732, 733, 270, 296,
+ 433, 362, 434, 295, 297, 390, 389, 391, 200, 531,
+ 0, 201, 0, 483, 532, 572, 226, 227, 229, 0,
+ 269, 273, 281, 284, 292, 293, 302, 354, 405, 432,
+ 428, 437, 0, 506, 525, 537, 548, 554, 555, 557,
+ 558, 559, 560, 561, 564, 562, 393, 300, 479, 322,
+ 360, 0, 0, 411, 457, 232, 529, 480, 782, 770,
+ 695, 786, 697, 783, 784, 692, 693, 696, 785, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
+ 584, 585, 586, 587, 588, 589, 590, 0, 773, 681,
+ 680, 0, 688, 0, 714, 715, 717, 721, 722, 723,
+ 734, 735, 736, 744, 746, 747, 745, 748, 749, 750,
+ 753, 754, 755, 756, 751, 752, 757, 698, 702, 699,
+ 700, 701, 713, 703, 704, 705, 706, 707, 708, 709,
+ 710, 711, 712, 796, 797, 798, 799, 800, 801, 727,
+ 731, 730, 728, 729, 725, 726, 679, 192, 213, 355,
+ 0, 439, 278, 569, 539, 534, 199, 215, 787, 252,
+ 788, 0, 0, 792, 0, 0, 0, 794, 793, 0,
+ 795, 761, 760, 0, 0, 789, 790, 0, 791, 0,
+ 0, 194, 195, 202, 214, 224, 228, 235, 251, 266,
+ 268, 275, 288, 299, 307, 308, 311, 317, 367, 373,
+ 374, 375, 376, 395, 396, 397, 400, 403, 404, 407,
+ 409, 410, 413, 417, 421, 422, 423, 425, 427, 429,
+ 440, 445, 459, 460, 461, 462, 463, 466, 467, 472,
+ 473, 474, 475, 476, 484, 485, 490, 513, 515, 528,
+ 546, 552, 465, 802, 803, 804, 805, 806, 807, 808,
+ 809, 289, 523, 553, 521, 565, 547, 424, 365, 0,
+ 0, 368, 271, 294, 309, 0, 538, 486, 219, 451,
+ 280, 243, 827, 0, 204, 238, 222, 249, 264, 267,
+ 313, 378, 386, 415, 420, 286, 261, 236, 444, 233,
+ 469, 493, 494, 495, 497, 382, 256, 419, 383, 0,
+ 363, 503, 504, 305, 502, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 402, 0, 0, 0,
+ 0, 0, 0, 0, 0, 260, 0, 0, 0, 0,
+ 353, 257, 0, 0, 416, 0, 198, 0, 471, 244,
+ 364, 361, 510, 272, 263, 259, 242, 306, 372, 414,
+ 492, 408, 0, 357, 0, 0, 481, 387, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 312, 240, 314, 197, 399, 482, 276,
+ 0, 0, 0, 0, 0, 638, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 230, 0, 0, 237, 0,
+ 0, 0, 338, 347, 346, 327, 328, 330, 332, 337,
+ 344, 350, 0, 0, 0, 0, 0, 255, 310, 262,
+ 254, 507, 0, 0, 0, 0, 0, 0, 0, 221,
+ 0, 0, 0, 0, 1336, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 265, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1337, 1338, 1339,
+ 1340, 1341, 1342, 1343, 1345, 1344, 1346, 1347, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 287, 0, 388, 247, 0,
+ 438, 0, 0, 0, 549, 0, 0, 0, 0, 0,
+ 0, 0, 352, 0, 319, 193, 217, 0, 0, 398,
+ 446, 458, 0, 0, 0, 245, 0, 456, 412, 527,
+ 225, 274, 443, 418, 454, 426, 277, 0, 0, 455,
+ 359, 512, 436, 524, 550, 551, 253, 392, 536, 496,
+ 544, 568, 218, 250, 406, 489, 530, 478, 384, 508,
+ 509, 318, 477, 285, 196, 356, 556, 216, 464, 358,
+ 234, 223, 514, 533, 279, 441, 563, 205, 491, 522,
+ 231, 468, 0, 0, 570, 239, 488, 207, 519, 487,
+ 380, 315, 316, 206, 0, 442, 258, 283, 0, 0,
+ 248, 401, 516, 517, 246, 571, 220, 543, 212, 0,
+ 542, 394, 511, 520, 381, 370, 211, 518, 379, 369,
+ 323, 342, 343, 270, 296, 433, 362, 434, 295, 297,
+ 390, 389, 391, 200, 531, 0, 201, 0, 483, 532,
+ 572, 226, 227, 229, 0, 269, 273, 281, 284, 292,
+ 293, 302, 354, 405, 432, 428, 437, 0, 506, 525,
+ 537, 548, 554, 555, 557, 558, 559, 560, 561, 564,
+ 562, 393, 300, 479, 322, 360, 0, 0, 411, 457,
+ 232, 529, 480, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 573, 574, 575, 576, 577, 578,
+ 579, 580, 581, 582, 583, 584, 585, 586, 587, 588,
+ 589, 590, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 591, 371, 470, 526, 324, 336, 339, 329, 348,
+ 0, 349, 325, 326, 331, 333, 334, 335, 340, 341,
+ 345, 351, 241, 203, 377, 385, 505, 301, 208, 209,
+ 210, 498, 499, 500, 501, 540, 541, 545, 447, 448,
+ 449, 450, 282, 535, 298, 453, 452, 320, 321, 366,
+ 435, 0, 192, 213, 355, 0, 439, 278, 569, 539,
+ 534, 199, 215, 0, 252, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 194, 195, 202, 214,
+ 224, 228, 235, 251, 266, 268, 275, 288, 299, 307,
+ 308, 311, 317, 367, 373, 374, 375, 376, 395, 396,
+ 397, 400, 403, 404, 407, 409, 410, 413, 417, 421,
+ 422, 423, 425, 427, 429, 440, 445, 459, 460, 461,
+ 462, 463, 466, 467, 472, 473, 474, 475, 476, 484,
+ 485, 490, 513, 515, 528, 546, 552, 465, 290, 291,
+ 430, 431, 303, 304, 566, 567, 289, 523, 553, 521,
+ 565, 547, 424, 365, 0, 0, 368, 271, 294, 309,
+ 0, 538, 486, 219, 451, 280, 243, 0, 0, 204,
+ 238, 222, 249, 264, 267, 313, 378, 386, 415, 420,
+ 286, 261, 236, 444, 233, 469, 493, 494, 495, 497,
+ 382, 256, 419, 383, 0, 363, 503, 504, 305, 502,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 402, 0, 0, 0, 0, 0, 0, 0, 0,
+ 260, 0, 0, 0, 0, 353, 257, 0, 0, 416,
+ 0, 198, 0, 471, 244, 364, 361, 510, 272, 263,
+ 259, 242, 306, 372, 414, 492, 408, 0, 357, 0,
+ 0, 481, 387, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 312, 240,
+ 314, 197, 399, 482, 276, 0, 0, 0, 0, 0,
+ 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 230, 0, 0, 237, 0, 0, 0, 338, 347, 346,
+ 327, 328, 330, 332, 337, 344, 350, 0, 0, 0,
+ 0, 0, 255, 310, 262, 254, 507, 0, 0, 0,
+ 0, 0, 0, 0, 221, 0, 989, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 265,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 287, 0, 388, 247, 0, 438, 0, 0, 988, 549,
+ 0, 0, 0, 0, 0, 985, 986, 352, 946, 319,
+ 193, 217, 979, 983, 398, 446, 458, 0, 0, 0,
+ 245, 0, 456, 412, 527, 225, 274, 443, 418, 454,
+ 426, 277, 0, 0, 455, 359, 512, 436, 524, 550,
+ 551, 253, 392, 536, 496, 544, 568, 218, 250, 406,
+ 489, 530, 478, 384, 508, 509, 318, 477, 285, 196,
+ 356, 556, 216, 464, 358, 234, 223, 514, 533, 279,
+ 441, 563, 205, 491, 522, 231, 468, 0, 0, 570,
+ 239, 488, 207, 519, 487, 380, 315, 316, 206, 0,
+ 442, 258, 283, 0, 0, 248, 401, 516, 517, 246,
+ 571, 220, 543, 212, 0, 542, 394, 511, 520, 381,
+ 370, 211, 518, 379, 369, 323, 342, 343, 270, 296,
+ 433, 362, 434, 295, 297, 390, 389, 391, 200, 531,
+ 0, 201, 0, 483, 532, 572, 226, 227, 229, 0,
+ 269, 273, 281, 284, 292, 293, 302, 354, 405, 432,
+ 428, 437, 0, 506, 525, 537, 548, 554, 555, 557,
+ 558, 559, 560, 561, 564, 562, 393, 300, 479, 322,
+ 360, 0, 0, 411, 457, 232, 529, 480, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
+ 584, 585, 586, 587, 588, 589, 590, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 591, 371, 470, 526,
+ 324, 336, 339, 329, 348, 0, 349, 325, 326, 331,
+ 333, 334, 335, 340, 341, 345, 351, 241, 203, 377,
+ 385, 505, 301, 208, 209, 210, 498, 499, 500, 501,
+ 540, 541, 545, 447, 448, 449, 450, 282, 535, 298,
+ 453, 452, 320, 321, 366, 435, 0, 192, 213, 355,
+ 0, 439, 278, 569, 539, 534, 199, 215, 0, 252,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 194, 195, 202, 214, 224, 228, 235, 251, 266,
+ 268, 275, 288, 299, 307, 308, 311, 317, 367, 373,
+ 374, 375, 376, 395, 396, 397, 400, 403, 404, 407,
+ 409, 410, 413, 417, 421, 422, 423, 425, 427, 429,
+ 440, 445, 459, 460, 461, 462, 463, 466, 467, 472,
+ 473, 474, 475, 476, 484, 485, 490, 513, 515, 528,
+ 546, 552, 465, 290, 291, 430, 431, 303, 304, 566,
+ 567, 289, 523, 553, 521, 565, 547, 424, 365, 0,
+ 0, 368, 271, 294, 309, 0, 538, 486, 219, 451,
+ 280, 243, 0, 0, 204, 238, 222, 249, 264, 267,
+ 313, 378, 386, 415, 420, 286, 261, 236, 444, 233,
+ 469, 493, 494, 495, 497, 382, 256, 419, 383, 0,
+ 363, 503, 504, 305, 502, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 402, 0, 0, 0,
+ 0, 0, 0, 0, 0, 260, 0, 0, 0, 0,
+ 353, 257, 0, 0, 416, 0, 198, 0, 471, 244,
+ 364, 361, 510, 272, 263, 259, 242, 306, 372, 414,
+ 492, 408, 0, 357, 0, 0, 481, 387, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 312, 240, 314, 197, 399, 482, 276,
+ 0, 0, 0, 0, 1483, 812, 0, 0, 1480, 0,
+ 0, 0, 0, 1478, 0, 230, 1479, 1477, 237, 1482,
+ 0, 777, 338, 347, 346, 327, 328, 330, 332, 337,
+ 344, 350, 0, 0, 0, 0, 0, 255, 310, 262,
+ 254, 507, 0, 0, 0, 0, 0, 0, 0, 221,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 265, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 287, 0, 388, 247, 0,
+ 438, 0, 0, 0, 549, 0, 0, 0, 0, 0,
+ 0, 0, 352, 0, 319, 193, 217, 0, 0, 398,
+ 446, 458, 0, 0, 0, 245, 0, 456, 412, 527,
+ 225, 274, 443, 418, 454, 426, 277, 0, 0, 455,
+ 359, 512, 436, 524, 550, 551, 253, 392, 536, 496,
+ 544, 568, 218, 250, 406, 489, 530, 478, 384, 508,
+ 509, 318, 477, 285, 196, 356, 556, 216, 464, 358,
+ 234, 223, 514, 533, 279, 441, 563, 205, 491, 522,
+ 231, 468, 0, 0, 570, 239, 488, 207, 519, 487,
+ 380, 315, 316, 206, 0, 442, 258, 283, 0, 0,
+ 248, 401, 516, 517, 246, 571, 220, 543, 212, 0,
+ 542, 394, 511, 520, 381, 370, 211, 518, 379, 369,
+ 323, 342, 343, 270, 296, 433, 362, 434, 295, 297,
+ 390, 389, 391, 200, 531, 0, 201, 0, 483, 532,
+ 572, 226, 227, 229, 0, 269, 273, 281, 284, 292,
+ 293, 302, 354, 405, 432, 428, 437, 0, 506, 525,
+ 537, 548, 554, 555, 557, 558, 559, 560, 561, 564,
+ 562, 393, 300, 479, 322, 360, 0, 0, 411, 457,
+ 232, 529, 480, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 573, 574, 575, 576, 577, 578,
+ 579, 580, 581, 582, 583, 584, 585, 586, 587, 588,
+ 589, 590, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 591, 371, 470, 526, 324, 336, 339, 329, 348,
+ 0, 349, 325, 326, 331, 333, 334, 335, 340, 341,
+ 345, 351, 241, 203, 377, 385, 505, 301, 208, 209,
+ 210, 498, 499, 500, 501, 540, 541, 545, 447, 448,
+ 449, 450, 282, 535, 298, 453, 452, 320, 321, 366,
+ 435, 0, 192, 213, 355, 0, 439, 278, 569, 539,
+ 534, 199, 215, 0, 252, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 194, 195, 202, 214,
+ 224, 228, 235, 251, 266, 268, 275, 288, 299, 307,
+ 308, 311, 317, 367, 373, 374, 375, 376, 395, 396,
+ 397, 400, 403, 404, 407, 409, 410, 413, 417, 421,
+ 422, 423, 425, 427, 429, 440, 445, 459, 460, 461,
+ 462, 463, 466, 467, 472, 473, 474, 475, 476, 484,
+ 485, 490, 513, 515, 528, 546, 552, 465, 290, 291,
+ 430, 431, 303, 304, 566, 567, 289, 523, 553, 521,
+ 565, 547, 424, 365, 0, 0, 368, 271, 294, 309,
+ 0, 538, 486, 219, 451, 280, 243, 0, 0, 204,
+ 238, 222, 249, 264, 267, 313, 378, 386, 415, 420,
+ 286, 261, 236, 444, 233, 469, 493, 494, 495, 497,
+ 382, 256, 419, 0, 383, 363, 503, 504, 305, 82,
+ 502, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 402, 0, 0, 0, 0, 0, 0, 0,
+ 0, 260, 0, 0, 0, 0, 353, 257, 0, 0,
+ 416, 0, 198, 0, 471, 244, 364, 361, 510, 272,
+ 263, 259, 242, 306, 372, 414, 492, 408, 0, 357,
+ 0, 0, 481, 387, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 312,
+ 240, 314, 197, 399, 482, 276, 0, 91, 0, 0,
+ 0, 190, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 230, 0, 0, 237, 0, 0, 0, 338, 347,
+ 346, 327, 328, 330, 332, 337, 344, 350, 0, 0,
+ 0, 0, 0, 255, 310, 262, 254, 507, 0, 0,
+ 0, 0, 0, 0, 0, 221, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 265, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 287, 0, 388, 247, 0, 438, 0, 0, 0,
+ 549, 0, 0, 0, 0, 0, 0, 0, 352, 0,
+ 319, 193, 217, 0, 0, 398, 446, 458, 0, 0,
+ 0, 245, 0, 456, 412, 527, 225, 274, 443, 418,
+ 454, 426, 277, 0, 0, 455, 359, 512, 436, 524,
+ 550, 551, 253, 392, 536, 496, 544, 568, 218, 250,
+ 406, 489, 530, 478, 384, 508, 509, 318, 477, 285,
+ 196, 356, 556, 216, 464, 358, 234, 223, 514, 533,
+ 279, 441, 563, 205, 491, 522, 231, 468, 0, 0,
+ 570, 239, 488, 207, 519, 487, 380, 315, 316, 206,
+ 0, 442, 258, 283, 0, 0, 248, 401, 516, 517,
+ 246, 571, 220, 543, 212, 0, 542, 394, 511, 520,
+ 381, 370, 211, 518, 379, 369, 323, 342, 343, 270,
+ 296, 433, 362, 434, 295, 297, 390, 389, 391, 200,
+ 531, 0, 201, 0, 483, 532, 572, 226, 227, 229,
+ 0, 269, 273, 281, 284, 292, 293, 302, 354, 405,
+ 432, 428, 437, 0, 506, 525, 537, 548, 554, 555,
+ 557, 558, 559, 560, 561, 564, 562, 393, 300, 479,
+ 322, 360, 0, 0, 411, 457, 232, 529, 480, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 583, 584, 585, 586, 587, 588, 589, 590, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 591, 371, 470,
+ 526, 324, 336, 339, 329, 348, 0, 349, 325, 326,
+ 331, 333, 334, 335, 340, 341, 345, 351, 241, 203,
+ 377, 385, 505, 301, 208, 209, 210, 498, 499, 500,
+ 501, 540, 541, 545, 447, 448, 449, 450, 282, 535,
+ 298, 453, 452, 320, 321, 366, 435, 0, 192, 213,
+ 355, 90, 439, 278, 569, 539, 534, 199, 215, 0,
+ 252, 0, 0, 0, 0, 0, 0, 2100, 0, 0,
+ 2099, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 194, 195, 202, 214, 224, 228, 235, 251,
+ 266, 268, 275, 288, 299, 307, 308, 311, 317, 367,
+ 373, 374, 375, 376, 395, 396, 397, 400, 403, 404,
+ 407, 409, 410, 413, 417, 421, 422, 423, 425, 427,
+ 429, 440, 445, 459, 460, 461, 462, 463, 466, 467,
+ 472, 473, 474, 475, 476, 484, 485, 490, 513, 515,
+ 528, 546, 552, 465, 290, 291, 430, 431, 303, 304,
+ 566, 567, 289, 523, 553, 521, 565, 547, 424, 365,
+ 0, 0, 368, 271, 294, 309, 0, 538, 486, 219,
+ 451, 280, 243, 0, 0, 204, 238, 222, 249, 264,
+ 267, 313, 378, 386, 415, 420, 286, 261, 236, 444,
+ 233, 469, 493, 494, 495, 497, 382, 256, 419, 1543,
+ 0, 363, 503, 504, 305, 502, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 402, 0, 0,
+ 0, 1545, 0, 0, 0, 0, 260, 0, 0, 0,
+ 0, 353, 257, 0, 0, 416, 0, 198, 0, 471,
+ 244, 364, 361, 510, 272, 263, 259, 242, 306, 372,
+ 414, 492, 408, 0, 357, 0, 0, 481, 387, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 312, 240, 314, 197, 399, 482,
+ 276, 0, 0, 0, 0, 1547, 638, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 230, 0, 0, 237,
+ 0, 0, 0, 338, 347, 346, 327, 328, 330, 332,
+ 337, 344, 350, 0, 0, 0, 0, 0, 255, 310,
+ 262, 254, 507, 0, 0, 0, 0, 0, 0, 0,
+ 221, 0, 0, 0, 1317, 0, 1318, 1319, 0, 0,
+ 0, 0, 0, 0, 0, 265, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 287, 0, 388, 247,
+ 0, 438, 0, 0, 0, 549, 0, 0, 0, 0,
+ 0, 0, 0, 352, 0, 319, 193, 217, 0, 0,
+ 398, 446, 458, 0, 0, 0, 245, 0, 456, 412,
+ 527, 225, 274, 443, 418, 454, 426, 277, 0, 0,
+ 455, 359, 512, 436, 524, 550, 551, 253, 392, 536,
+ 496, 544, 568, 218, 250, 406, 489, 530, 478, 384,
+ 508, 509, 318, 477, 285, 196, 356, 556, 216, 464,
+ 358, 234, 223, 514, 533, 279, 441, 563, 205, 491,
+ 522, 231, 468, 0, 0, 570, 239, 488, 207, 519,
+ 487, 380, 315, 316, 206, 0, 442, 258, 283, 0,
+ 0, 248, 401, 516, 517, 246, 571, 220, 543, 212,
+ 0, 542, 394, 511, 520, 381, 370, 211, 518, 379,
+ 369, 323, 342, 343, 270, 296, 433, 362, 434, 295,
+ 297, 390, 389, 391, 200, 531, 0, 201, 0, 483,
+ 532, 572, 226, 227, 229, 0, 269, 273, 281, 284,
+ 292, 293, 302, 354, 405, 432, 428, 437, 0, 506,
+ 525, 537, 548, 554, 555, 557, 558, 559, 560, 561,
+ 564, 562, 393, 300, 479, 322, 360, 0, 0, 411,
+ 457, 232, 529, 480, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 573, 574, 575, 576, 577,
+ 578, 579, 580, 581, 582, 583, 584, 585, 586, 587,
+ 588, 589, 590, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 591, 371, 470, 526, 324, 336, 339, 329,
+ 348, 0, 349, 325, 326, 331, 333, 334, 335, 340,
+ 341, 345, 351, 241, 203, 377, 385, 505, 301, 208,
+ 209, 210, 498, 499, 500, 501, 540, 541, 545, 447,
+ 448, 449, 450, 282, 535, 298, 453, 452, 320, 321,
+ 366, 435, 0, 192, 213, 355, 0, 439, 278, 569,
+ 539, 534, 199, 215, 0, 252, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 194, 195, 202,
+ 214, 224, 228, 235, 251, 266, 268, 275, 288, 299,
+ 307, 308, 311, 317, 367, 373, 374, 375, 376, 395,
+ 396, 397, 400, 403, 404, 407, 409, 410, 413, 417,
+ 421, 422, 423, 425, 427, 429, 440, 445, 459, 460,
+ 461, 462, 463, 466, 467, 472, 473, 474, 475, 476,
+ 484, 485, 490, 513, 515, 528, 546, 552, 465, 290,
+ 291, 430, 431, 303, 304, 566, 567, 289, 523, 553,
+ 521, 565, 547, 424, 365, 0, 0, 368, 271, 294,
+ 309, 0, 538, 486, 219, 451, 280, 243, 0, 0,
+ 204, 238, 222, 249, 264, 267, 313, 378, 386, 415,
+ 420, 286, 261, 236, 444, 233, 469, 493, 494, 495,
+ 497, 382, 256, 419, 0, 383, 363, 503, 504, 305,
+ 82, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 91, 0,
+ 1520, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 90, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 91, 0, 0, 0, 190, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 2100, 0, 0, 2099, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 2048, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 1726, 190, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 2046, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 940, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 946, 319, 193, 217, 944,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 2048, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 1726, 190, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 1520, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 3338, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 1881, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1882, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 2457, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2458, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 638, 0, 0, 0, 0, 2442, 0, 0,
+ 0, 0, 230, 0, 0, 237, 2443, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 1566, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 1565, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 640, 641, 642, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 3463, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 1726, 190, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 3338, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 91, 0,
+ 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 2101, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 190, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 1547, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 190, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 1836, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 1827, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 1693, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 1691, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 1689, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 1687, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 1685, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 1681, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 1679, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 1677, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 1652, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 1551, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 190, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 91, 0, 0, 0, 812, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 190, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1296, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 1295, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 190, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 898, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 190, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 594,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 3471, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 383, 0, 363, 503, 504, 305, 502, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 402, 0,
+ 0, 0, 0, 0, 0, 0, 0, 260, 0, 0,
+ 0, 0, 353, 257, 0, 0, 416, 0, 198, 0,
+ 471, 244, 364, 361, 510, 272, 263, 259, 242, 306,
+ 372, 414, 492, 408, 0, 357, 0, 0, 481, 387,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 312, 240, 314, 197, 399,
+ 482, 276, 0, 0, 0, 0, 0, 812, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 230, 0, 0,
+ 237, 0, 0, 0, 338, 347, 346, 327, 328, 330,
+ 332, 337, 344, 350, 0, 0, 0, 0, 0, 255,
+ 310, 262, 254, 507, 0, 0, 0, 0, 0, 0,
+ 0, 221, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 287, 0, 388,
+ 247, 0, 438, 0, 0, 0, 549, 0, 0, 0,
+ 0, 0, 0, 0, 352, 0, 319, 193, 217, 0,
+ 0, 398, 446, 458, 0, 0, 0, 245, 0, 456,
+ 412, 527, 225, 274, 443, 418, 454, 426, 277, 0,
+ 0, 455, 359, 512, 436, 524, 550, 551, 253, 392,
+ 536, 496, 544, 568, 218, 250, 406, 489, 530, 478,
+ 384, 508, 509, 318, 477, 285, 196, 356, 556, 216,
+ 464, 358, 234, 223, 514, 533, 279, 441, 563, 205,
+ 491, 522, 231, 468, 0, 0, 570, 239, 488, 207,
+ 519, 487, 380, 315, 316, 206, 0, 442, 258, 283,
+ 0, 0, 248, 401, 516, 517, 246, 571, 220, 543,
+ 212, 0, 542, 394, 511, 520, 381, 370, 211, 518,
+ 379, 369, 323, 342, 343, 270, 296, 433, 362, 434,
+ 295, 297, 390, 389, 391, 200, 531, 0, 201, 0,
+ 483, 532, 572, 226, 227, 229, 0, 269, 273, 281,
+ 284, 292, 293, 302, 354, 405, 432, 428, 437, 0,
+ 506, 525, 537, 548, 554, 555, 557, 558, 559, 560,
+ 561, 564, 562, 393, 300, 479, 322, 360, 0, 0,
+ 411, 457, 232, 529, 480, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 573, 574, 575, 576,
+ 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 587, 588, 589, 590, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 591, 371, 470, 526, 324, 336, 339,
+ 329, 348, 0, 349, 325, 326, 331, 333, 334, 335,
+ 340, 341, 345, 351, 241, 203, 377, 385, 505, 301,
+ 208, 209, 210, 498, 499, 500, 501, 540, 541, 545,
+ 447, 448, 449, 450, 282, 535, 298, 453, 452, 320,
+ 321, 366, 435, 0, 192, 213, 355, 0, 439, 278,
+ 569, 539, 534, 199, 215, 0, 252, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 194, 195,
+ 202, 214, 224, 228, 235, 251, 266, 268, 275, 288,
+ 299, 307, 308, 311, 317, 367, 373, 374, 375, 376,
+ 395, 396, 397, 400, 403, 404, 407, 409, 410, 413,
+ 417, 421, 422, 423, 425, 427, 429, 440, 445, 459,
+ 460, 461, 462, 463, 466, 467, 472, 473, 474, 475,
+ 476, 484, 485, 490, 513, 515, 528, 546, 552, 465,
+ 290, 291, 430, 431, 303, 304, 566, 567, 289, 523,
+ 553, 521, 565, 547, 424, 365, 0, 0, 368, 271,
+ 294, 309, 0, 538, 486, 219, 451, 280, 243, 0,
+ 0, 204, 238, 222, 249, 264, 267, 313, 378, 386,
+ 415, 420, 286, 261, 236, 444, 233, 469, 493, 494,
+ 495, 497, 382, 256, 419, 383, 0, 363, 503, 504,
+ 305, 502, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 402, 0, 0, 0, 0, 0, 0,
+ 0, 0, 260, 0, 0, 0, 0, 353, 257, 0,
+ 0, 416, 0, 198, 0, 471, 244, 364, 361, 510,
+ 272, 263, 259, 242, 306, 372, 414, 492, 408, 0,
+ 357, 0, 0, 481, 387, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 312, 240, 314, 197, 399, 482, 276, 0, 0, 0,
+ 0, 0, 190, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 230, 0, 0, 237, 0, 0, 0, 338,
+ 347, 346, 327, 328, 330, 332, 337, 344, 350, 0,
+ 0, 0, 0, 0, 255, 310, 262, 254, 507, 0,
+ 0, 0, 0, 0, 0, 0, 221, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 265, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 287, 0, 388, 247, 0, 438, 0, 0,
+ 0, 549, 0, 0, 0, 0, 0, 0, 0, 352,
+ 0, 319, 193, 217, 0, 0, 398, 446, 458, 0,
+ 0, 0, 245, 0, 456, 412, 527, 225, 274, 443,
+ 418, 454, 426, 277, 0, 0, 455, 359, 512, 436,
+ 524, 550, 551, 253, 392, 536, 496, 544, 568, 218,
+ 250, 406, 489, 530, 478, 384, 508, 509, 318, 477,
+ 285, 196, 356, 556, 216, 464, 358, 234, 223, 514,
+ 533, 279, 441, 563, 205, 491, 522, 231, 468, 0,
+ 0, 570, 239, 488, 207, 519, 487, 380, 315, 316,
+ 206, 0, 442, 258, 283, 0, 0, 248, 401, 516,
+ 517, 246, 571, 220, 543, 212, 0, 542, 394, 511,
+ 520, 381, 370, 211, 518, 379, 369, 323, 342, 343,
+ 270, 296, 433, 362, 434, 295, 297, 390, 389, 391,
+ 200, 531, 0, 201, 0, 483, 532, 572, 226, 227,
+ 229, 0, 269, 273, 281, 284, 292, 293, 302, 354,
+ 405, 432, 428, 437, 0, 506, 525, 537, 548, 554,
+ 555, 557, 558, 559, 560, 561, 564, 562, 393, 300,
+ 479, 322, 360, 0, 0, 411, 457, 232, 529, 480,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 573, 574, 575, 576, 577, 578, 579, 580, 581,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 591, 371,
+ 470, 526, 324, 336, 339, 329, 348, 0, 349, 325,
+ 326, 331, 333, 334, 335, 340, 341, 345, 351, 241,
+ 203, 377, 385, 505, 301, 208, 209, 210, 498, 499,
+ 500, 501, 540, 541, 545, 447, 448, 449, 450, 282,
+ 535, 298, 453, 452, 320, 321, 366, 435, 0, 192,
+ 213, 355, 0, 439, 278, 569, 539, 534, 199, 215,
+ 0, 252, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 194, 195, 202, 214, 224, 228, 235,
+ 251, 266, 268, 275, 288, 299, 307, 308, 311, 317,
+ 367, 373, 374, 375, 376, 395, 396, 397, 400, 403,
+ 404, 407, 409, 410, 413, 417, 421, 422, 423, 425,
+ 427, 429, 440, 445, 459, 460, 461, 462, 463, 466,
+ 467, 472, 473, 474, 475, 476, 484, 485, 490, 513,
+ 515, 528, 546, 552, 465, 290, 291, 430, 431, 303,
+ 304, 566, 567, 289, 523, 553, 521, 565, 547, 424,
+ 365, 0, 0, 368, 271, 294, 309, 0, 538, 486,
+ 219, 451, 280, 243, 0, 0, 204, 238, 222, 249,
+ 264, 267, 313, 378, 386, 415, 420, 286, 261, 236,
+ 444, 233, 469, 493, 494, 495, 497, 382, 256, 419,
+ 0, 0, 363, 503, 504, 305,
}
var yyPact = [...]int{
- -1000, -1000, 5263, -1000, -458, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 4946, -1000, -459, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2226,
+ 2430, -1000, -1000, -1000, -1000, 2454, -1000, 859, 1921, -1000,
+ 2229, 312, -1000, 47391, 593, -1000, 44771, 592, 247, 30361,
+ -1000, 232, -1000, 217, 46081, 228, -1000, -1000, -1000, -1000,
+ -304, 19224, 2160, 96, 92, 47391, -1000, -1000, -1000, -1000,
+ 2368, 1892, -1000, 414, -1000, -1000, -1000, -1000, -1000, -1000,
+ 44116, -1000, 992, -1000, -1000, 2245, 2210, 2111, 753, 2186,
+ -1000, 2337, 1892, -1000, 19224, 2395, 2291, 18569, 18569, 549,
+ -1000, -1000, 208, -1000, -1000, 25776, 47391, 32981, 364, -1000,
+ 2229, -1000, -1000, -1000, 128, -1000, 415, 1815, -1000, 1814,
+ -1000, 745, 895, 447, 543, 533, 446, 445, 444, 443,
+ 442, 441, 427, 426, 461, -1000, 778, 778, -109, -115,
+ 3441, 541, 538, 538, 858, 564, 2192, 2181, -1000, -1000,
+ 778, 778, 778, 387, 778, 778, 778, 778, 357, 340,
+ 778, 778, 778, 778, 778, 778, 778, 778, 778, 778,
+ 778, 778, 778, 778, 778, 778, 778, 517, 2229, 324,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2260, 2300,
- -1000, -1000, -1000, -1000, 2433, -1000, 875, 1965, -1000, 2261,
- 4625, -1000, 47207, 621, -1000, 44619, 620, 630, 29738, -1000,
- 209, -1000, 164, 45913, 202, -1000, -1000, -1000, -292, 19384,
- 2183, 78, 77, 47207, -1000, -1000, -1000, -1000, 2398, 1911,
- -1000, 394, -1000, -1000, -1000, -1000, -1000, -1000, 43972, -1000,
- 1009, -1000, -1000, 2269, 2231, 2444, 787, 2165, -1000, 2326,
- 1911, -1000, 19384, 2387, 2307, 18737, 18737, 569, -1000, -1000,
- 348, -1000, -1000, 25209, 47207, 32326, 586, -1000, 2261, -1000,
- -1000, -1000, 99, -1000, 470, 1831, -1000, 1830, -1000, 675,
- 864, 485, 588, 578, 481, 473, 472, 471, 469, 467,
- 454, 453, 492, -1000, 814, 814, -111, -115, 3182, 567,
- 545, 545, 1011, 585, 2213, 2203, -1000, -1000, 814, 814,
- 814, 479, 814, 814, 814, 814, 383, 382, 814, 814,
- 814, 814, 814, 814, 814, 814, 814, 814, 814, 814,
- 814, 814, 814, 814, 814, 422, 2261, 364, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
@@ -6361,60 +6388,60 @@ var yyPact = [...]int{
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 47391, 331, 47391, -1000, 660, 47391, 916, 916,
+ 123, 916, 916, 916, 916, 221, 743, 89, -1000, 220,
+ 321, 196, 315, 891, 276, -1000, -1000, 308, 891, 1611,
+ -1000, 757, 310, 210, -1000, 916, 916, -1000, 12649, 189,
+ 12649, 12649, -1000, 2215, -1000, -1000, -1000, -1000, -1000, 1196,
+ -1000, -1000, -1000, -1000, 10, 561, -1000, -1000, -1000, -1000,
+ 46081, 43461, 292, -1000, -1000, 51, -1000, -1000, 1575, 1098,
+ 19224, 1116, -1000, 1142, 721, -1000, -1000, -1000, -1000, -1000,
+ 613, -1000, 19879, 19879, 19879, 19879, -1000, -1000, 1627, 42806,
+ 1627, 1627, 19879, 1627, -1000, 19879, 1627, 1627, 1627, 19224,
+ 1627, 1627, 1627, 1627, -1000, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, -1000, -1000, -1000, -1000, 1627, 659, 1627,
+ 1627, 1627, 1627, 1627, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 1627, 1627, 1627, 1627, 1627, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 47207, 374, 47207, -1000, 690, 47207, 955, 955,
- 129, 955, 955, 955, 955, 265, 793, 69, -1000, 256,
- 344, 228, 340, 941, 743, -1000, -1000, 360, 941, 1692,
- -1000, 791, 234, -1000, 955, 955, -1000, 12889, 165, 12889,
- 12889, -1000, 2239, -1000, -1000, -1000, -1000, -1000, 1234, -1000,
- -1000, -1000, -1000, -1000, 583, -1000, -1000, -1000, -1000, 45913,
- 43325, -1000, -1000, 82, -1000, -1000, 1744, 1467, 19384, 1219,
- -1000, 1169, 749, -1000, -1000, -1000, -1000, -1000, 652, -1000,
- 20031, 20031, 20031, 20031, -1000, -1000, 1839, 42678, 1839, 1839,
- 20031, 1839, -1000, 20031, 1839, 1839, 1839, 19384, 1839, 1839,
- 1839, 1839, -1000, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, -1000, -1000, -1000, -1000, 1839, 689, 1839, 1839, 1839,
- 1839, 1839, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- 1839, 1839, 1839, 1839, 1839, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21325,
- 1408, 1397, 1380, -1000, 16796, 1839, -1000, -1000, -1000, -1000,
+ -1000, 21844, 1377, 1376, 1375, -1000, 16604, 1627, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 47207, -1000, 1839, 233,
- 45913, 45913, 425, 2326, 1911, -1000, 2398, 2379, 394, -1000,
- 3108, 1406, 1439, 1334, 1911, 1814, 47207, -1000, 1853, -1000,
- -1000, -1000, -1000, 2096, 1393, 1687, -1000, -1000, -1000, -1000,
- 2027, 19384, -1000, -1000, 2422, -1000, 22620, 688, 2415, 42031,
- -1000, 569, 569, 1827, 428, 57, -1000, -1000, -1000, -1000,
- 841, 29091, -1000, -1000, -1000, -1000, 1732, 47207, -1000, -1000,
- 4853, 1252, -1000, 1963, -1000, 1730, -1000, 1896, 19384, 1929,
- 613, 1252, 597, 596, 595, -1000, -17, -1000, -1000, -1000,
- -1000, -1000, -1000, 814, 814, 814, -1000, 484, 2384, 4625,
- 3608, -1000, -1000, -1000, 41384, 1961, 1252, -1000, 1956, -1000,
- 927, 663, 722, 722, 1252, -1000, -1000, 46560, 1252, 926,
- 923, 1252, 1252, 45913, 45913, -1000, 40737, -1000, 40090, 39443,
- 1180, 45913, 38796, 38149, 37502, 36855, 36208, -1000, 2054, -1000,
- 1942, -1000, -1000, -1000, 46560, 1252, 1252, 46560, 45913, 46560,
- 47207, 1252, -1000, -1000, 385, -1000, -1000, 1178, 1165, 1163,
- 814, 814, 1162, 1678, 1670, 1665, 814, 814, 1161, 1663,
- 31032, 1656, 362, 1158, 1154, 1150, 1152, 1647, 199, 1580,
- 1128, 1127, 1149, 45913, 1952, 47207, -1000, 327, 824, 561,
- 837, 2261, 2178, 1825, 580, 608, 1252, 560, 560, 45913,
- -1000, 14848, -1000, -1000, 1578, 19384, -1000, 942, 941, 941,
- -1000, -1000, -1000, -1000, -1000, -1000, 955, 47207, 942, -1000,
- -1000, -1000, 941, 955, 47207, 955, 955, 955, 955, 941,
- 941, 941, 955, 47207, 47207, 47207, 47207, 47207, 47207, 47207,
- 47207, 47207, 12889, 791, 955, -300, -1000, 1572, -1000, 2073,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 47391, -1000,
+ 1627, 255, 46081, 46081, 411, 2337, 1892, -1000, 2368, 2356,
+ 414, -1000, 2585, 1420, 1626, 1230, 1892, 1793, 47391, -1000,
+ 1831, -1000, -1000, -1000, -212, -213, 2048, 1287, 1610, -1000,
+ -1000, -1000, -1000, 1496, 19224, -1000, -1000, 2441, -1000, 23155,
+ 658, 2439, 42151, -1000, 549, 549, 1808, 459, 38, -1000,
+ -1000, -1000, -1000, 807, 29706, -1000, -1000, -1000, -1000, 1608,
+ 47391, -1000, -1000, 3954, 1051, -1000, 1920, -1000, 1599, -1000,
+ 1875, 19224, 1927, 590, 1051, 581, 579, 571, -1000, -6,
+ -1000, -1000, -1000, -1000, -1000, -1000, 778, 778, 778, -1000,
+ 453, 2392, 312, 3893, -1000, -1000, -1000, 41496, 1914, 1051,
+ -1000, 1913, -1000, 908, 642, 684, 684, 1051, -1000, -1000,
+ 46736, 1051, 906, 905, 1051, 1051, 46081, 46081, -1000, 40841,
+ -1000, 40186, 39531, 1180, 46081, 38876, 38221, 37566, 36911, 36256,
+ -1000, 1996, -1000, 1867, -1000, -1000, -1000, 46736, 1051, 1051,
+ 46736, 46081, 46736, 47391, 1051, -1000, -1000, 394, -1000, -1000,
+ 1169, 1168, 1166, 778, 778, 1165, 1596, 1595, 1589, 778,
+ 778, 1164, 1579, 31671, 1577, 330, 1163, 1158, 1140, 1193,
+ 1576, 193, 1564, 1183, 1145, 1138, 46081, 1912, 47391, -1000,
+ 302, 832, 544, 792, 2229, 2148, 1805, 558, 587, 1051,
+ 542, 542, 46081, -1000, 13310, -1000, -1000, 1554, 19224, -1000,
+ 903, 891, 891, -1000, -1000, -1000, -1000, -1000, -1000, 916,
+ 47391, 903, -1000, -1000, -1000, 891, 916, 47391, 916, 916,
+ 916, 916, 891, 891, 891, 916, 47391, 47391, 47391, 47391,
+ 47391, 47391, 47391, 47391, 47391, 12649, 757, 916, -317, -1000,
+ 1550, -1000, -1000, 2040, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
@@ -6430,281 +6457,284 @@ var yyPact = [...]int{
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 12889, 12889, -1000, -1000,
- -1000, -1000, 200, -1000, 35561, 409, 835, -1000, 1824, 34914,
- -1000, -333, -340, -342, -359, -1000, -1000, -1000, -360, -362,
- -1000, -1000, -1000, 19384, 19384, 19384, 19384, -147, -1000, 936,
- 20031, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 171, 965,
- 20031, 20031, 20031, 20031, 20031, 20031, 20031, 20031, 20031, 20031,
- 20031, 20031, 20031, 20031, 20031, -1000, -1000, 27150, 6253, 6253,
- 749, 749, 749, 749, -1000, -82, 1823, 46560, -1000, -1000,
- -1000, 684, 19384, 19384, 749, -1000, 1252, 16796, 34267, 18737,
- 18737, 19384, 856, 1467, 46560, 19384, -1000, 1334, -1000, -1000,
- -1000, 1142, -1000, 954, 2245, 2245, 2245, 2245, 19384, 19384,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 2245, 45913,
- 45913, 858, 19384, 19384, 19384, 19384, 19384, 19384, 15501, 19384,
- 19384, 20031, 19384, 19384, 19384, 1334, 19384, 19384, 19384, 19384,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384,
- 19384, 19384, 19384, 1334, 19384, 1372, 19384, 19384, 18737, 14195,
- 18737, 18737, 18737, 18737, 18737, -1000, -1000, -1000, -1000, -1000,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 1334, 19384,
- 19384, 19384, 19384, 19384, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, 1591, 1333, 1404, 19384, -1000, 1817,
- -1000, -84, 24562, 19384, 1557, 2413, 2002, 45913, -1000, -1000,
- -1000, 2326, -1000, 2326, 1591, 3101, 2099, 18737, -1000, -1000,
- 3101, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1646,
- -1000, 47207, 1814, 2287, 45913, 2085, 1555, 514, -1000, 19384,
- 19384, 1810, -1000, 1364, 47207, -1000, -147, -1000, 33620, -1000,
- -1000, 12236, 47207, 421, 47207, -1000, 23915, 32973, 281, 57,
- -1000, 1780, -1000, 49, 40, 16148, 748, -1000, -1000, -1000,
- 3182, 20678, 1739, 748, 130, -1000, -1000, -1000, 1896, -1000,
- 1896, 1896, 1896, 1896, 514, 514, 514, 514, -1000, -1000,
- -1000, -1000, -1000, 1950, 1946, -1000, 1896, 1896, 1896, 1896,
+ 12649, 12649, -1000, -1000, -1000, -1000, -1000, 1799, -1000, 214,
+ 52, 225, -1000, 35601, 409, 790, -1000, 409, -1000, -1000,
+ -1000, 1798, 34946, -1000, -319, -327, -328, -331, -1000, -1000,
+ -1000, -332, -341, -1000, -1000, -1000, 19224, 19224, 19224, 19224,
+ -141, -1000, 860, 19879, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 182, 1067, 19879, 19879, 19879, 19879, 19879, 19879, 19879,
+ 19879, 19879, 19879, 19879, 19879, 19879, 19879, 19879, -1000, -1000,
+ 27741, 6717, 6717, 721, 721, 721, 721, -1000, -76, 1797,
+ 46736, -1000, -1000, -1000, 657, 19224, 19224, 721, -1000, 1051,
+ 16604, 20534, 18569, 18569, 19224, 826, 1098, 46736, 19224, -1000,
+ 1230, -1000, -1000, -1000, 1127, -1000, 869, 2207, 2207, 2207,
+ 2207, 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224,
+ 19224, 2207, 46081, 46081, 1115, 19224, 19224, 19224, 19224, 19224,
+ 19224, 15293, 19224, 19224, 19879, 19224, 19224, 19224, 1230, 19224,
+ 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224,
+ 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224,
+ 19224, 19224, 19224, 19224, 19224, 19224, 1230, 19224, 1012, 19224,
+ 19224, 18569, 14632, 18569, 18569, 18569, 18569, 18569, -1000, -1000,
+ -1000, -1000, -1000, 19224, 19224, 19224, 19224, 19224, 19224, 19224,
+ 19224, 1230, 19224, 19224, 19224, 19224, 19224, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1403, 1329, 1294,
+ 19224, -1000, 1795, -1000, -127, 25121, 19224, 1544, 2437, 1969,
+ 46081, -1000, -1000, -1000, 2337, -1000, 2337, 1403, 2360, 2075,
+ 18569, -1000, -1000, 2360, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 1643, -1000, 47391, 1793, 2288, 46081, -1000, -182,
+ -1000, -185, 2044, 1541, 395, -1000, 19224, 19224, 1787, -1000,
+ 1139, 47391, -1000, -141, -1000, 34291, -1000, -1000, 11988, 47391,
+ 423, 47391, -1000, 24466, 33636, 279, -1000, 38, 1684, -1000,
+ 68, 50, 15948, 713, -1000, -1000, -1000, 3441, 21189, 1494,
+ 713, 145, -1000, -1000, -1000, 1875, -1000, 1875, 1875, 1875,
+ 1875, 395, 395, 395, 395, -1000, -1000, -1000, -1000, -1000,
+ 1910, 1909, -1000, 1875, 1875, 1875, 1875, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1943, 1943, 1943,
- 1906, 1906, 535, -1000, 19384, 304, 32326, 2267, 1139, 2036,
- 327, 564, 1995, 1252, 1252, 1252, 564, -1000, 1329, 1327,
- 1322, -1000, -447, 1796, -1000, -1000, 2382, -1000, -1000, 943,
- 957, 949, 984, 45913, 247, 408, -1000, 515, -1000, 32326,
- 1252, 916, 722, 1252, -1000, 1252, -1000, -1000, -1000, -1000,
- -1000, 1252, -1000, -1000, 1795, -1000, 1822, 1003, 945, 979,
- 938, 1795, -1000, -1000, -88, 1795, -1000, 1795, -1000, 1795,
- -1000, 1795, -1000, 1795, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 831, 227, -208, 45913, 247, 579, -1000,
- 573, 27150, -1000, -1000, -1000, 27150, 27150, -1000, -1000, -1000,
- -1000, 1538, 1535, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 1907, 1907, 1907, 1896, 1896, 1876,
+ 1876, 521, -1000, 19224, 246, 32981, 2241, 1137, 1622, 302,
+ 546, 1948, 1051, 1051, 1051, 546, -1000, 1238, 1236, 1234,
+ -1000, -448, 1777, -1000, -1000, 2387, -1000, -1000, 722, 933,
+ 928, 616, 46081, 275, 397, -1000, 527, -1000, 32981, 1051,
+ 901, 684, 1051, -1000, 1051, -1000, -1000, -1000, -1000, -1000,
+ 1051, -1000, -1000, 1776, -1000, 1678, 978, 923, 973, 919,
+ 1776, -1000, -1000, -82, 1776, -1000, 1776, -1000, 1776, -1000,
+ 1776, -1000, 1776, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 802, 238, -236, 46081, 275, 557, -1000, 555,
+ 27741, -1000, -1000, -1000, 27741, 27741, -1000, -1000, -1000, -1000,
+ 1535, 1497, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -418, 47207, -1000, 264, 832, 387, 437, 436,
- 47207, 429, 2318, 2316, 2310, 2301, 2294, 355, 376, 47207,
- 47207, 560, 2047, 47207, 2276, 47207, -1000, -1000, -1000, -1000,
- -1000, 1467, 47207, -1000, -1000, 955, 955, -1000, -1000, 47207,
- 955, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 955,
+ -1000, -436, 47391, -1000, 297, 788, 343, 457, 348, 47391,
+ 360, 2319, 2315, 2312, 2309, 2298, 398, 339, 47391, 47391,
+ 542, 2012, 47391, 2261, 47391, -1000, -1000, -1000, -1000, -1000,
+ 1098, 47391, -1000, -1000, 916, 916, -1000, -1000, 47391, 916,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 916, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 47207, -1000, -1000, -1000, -1000,
- 45913, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -99, 158, 47, 388, -1000, -1000, -1000, -1000, -1000, 2323,
- -1000, 1467, 894, 901, -1000, 1839, -1000, -1000, 1030, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 171, 20031, 20031, 20031,
- 1288, 610, 1636, 1100, 1151, 1135, 1135, 1004, 1004, 753,
- 753, 753, 753, 753, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 1532, -1000, 1839, 46560, 1724, 14195, 1213, 2005,
- 1334, 2956, -1000, 1660, -1000, 1660, 1811, 859, -1000, 19384,
- 1334, 2916, -1000, -1000, 1334, 1334, 1334, 19384, -1000, -1000,
- 19384, 19384, 19384, 19384, 2036, 2036, 2036, 2036, 2036, 2036,
- 2036, 2036, 2036, 2036, 19384, 1794, 1786, 2411, -1000, -1000,
+ -1000, -1000, -1000, -1000, 47391, -1000, -1000, -1000, -1000, 10,
+ 202, -1000, -1000, 46081, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -49, -1000, 101, 66, 396, -1000, -1000,
+ -1000, -1000, -1000, 2331, -1000, 1098, 893, 883, -1000, 1627,
+ -1000, -1000, 948, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 182, 19879, 19879, 19879, 1361, 567, 1436, 1229, 986, 1025,
+ 1025, 1002, 1002, 719, 719, 719, 719, 719, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, 1478, -1000, 1627, 46736,
+ 1573, 14632, 1296, 2020, 1230, 2844, -1000, 1569, -1000, 1569,
+ 1900, 823, -1000, 19224, 1230, 2829, -1000, -1000, 1230, 1230,
+ 1230, 19224, -1000, -1000, 19224, 19224, 19224, 19224, 1622, 1622,
+ 1622, 1622, 1622, 1622, 1622, 1622, 1622, 1622, 19224, 1775,
+ 1768, 2435, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1044,
- 2036, 2036, 2036, 2036, 2036, 19384, 1721, -1000, -1000, -1000,
- 1387, 2894, 1238, 2885, 2036, 2036, -1000, 2036, 2857, 2837,
- 1334, 1744, 1334, 1785, -1000, 2813, 2036, 2807, 2802, 2797,
- 2014, 2792, 2776, 2770, 2036, 2036, 2036, 1980, 2764, 2758,
- 2752, 2740, 2728, 2709, 2697, 2690, 2684, 2036, -150, 2036,
- 1334, -1000, -1000, -1000, -1000, -1000, 2679, 1971, 1334, 1784,
- 1839, 676, -1000, -1000, 1660, 1334, 1334, 1660, 1660, 2624,
- 2601, 2582, 2432, 2311, 2303, 2036, 2036, -1000, 2036, 2293,
- 2255, 1958, 1940, 1334, -1000, 1404, 47207, -1000, -284, -1000,
- 24, 797, 1839, -1000, 31032, 1334, -1000, 6098, -1000, 1189,
- -1000, -1000, -1000, -1000, -1000, 28444, 1737, 3101, -1000, -1000,
- 1839, 1655, -1000, -1000, 514, 95, 27797, 735, 735, 137,
- 1467, 1467, 19384, -1000, -1000, -1000, -1000, -1000, -1000, 673,
- 2399, 391, 1839, -1000, 1790, 2514, -1000, -1000, -1000, 2285,
- 21973, -1000, -1000, 1839, 1839, 47207, 1708, 1691, -1000, 670,
- -1000, 1255, 1780, 57, 46, -1000, -1000, -1000, -1000, 1467,
- -1000, 1275, 430, 1726, -1000, 552, -1000, -1000, -1000, -1000,
- 2190, 107, -1000, -1000, -1000, 279, 514, -1000, -1000, -1000,
- -1000, -1000, -1000, 1521, 1521, -1000, -1000, -1000, -1000, -1000,
- 1134, -1000, -1000, -1000, 1133, -1000, -1000, 2114, 2038, 304,
- -1000, -1000, 814, 1517, -1000, -1000, 2197, 814, 814, 45913,
- -1000, -1000, 1709, 2267, 264, 47207, 868, 2046, -1000, 1995,
- 1995, 1995, 47207, -1000, -1000, -1000, -1000, -1000, -1000, -437,
- 63, 384, -1000, -1000, -1000, 309, 45913, 1653, -1000, 249,
- -1000, 1705, -1000, 45913, -1000, 1640, 1930, 1252, 1252, -1000,
- -1000, -1000, 45913, 1839, -1000, -1000, -1000, -1000, 605, 2242,
- 308, -1000, -1000, -183, -1000, -1000, 247, 249, 46560, 1252,
- 748, -1000, -1000, -1000, -1000, -1000, -421, 1638, 589, 255,
- 339, 47207, 47207, 47207, 47207, 47207, 655, -1000, -1000, 68,
- -1000, -1000, 224, -1000, -1000, -1000, -1000, 224, -1000, -1000,
- -1000, -1000, 370, 568, -1000, 47207, 47207, 710, -1000, -1000,
- -1000, 941, -1000, -1000, 941, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 2233, 47207, 37, -383, -1000,
- -379, 19384, -1000, -1000, -1000, -1000, 1217, 609, 1636, 20031,
- 20031, 20031, -1000, -1000, -1000, 884, 884, 27150, -1000, 19384,
- 18737, -1000, -1000, 19384, 19384, 846, -1000, 19384, 1014, -1000,
- 19384, -1000, -1000, -1000, 1404, 2036, 2036, 2036, 2036, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1818,
- 19384, 19384, 19384, 1334, 320, -1000, -1000, -1000, -1000, -1000,
- 2410, -1000, 19384, -1000, 27150, 19384, 19384, 19384, -1000, -1000,
- -1000, 19384, 19384, -1000, -1000, 19384, 19384, -1000, 19384, 19384,
- 19384, -1000, 19384, 19384, 19384, 19384, -1000, -1000, -1000, -1000,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384,
- -1000, -1000, 32326, 106, -150, 1372, 106, 1372, -1000, 18737,
- 13542, -1000, -1000, -1000, -1000, -1000, 19384, 19384, 19384, 19384,
- 19384, 19384, -1000, -1000, -1000, 19384, 19384, -1000, 19384, -1000,
- 19384, -1000, -1000, -1000, -1000, -1000, 797, -1000, 722, 722,
- 722, 45913, -1000, -1000, -1000, -1000, 1777, -1000, 2314, -1000,
- 2131, 2116, 2409, 2399, -1000, 23915, 3101, -1000, -1000, 45913,
- -274, -1000, 2172, 2187, 735, 735, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 11583, 2326, 19384, 2040, 46560, 159, -1000,
- 23268, 45913, 46560, 23915, 23915, 23915, 23915, 23915, -1000, 2070,
- 2061, -1000, 2115, 2095, 2148, 47207, -1000, 1591, 1633, -1000,
- 19384, 25856, 1751, 23915, -1000, -1000, 23915, 47207, 10930, -1000,
- -1000, 31, 35, -1000, -1000, -1000, -1000, 3182, -1000, -1000,
- 3205, 2284, 2184, -1000, -1000, -1000, -1000, -1000, 1631, -1000,
- 1629, 1771, 1625, 227, -1000, 1898, 2232, 814, 814, -1000,
- 1125, -1000, 1252, 1506, 1504, -1000, -1000, -1000, 587, -1000,
- 2274, 47207, 2039, 2037, 2034, -1000, -445, 1123, 1923, 1887,
- 19384, 1920, 2381, 1767, 45913, -1000, -1000, 46560, -1000, 300,
- -1000, 304, 45913, -1000, -1000, -1000, 408, 47207, -1000, 5931,
- -1000, -1000, -1000, 249, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 47207, 329, -1000, 1908, 1233, -1000, -1000, 1977, -1000,
- -1000, -1000, -1000, 271, 386, 1478, 222, 1448, 222, -1000,
- 47207, 707, 2038, 47207, -1000, -1000, -1000, 955, 955, -1000,
- -1000, 2223, -1000, 1252, 2036, 20031, 20031, -1000, 749, 322,
- -128, 1896, 1896, -1000, 1896, 1906, -1000, 1896, 181, 1896,
- 179, 1896, -1000, -1000, 1334, 1334, 1404, -1000, 1935, 993,
- -1000, 1467, 19384, 2236, -1000, -1000, -1000, -1000, -1000, -23,
- 2210, 2194, 2036, -1000, 1890, 1889, 19384, 2036, 1334, 1924,
- 2036, 2036, 2036, 2036, -1000, 1467, 1404, 2181, 1404, 2036,
- 2036, 2175, 333, 2036, 1621, 1621, 1621, 1621, 1621, 1404,
- 1404, 1404, 1404, 45913, -1000, -150, -1000, -1000, -201, -202,
- -1000, 1334, -150, 1770, 1334, -1000, 1912, 1903, 2139, 1893,
- 2036, 2104, 2036, 2036, 2036, 1807, -1000, 2304, 2304, 2304,
- 1576, 1189, 47207, -1000, -1000, -1000, -1000, 2399, 2394, 1768,
- -1000, -1000, 95, 427, -1000, 2149, 2187, -1000, 2360, 2156,
- 2356, -1000, -1000, -1000, -1000, -1000, 1467, -1000, 2246, 1747,
- -1000, 819, 1753, -1000, -1000, 18090, 1613, 2103, 668, 1576,
- 1820, 2514, 1993, 2030, 2315, -1000, -1000, -1000, -1000, 2060,
- -1000, 2059, -1000, -1000, 1853, -1000, 2010, 421, 23915, 1734,
- 1734, -1000, 666, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- 982, 5268, 2435, -1000, 1440, -1000, 1188, 205, 1113, -1000,
- -1000, 814, 814, -1000, 915, 910, -1000, 47207, 1885, -1000,
- 514, 1414, 514, 1107, -1000, 1106, -1000, -1000, -1000, -1000,
- 1882, 2025, -1000, -1000, -1000, -1000, 47207, -1000, -1000, 47207,
- 47207, 47207, 1873, 2354, -1000, 19384, 1872, 816, 2091, 45913,
- 45913, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 528, 814, -400, 373, 372, 814, 814, 814,
- -446, -1000, -1000, 1567, 1563, -1000, -116, -1000, 19384, -1000,
- -1000, -1000, 1110, 1110, 1408, 1397, 1380, -1000, 1853, -1000,
- -1000, -1000, 1696, -1000, -1000, -96, 45913, 45913, 45913, 45913,
- -1000, -1000, 1040, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 749, 1334, 345, -98, 1334,
- -1000, -1000, 514, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 19384, -1000, 19384, -1000, 1467, 19384, 2326,
- 1373, 19384, 19384, -1000, 1094, 1084, 2036, -1000, -1000, -1000,
- 19384, -1000, -1000, -1000, -1000, -1000, 19384, -1000, -1000, -1000,
- 19384, 231, 884, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 1334, 415, -1000, -1000, -1000, -1000, 2403,
- -1000, 1334, 19384, -1000, -1000, 19384, -1000, 19384, 19384, -1000,
- 19384, -1000, 19384, -1000, -1000, -1000, -1000, 19384, 1839, 2167,
- 1839, 1839, 25856, -1000, -1000, 2394, 2392, 2348, 2146, 2151,
- 2151, 2149, -1000, 2347, 2346, -1000, 1358, 2345, 1356, 909,
- -1000, 46560, 19384, 159, -1000, 393, 45913, 159, 45913, -1000,
- 2389, -1000, -1000, 19384, 1867, -1000, 19384, -1000, -1000, -1000,
- -1000, 6253, 2399, 1734, -1000, -1000, 764, -1000, 19384, -1000,
- -1000, -1000, 108, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 1354, 1341, -1000, -1000, 1858, 19384, -1000, -1000, -1000,
- 1626, 1577, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- 1853, -1000, -1000, -1000, -1000, 408, -441, 2083, 45913, 1073,
- -1000, 1549, 1767, 404, 159, 1339, 814, 814, 814, 1072,
- 1035, 31032, 1547, -1000, 45913, 506, -1000, 408, -1000, -123,
- -125, 2036, -1000, -1000, 2283, -1000, -1000, 13542, -1000, -1000,
- 1850, 1984, -1000, -1000, -1000, -1000, 2076, -97, -103, -1000,
- -1000, 2036, 2036, 1990, 1334, -1000, 2036, 2036, 1564, 1407,
- -1000, 2036, 1404, 1791, -1000, 231, 1334, 2024, -1000, -1000,
- 6253, -1000, -1000, 2389, 2344, 106, -1000, -1000, 246, 106,
- 1467, 1762, 2036, 1728, 1701, 2036, 2036, 26503, -1000, 2330,
- 2319, 31679, 31679, 797, 2392, -157, 19384, 19384, 2141, 1075,
- -1000, -1000, -1000, -1000, 1336, 1331, -1000, 1325, -1000, 2430,
- -1000, 1467, -1000, 159, -1000, 665, 1753, -1000, 2326, 1467,
- 45913, 1467, 96, 2389, -1000, 2036, -1000, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, -1000, -1000, 45913,
- 2071, -1000, -1000, 2280, 1528, 61, -1000, 1391, 1767, -1000,
- -1000, 156, -1000, 19384, -1000, 31032, 1304, 1270, -1000, -1000,
- -1000, -1000, -446, -1000, -1000, -1000, -1000, -1000, -1000, 394,
- 1761, -1000, 813, 45913, 47207, -1000, 2028, -1000, -1000, -1000,
- 19384, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 19384,
- -1000, 1334, 2013, -1000, -267, -1000, -419, 19384, -150, -1000,
- -1000, -150, -1000, 19384, -1000, -1000, 19384, -1000, 19384, -1000,
- -1000, 1525, -1000, -1000, -1000, -1000, -1000, 1525, 1525, -1000,
- -157, -1000, 1757, -1000, 45913, 1467, 1744, -1000, 1036, -1000,
- -1000, -1000, -1000, -1000, 46560, 1753, 45913, -1000, 1431, 1334,
- 1839, 2326, -1000, 1429, -1000, 394, -1000, 1847, 1887, -1000,
- -1000, -1000, 17443, -1000, -1000, -1000, -1000, -1000, 268, -93,
- 13542, 10277, 1413, -1000, -90, 2036, 1404, -1000, -365, -1000,
- -1000, -1000, -1000, 170, -1000, -1000, 1744, -1000, -1000, 1642,
- 1623, 1530, 30385, -1000, -1000, -1000, -1000, -157, -1000, -1000,
- 2279, -1000, -1000, 1743, -1000, -1000, 25856, 45266, -1000, -78,
- 619, -93, 19384, 1842, 1334, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 29, -1000, -1000, -1000, -1000, -1000, 1977,
- -100, -1000, -1000, -1000, 173, -390, -195, -196, -1000, -1000,
- 20031, -1000, 19384, -1000, 19384, -1000, 19384, -1000, -1000, -1000,
- 45913, 1839, -1000, 1396, -1000, 3836, -214, 2008, -1000, 43,
- -1000, -1000, -1000, 973, 1261, -1000, -1000, -1000, -1000, -1000,
- -1000, 2052, 45913, -1000, 534, -1000, -1000, -96, -108, 899,
- -1000, -1000, -1000, -1000, -1000, 1256, 1222, 2036, -1000, 45913,
- -1000, 45266, -209, 748, 6253, -1000, 2007, 1981, 2408, -1000,
- -1000, -1000, -1000, -1000, -1000, -454, 1390, 266, -1000, -1000,
- 173, -1000, 19384, -1000, 19384, -1000, 1334, -1000, -1000, 2273,
- 96, -1000, 2425, -1000, 2400, 733, 733, -1000, 1015, -454,
- -1000, -1000, 2036, 2036, -1000, -223, -1000, -1000, -1000, -1000,
- -1000, 520, 1086, -1000, -1000, -1000, -1000, -1000, 6253, -1000,
- -1000, -1000, 215, 215, -1000, -1000,
+ -1000, -1000, -1000, 1111, 1622, 1622, 1622, 1622, 1622, 19224,
+ 1606, -1000, -1000, -1000, 1300, 2803, 1176, 2799, 1622, 1622,
+ -1000, 1622, 2788, 2773, 1230, 1575, 1230, 1717, -1000, 2756,
+ 1622, 2751, 2747, 2549, 2007, 2466, 2405, 2398, 1622, 1622,
+ 1622, 2002, 2390, 2367, 2353, 2333, 2320, 2290, 2281, 2277,
+ 2272, 1622, -143, 1622, 1230, -1000, -1000, -1000, -1000, -1000,
+ 2264, 1991, 1230, 1687, 1627, 656, -1000, -1000, 1569, 1230,
+ 1230, 1569, 1569, 2259, 2220, 2214, 2209, 2203, 2199, 1622,
+ 1622, -1000, 1622, 2171, 2159, 1974, 1970, 1230, -1000, 1294,
+ 47391, -1000, -300, -1000, 62, 718, 1627, -1000, 31671, 1230,
+ -1000, 6587, -1000, 1146, -1000, -1000, -1000, -1000, -1000, 29051,
+ 1650, 2360, -1000, -1000, 1627, 1559, -1000, -1000, -1000, -1000,
+ 395, 120, 28396, 708, 708, 157, 1098, 1098, 19224, -1000,
+ -1000, -1000, -1000, -1000, -1000, 655, 2401, 401, 1627, -1000,
+ 1683, 2366, -1000, -1000, -1000, 2285, 22500, -1000, -1000, 1627,
+ 1627, 47391, 1653, 1621, -1000, 654, -1000, 1205, 1684, 38,
+ 37, -1000, -1000, -1000, -1000, 1098, -1000, 1203, 424, 370,
+ -1000, 503, -1000, -1000, -1000, -1000, 2174, 133, -1000, -1000,
+ -1000, 274, 395, -1000, -1000, -1000, -1000, -1000, -1000, 1445,
+ 1445, -1000, -1000, -1000, -1000, -1000, 1135, -1000, -1000, -1000,
+ -1000, 1118, -1000, -1000, 1093, -1000, -1000, 2138, 1984, 246,
+ -1000, -1000, 778, 1422, -1000, -1000, 2172, 778, 778, 46081,
+ -1000, -1000, 1428, 2241, 297, 47391, 831, 2010, -1000, 1948,
+ 1948, 1948, 47391, -1000, -1000, -1000, -1000, -1000, -1000, -434,
+ 71, 408, -1000, -1000, -1000, 3593, 46081, 1531, -1000, 273,
+ -1000, 1421, -1000, 46081, -1000, 1528, 1889, 1051, 1051, -1000,
+ -1000, -1000, 46081, 1627, -1000, -1000, -1000, -1000, 584, 2222,
+ 309, -1000, -1000, -163, -1000, -1000, 275, 273, 46736, 1051,
+ 713, -1000, -1000, -1000, -1000, -1000, -439, 1523, 568, 286,
+ 356, 47391, 47391, 47391, 47391, 47391, 624, -1000, -1000, 78,
+ -1000, -1000, 252, -1000, -1000, -1000, -1000, 252, -1000, -1000,
+ -1000, -1000, 329, 554, -1000, 47391, 47391, 726, -1000, -1000,
+ -1000, 891, -1000, -1000, 891, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2219, 47391, 65,
+ -367, -1000, -359, 19224, -1000, -1000, -1000, -1000, 1038, 562,
+ 1436, 19879, 19879, 19879, -1000, -1000, -1000, 716, 716, 27741,
+ -1000, 19224, 18569, -1000, -1000, 19224, 19224, 809, -1000, 19224,
+ 941, -1000, 19224, -1000, -1000, -1000, 1294, 1622, 1622, 1622,
+ 1622, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 1685, 19224, 19224, 19224, 1230, 341, -1000, -1000, -1000,
+ -1000, -1000, 2433, -1000, 19224, -1000, 27741, 19224, 19224, 19224,
+ -1000, -1000, -1000, 19224, 19224, -1000, -1000, 19224, 19224, -1000,
+ 19224, 19224, 19224, -1000, 19224, 19224, 19224, 19224, -1000, -1000,
+ -1000, -1000, 19224, 19224, 19224, 19224, 19224, 19224, 19224, 19224,
+ 19224, 19224, -1000, -1000, 32981, 114, -143, 1012, 114, 1012,
+ -1000, 18569, 13971, -1000, -1000, -1000, -1000, -1000, 19224, 19224,
+ 19224, 19224, 19224, 19224, -1000, -1000, -1000, 19224, 19224, -1000,
+ 19224, -1000, 19224, -1000, -1000, -1000, -1000, -1000, 718, -1000,
+ 684, 684, 684, 46081, -1000, -1000, -1000, -1000, 1679, -1000,
+ 2302, -1000, 2098, 2096, 2431, 2401, -1000, 24466, 2360, -1000,
+ -1000, 46081, -291, -1000, 2127, 2121, 708, 708, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 11327, 2337, 19224, 2009, 46736,
+ 140, -1000, 23811, 46081, 46736, 24466, 24466, 24466, 24466, 24466,
+ -1000, 2036, 2030, -1000, 2093, 2051, 2059, 47391, -1000, 1403,
+ 1508, -1000, 19224, 26431, 1680, 24466, -1000, -1000, 24466, 47391,
+ 10666, -1000, -1000, 64, 40, -1000, -1000, -1000, -1000, 3441,
+ -1000, -1000, 1147, 2283, 2141, -1000, -1000, -1000, -1000, -1000,
+ 1493, -1000, 1453, 1677, 1451, 1433, 238, -1000, 1898, 2216,
+ 778, 778, -1000, 1092, -1000, 1051, 1412, 1401, -1000, -1000,
+ -1000, 563, -1000, 2256, 47391, 2008, 2006, 2005, -1000, -446,
+ 1089, 1884, 1872, 19224, 1881, 2384, 1655, 46081, -1000, -1000,
+ 46736, -1000, 248, -1000, 246, 46081, -1000, -1000, -1000, 397,
+ 47391, -1000, 5694, -1000, -1000, -1000, 273, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 47391, 293, -1000, 1877, 1194, -1000,
+ -1000, 1871, -1000, -1000, -1000, -1000, 233, 333, 1398, 237,
+ 1393, 237, -1000, 47391, 678, 1984, 47391, -1000, -1000, -1000,
+ 916, 916, -1000, -1000, 2205, -1000, 1051, 1622, 19879, 19879,
+ -1000, 721, 540, -120, 1875, 1875, -1000, 1875, 1876, -1000,
+ 1875, 205, 1875, 204, 1875, -1000, -1000, 1230, 1230, 1294,
+ -1000, 1963, 1893, -1000, 1098, 19224, 2154, -1000, -1000, -1000,
+ -1000, -1000, -13, 2132, 2123, 1622, -1000, 1873, 1866, 19224,
+ 1622, 1230, 1928, 1622, 1622, 1622, 1622, -1000, 1098, 1294,
+ 2116, 1294, 1622, 1622, 2084, 328, 1622, 1427, 1427, 1427,
+ 1427, 1427, 1294, 1294, 1294, 1294, 46081, -1000, -143, -1000,
+ -1000, -191, -194, -1000, 1230, -143, 1662, 1230, -1000, 1618,
+ 1614, 2080, 1594, 1622, 2067, 1622, 1622, 1622, 1504, -1000,
+ 2326, 2326, 2326, 1397, 1146, 47391, -1000, -1000, -1000, -1000,
+ 2401, 2393, 1658, -1000, -1000, 120, 458, -1000, 2133, 2121,
+ -1000, 2377, 2142, 2376, -1000, -1000, -1000, -1000, -1000, 1098,
+ -1000, 2236, 1674, -1000, 785, 1641, -1000, -1000, 17914, 1417,
+ 2089, 653, 1397, 1778, 2366, 1971, 2004, 2117, -1000, -1000,
+ -1000, -1000, 2027, -1000, 2026, -1000, -1000, 1831, -1000, 2057,
+ 423, 24466, 1718, 1718, -1000, 651, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 965, 5282, 2467, -1000, 1386, -1000, 1201,
+ 198, 1088, -1000, -1000, 778, 778, -1000, 899, 890, -1000,
+ 47391, 1855, -1000, 395, 1378, 395, 1086, -1000, -1000, 1085,
+ -1000, -1000, -1000, -1000, 1862, 1993, -1000, -1000, -1000, -1000,
+ 47391, -1000, -1000, 47391, 47391, 47391, 1852, 2370, -1000, 19224,
+ 1850, 782, 2025, 46081, 46081, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, 499, 778, -414, 338,
+ 332, 778, 778, 778, -447, -1000, -1000, 1391, 1382, -1000,
+ -110, -1000, 19224, -1000, -1000, -1000, 1121, 1121, 1377, 1376,
+ 1375, -1000, 1831, -1000, -1000, -1000, 1418, -1000, -1000, -86,
+ 46081, 46081, 46081, 46081, -1000, -1000, 1040, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 721,
+ 1230, 359, -100, 1230, -1000, -1000, 395, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 19224, -1000, 19224,
+ -1000, 1098, 19224, 2337, 1374, 19224, 19224, -1000, 1050, 1049,
+ 1622, -1000, -1000, -1000, 19224, -1000, -1000, -1000, -1000, -1000,
+ 19224, -1000, -1000, -1000, 19224, 250, 716, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1230, 419, -1000,
+ -1000, -1000, -1000, 2415, -1000, 1230, 19224, -1000, -1000, 19224,
+ -1000, 19224, 19224, -1000, 19224, -1000, 19224, -1000, -1000, -1000,
+ -1000, 19224, 1627, 2217, 1627, 1627, 26431, -1000, -1000, 2393,
+ 2355, 2363, 2097, 2124, 2124, 2133, -1000, 2362, 2354, -1000,
+ 1354, 2352, 1352, 865, -1000, 46736, 19224, 140, -1000, 402,
+ 46081, 140, 46081, -1000, 2361, -1000, -1000, 19224, 1849, -1000,
+ 19224, -1000, -1000, -1000, -1000, 6717, 2401, 1718, -1000, -1000,
+ 737, -1000, 19224, -1000, -1000, -1000, 5903, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 1350, 1346, -1000, -1000, 1847,
+ 19224, -1000, -1000, -1000, 1404, 1392, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 1831, -1000, -1000, -1000, -1000, 397,
+ -445, 2011, 46081, 1039, -1000, 1371, 1655, 380, 140, 1344,
+ 778, 778, 778, 1026, 1023, 31671, 1365, -1000, 46081, 488,
+ -1000, 397, -1000, -116, -117, 1622, -1000, -1000, 2266, -1000,
+ -1000, 13971, -1000, -1000, 1830, 1936, -1000, -1000, -1000, -1000,
+ 2043, -79, -106, -1000, -1000, 1622, 1622, 1980, 1230, -1000,
+ 1622, 1622, 1372, 1338, -1000, 1622, 1294, 1500, -1000, 250,
+ 1230, 2001, -1000, -1000, 6717, -1000, -1000, 2361, 2348, 114,
+ -1000, -1000, 257, 114, 1098, 1491, 1622, 1425, 1357, 1622,
+ 1622, 27086, -1000, 2347, 2346, 32326, 32326, 718, 2355, -150,
+ 19224, 19224, 2102, 1011, -1000, -1000, -1000, -1000, 1333, 1331,
+ -1000, 1240, -1000, 2464, -1000, 1098, -1000, 140, -1000, 650,
+ 1641, -1000, 2337, 1098, 46081, 1098, 119, 2361, -1000, 1622,
+ -1000, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627, 1627,
+ 1627, -1000, -1000, 46081, 1904, -1000, -1000, 2263, 1363, 70,
+ -1000, 1301, 1655, -1000, -1000, 144, -1000, 19224, -1000, 31671,
+ 1231, 1227, -1000, -1000, -1000, -1000, -447, -1000, -1000, -1000,
+ -1000, -1000, -1000, 414, 1654, -1000, 768, 46081, 47391, -1000,
+ 2029, -1000, -1000, -1000, 19224, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 19224, -1000, 1230, 1988, -1000, -243, -1000,
+ -392, 19224, -143, -1000, -1000, -143, -1000, 19224, -1000, -1000,
+ 19224, -1000, 19224, -1000, -1000, 1360, -1000, -1000, -1000, -1000,
+ -1000, 1360, 1360, -1000, -150, -1000, 1649, -1000, 46081, 1098,
+ 1575, -1000, 1010, -1000, -1000, -1000, -1000, -1000, 46736, 1641,
+ 46081, -1000, 1337, 1230, 1627, 2337, -1000, 1328, -1000, 414,
+ -1000, 1820, 1872, -1000, -1000, -1000, 17259, -1000, -1000, -1000,
+ -1000, -1000, 191, -84, 13971, 10005, 1318, -1000, -83, 1622,
+ 1294, -1000, -349, -1000, -1000, -1000, -1000, 200, -1000, -1000,
+ 1575, -1000, -1000, 1316, 1291, 1271, 31016, -1000, -1000, -1000,
+ -1000, -150, -1000, -1000, 2260, -1000, -1000, 1525, -1000, -1000,
+ 26431, 45426, -1000, -67, 325, -84, 19224, 1819, 1230, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 35, -1000, -1000,
+ 636, -1000, -1000, -1000, 1871, -104, -1000, -1000, -1000, 169,
+ -404, -188, -193, -1000, -1000, 19879, -1000, 19224, -1000, 19224,
+ -1000, 19224, -1000, -1000, -1000, 46081, 1627, -1000, 1262, -1000,
+ 2936, -216, 1987, -1000, -43, -1000, -1000, -1000, 954, 1208,
+ -1000, -1000, -1000, -1000, -1000, -1000, 1651, 46081, -1000, 514,
+ -1000, -1000, 13310, -86, -108, 874, -1000, -1000, -1000, -1000,
+ -1000, 1245, 1022, 1622, -1000, 46081, -1000, 45426, -203, 713,
+ 6717, -1000, 1983, 1977, 2429, -1000, -1000, -1000, -1000, -1000,
+ -1000, -454, 1248, 299, -1000, -1000, -1000, 169, -1000, 19224,
+ -1000, 19224, -1000, 1230, -1000, -1000, 2251, 119, -1000, 2463,
+ -1000, 2444, 776, 776, -1000, 1013, -454, -1000, -1000, 1622,
+ 1622, -1000, -245, -1000, -1000, -1000, -1000, -1000, 495, 1046,
+ -1000, -1000, -1000, -1000, -1000, 6717, -1000, -1000, -1000, 239,
+ 239, -1000, -1000,
}
var yyPgo = [...]int{
- 0, 3025, 3022, 38, 2, 36, 35, 3019, 93, 102,
- 196, 37, 211, 104, 3013, 3007, 3006, 3001, 2997, 2995,
- 2994, 171, 169, 166, 2993, 2992, 2990, 2989, 2988, 2987,
- 2986, 2985, 2984, 2983, 162, 159, 181, 2981, 2980, 2977,
- 117, 179, 89, 91, 183, 2975, 2974, 84, 2972, 2971,
- 2970, 192, 191, 190, 918, 2968, 188, 115, 53, 2967,
- 2966, 2965, 2964, 2963, 2961, 2959, 2958, 2956, 2953, 2951,
- 2950, 2947, 2942, 2940, 2935, 2932, 287, 2930, 2926, 22,
- 2921, 86, 2919, 2916, 2913, 2910, 12, 2908, 2907, 16,
- 40, 2904, 2892, 48, 2888, 2886, 2884, 2883, 2878, 17,
- 2877, 27, 2876, 39, 2875, 2874, 126, 2873, 2868, 2866,
- 41, 2865, 2863, 2862, 2859, 2856, 2855, 2854, 140, 2853,
- 2849, 2847, 236, 185, 2845, 2844, 273, 143, 108, 2843,
- 2842, 114, 180, 2830, 116, 2827, 2824, 2819, 146, 2812,
- 138, 2804, 2803, 66, 71, 2802, 371, 2800, 2799, 11,
- 15, 72, 10, 20, 23, 2795, 2794, 65, 78, 2793,
- 129, 2790, 2787, 98, 69, 2786, 106, 103, 2784, 2782,
- 7, 5, 2781, 3, 1, 4, 68, 2779, 2777, 122,
- 2776, 2775, 2773, 92, 2771, 2764, 2164, 2761, 95, 131,
- 101, 81, 2759, 50, 58, 2757, 2754, 2753, 2746, 2745,
- 54, 2739, 2738, 2737, 136, 380, 160, 2735, 47, 82,
- 46, 134, 2734, 61, 80, 189, 161, 2733, 2731, 137,
- 135, 2726, 2725, 62, 44, 45, 2722, 113, 130, 118,
- 111, 112, 163, 2720, 2716, 60, 75, 2715, 2714, 2708,
- 2705, 164, 2704, 2703, 73, 2702, 57, 2700, 167, 2699,
- 19, 67, 2698, 49, 149, 2697, 76, 2696, 2695, 63,
- 100, 70, 43, 2691, 195, 2690, 56, 170, 128, 151,
- 2687, 2685, 2684, 2681, 187, 338, 2679, 2676, 77, 176,
- 139, 144, 94, 2674, 349, 2668, 2653, 132, 2603, 5785,
- 2651, 42, 154, 2632, 2626, 7129, 157, 52, 26, 2625,
- 109, 2617, 2616, 2615, 2612, 193, 172, 105, 168, 59,
- 2611, 2606, 2604, 14, 2598, 2593, 2590, 2580, 2572, 2570,
- 90, 34, 33, 32, 202, 74, 30, 97, 150, 83,
- 2569, 2568, 2566, 124, 79, 2565, 158, 156, 125, 155,
- 2563, 177, 141, 123, 2562, 119, 31, 2559, 2552, 2551,
- 2549, 99, 2548, 2534, 2525, 2522, 148, 142, 121, 87,
- 2518, 88, 120, 147, 145, 55, 2516, 51, 2504, 2502,
- 29, 182, 28, 2500, 13, 107, 212, 2497, 5000, 178,
- 2496, 21, 367, 174, 2495, 2493, 8, 9, 6, 2491,
- 2490, 2486, 2477, 133, 2470, 2469, 2467, 2463, 25, 64,
- 24, 18, 110, 85, 2459, 2458, 3799, 0, 127, 2423,
- 197,
+ 0, 2991, 2990, 38, 7, 45, 42, 2989, 37, 113,
+ 201, 43, 199, 104, 2988, 179, 2987, 2984, 2982, 2981,
+ 2980, 2979, 2542, 2535, 2487, 2978, 2977, 2976, 2974, 2973,
+ 2972, 2967, 2965, 2961, 2960, 180, 164, 195, 2956, 2954,
+ 2953, 127, 210, 95, 97, 189, 2952, 2951, 87, 2950,
+ 2949, 2948, 193, 192, 191, 888, 2947, 190, 143, 63,
+ 2943, 2939, 2936, 2935, 2934, 2928, 2923, 2921, 2919, 2918,
+ 2916, 2913, 2910, 2909, 2908, 2907, 2905, 284, 2902, 2899,
+ 24, 2894, 94, 2885, 2884, 2883, 2882, 2881, 12, 2880,
+ 2879, 16, 56, 2875, 2873, 58, 2870, 2869, 2866, 2865,
+ 2864, 22, 2862, 30, 2857, 47, 2855, 2847, 132, 2845,
+ 2843, 2842, 48, 2841, 2840, 2837, 2836, 2834, 2832, 2831,
+ 151, 2829, 2827, 2823, 184, 196, 2820, 2819, 170, 122,
+ 114, 2811, 2808, 109, 194, 2806, 128, 2804, 2802, 2800,
+ 150, 2799, 403, 2798, 2794, 78, 71, 2793, 28, 2785,
+ 2784, 11, 85, 73, 10, 4, 5, 2783, 2782, 75,
+ 91, 2776, 120, 2774, 2773, 111, 79, 2770, 108, 103,
+ 2769, 2766, 17, 9, 2765, 2, 6, 3, 80, 2764,
+ 2759, 129, 2754, 2744, 2743, 102, 2742, 2741, 4048, 2740,
+ 99, 138, 112, 83, 2739, 57, 76, 2738, 2737, 2736,
+ 2735, 2734, 61, 2733, 2732, 2730, 149, 74, 174, 2728,
+ 50, 77, 62, 137, 2725, 53, 98, 198, 176, 2717,
+ 2715, 142, 139, 2713, 2712, 67, 51, 49, 2711, 107,
+ 135, 125, 31, 106, 136, 2702, 2696, 66, 84, 2694,
+ 2689, 2687, 2676, 178, 2671, 2669, 88, 2668, 64, 2660,
+ 175, 2655, 19, 60, 2654, 55, 163, 2652, 86, 2648,
+ 2647, 81, 118, 82, 44, 2645, 167, 172, 133, 162,
+ 2641, 2636, 65, 2635, 2634, 2630, 204, 325, 2629, 2628,
+ 145, 182, 158, 152, 100, 2623, 350, 2622, 2621, 20,
+ 2846, 6504, 2619, 41, 166, 2617, 2616, 5643, 21, 52,
+ 32, 2615, 117, 2613, 2612, 2611, 2606, 203, 181, 173,
+ 171, 70, 2602, 2601, 2600, 18, 2598, 2595, 2594, 2593,
+ 2591, 2590, 96, 39, 36, 35, 202, 68, 14, 105,
+ 168, 160, 89, 2588, 2582, 2579, 131, 101, 2576, 161,
+ 157, 144, 200, 2574, 185, 154, 126, 2573, 72, 34,
+ 2572, 2571, 2570, 2569, 110, 2568, 2567, 2566, 2565, 155,
+ 159, 130, 92, 2564, 93, 124, 169, 165, 59, 2561,
+ 69, 2560, 2559, 33, 187, 29, 2557, 23, 116, 115,
+ 2555, 6068, 186, 2551, 25, 348, 156, 2550, 2549, 8,
+ 13, 15, 2543, 2534, 2525, 2512, 141, 2511, 2505, 2504,
+ 2497, 27, 54, 26, 1, 123, 90, 2496, 2489, 153,
+ 2488, 2481, 4072, 0, 134, 2479, 205,
}
-//line sql.y:7884
+//line sql.y:7989
type yySymType struct {
union any
empty struct{}
@@ -6718,7 +6748,7 @@ type yySymType struct {
identifierCI IdentifierCI
joinCondition *JoinCondition
databaseOption DatabaseOption
- columnType ColumnType
+ columnType *ColumnType
columnCharset ColumnCharset
yys int
}
@@ -6773,16 +6803,6 @@ func (st *yySymType) booleanUnion() bool {
return v
}
-func (st *yySymType) characteristicUnion() Characteristic {
- v, _ := st.union.(Characteristic)
- return v
-}
-
-func (st *yySymType) characteristicsUnion() []Characteristic {
- v, _ := st.union.([]Characteristic)
- return v
-}
-
func (st *yySymType) colKeyOptUnion() ColumnKeyOption {
v, _ := st.union.(ColumnKeyOption)
return v
@@ -7013,11 +7033,6 @@ func (st *yySymType) isExprOperatorUnion() IsExprOperator {
return v
}
-func (st *yySymType) isolationLevelUnion() IsolationLevel {
- v, _ := st.union.(IsolationLevel)
- return v
-}
-
func (st *yySymType) joinTypeUnion() JoinType {
v, _ := st.union.(JoinType)
return v
@@ -7318,6 +7333,16 @@ func (st *yySymType) trimTypeUnion() TrimType {
return v
}
+func (st *yySymType) txAccessModeUnion() TxAccessMode {
+ v, _ := st.union.(TxAccessMode)
+ return v
+}
+
+func (st *yySymType) txAccessModesUnion() []TxAccessMode {
+ v, _ := st.union.([]TxAccessMode)
+ return v
+}
+
func (st *yySymType) updateExprUnion() *UpdateExpr {
v, _ := st.union.(*UpdateExpr)
return v
@@ -7348,6 +7373,11 @@ func (st *yySymType) variablesUnion() []*Variable {
return v
}
+func (st *yySymType) vexplainTypeUnion() VExplainType {
+ v, _ := st.union.(VExplainType)
+ return v
+}
+
func (st *yySymType) vindexParamsUnion() []VindexParam {
v, _ := st.union.([]VindexParam)
return v
@@ -7384,222 +7414,226 @@ func (st *yySymType) withUnion() *With {
}
var yyR1 = [...]int{
- 0, 404, 405, 405, 7, 7, 7, 7, 7, 7,
+ 0, 410, 411, 411, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 256, 378,
- 379, 379, 254, 254, 32, 71, 34, 34, 33, 33,
- 36, 36, 35, 8, 8, 8, 9, 9, 9, 9,
- 9, 9, 9, 9, 10, 10, 10, 10, 10, 11,
- 11, 11, 11, 13, 13, 13, 13, 13, 19, 20,
- 12, 12, 21, 21, 104, 104, 22, 23, 23, 23,
- 23, 408, 408, 181, 181, 179, 179, 180, 180, 259,
- 259, 24, 25, 25, 269, 269, 268, 268, 268, 270,
- 270, 270, 270, 308, 308, 308, 26, 26, 26, 26,
- 26, 124, 124, 381, 381, 380, 374, 374, 373, 373,
- 372, 377, 377, 376, 376, 375, 38, 39, 48, 48,
- 48, 48, 49, 50, 382, 382, 347, 55, 55, 54,
- 54, 54, 54, 54, 54, 56, 56, 52, 52, 51,
- 51, 53, 53, 349, 349, 335, 335, 348, 348, 348,
- 348, 348, 348, 348, 334, 334, 135, 135, 233, 233,
- 233, 233, 233, 233, 233, 233, 233, 233, 233, 233,
- 233, 233, 233, 233, 233, 397, 397, 397, 396, 396,
- 234, 234, 234, 234, 234, 234, 234, 234, 145, 145,
- 157, 157, 157, 157, 157, 143, 143, 144, 142, 142,
- 142, 151, 151, 151, 151, 151, 151, 151, 151, 151,
- 151, 151, 151, 151, 151, 151, 151, 151, 401, 401,
- 401, 401, 401, 401, 401, 401, 401, 401, 401, 401,
- 401, 401, 401, 401, 401, 401, 401, 401, 401, 401,
- 401, 401, 401, 401, 401, 401, 401, 401, 401, 401,
- 401, 401, 401, 401, 401, 401, 401, 401, 401, 401,
- 156, 156, 152, 152, 152, 153, 153, 153, 154, 154,
- 398, 398, 398, 398, 313, 313, 313, 313, 316, 316,
- 314, 314, 314, 314, 314, 314, 314, 314, 314, 315,
- 315, 315, 315, 315, 317, 317, 317, 317, 317, 318,
- 318, 318, 318, 318, 318, 318, 318, 318, 318, 318,
- 318, 318, 318, 318, 318, 319, 319, 319, 319, 319,
- 319, 319, 319, 333, 333, 320, 320, 328, 328, 329,
- 329, 329, 330, 330, 330, 331, 331, 325, 325, 325,
- 325, 325, 325, 325, 325, 325, 327, 327, 326, 326,
- 326, 336, 361, 361, 360, 360, 358, 358, 358, 358,
- 358, 358, 358, 358, 345, 345, 355, 355, 355, 355,
- 355, 344, 344, 340, 340, 340, 341, 341, 342, 342,
- 339, 339, 343, 343, 357, 357, 356, 356, 337, 337,
- 338, 338, 363, 399, 399, 399, 399, 399, 400, 400,
- 364, 389, 391, 391, 391, 390, 390, 387, 388, 386,
- 386, 386, 386, 386, 81, 81, 81, 282, 282, 283,
- 283, 353, 353, 352, 352, 352, 354, 354, 351, 351,
- 351, 351, 351, 351, 351, 351, 351, 351, 351, 351,
- 351, 351, 351, 351, 351, 351, 351, 351, 351, 351,
- 351, 351, 351, 351, 351, 351, 351, 351, 351, 277,
- 277, 277, 385, 385, 385, 385, 385, 385, 384, 384,
- 384, 350, 350, 350, 383, 383, 57, 57, 214, 214,
- 402, 402, 403, 403, 403, 45, 45, 45, 45, 45,
- 45, 44, 44, 44, 40, 40, 40, 40, 40, 40,
- 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
- 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
- 40, 40, 40, 40, 40, 46, 46, 41, 41, 41,
- 41, 41, 41, 41, 41, 41, 41, 27, 27, 27,
- 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 27, 27, 27, 27, 27, 106, 106, 107, 107, 107,
- 107, 109, 109, 109, 366, 366, 58, 58, 3, 3,
- 169, 171, 172, 172, 170, 170, 170, 170, 170, 170,
- 60, 60, 59, 59, 174, 173, 175, 175, 175, 1,
- 1, 2, 2, 4, 4, 371, 371, 371, 371, 371,
- 371, 371, 371, 371, 371, 371, 371, 371, 371, 371,
- 371, 371, 371, 371, 371, 371, 371, 332, 332, 332,
- 365, 365, 367, 108, 108, 108, 108, 108, 108, 108,
- 108, 108, 108, 112, 111, 111, 110, 113, 113, 113,
- 113, 113, 113, 113, 113, 369, 369, 369, 61, 61,
- 370, 321, 322, 323, 5, 6, 346, 368, 120, 120,
- 28, 37, 37, 29, 29, 29, 29, 30, 30, 62,
- 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
- 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
- 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
- 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
- 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
- 63, 276, 276, 285, 285, 275, 275, 300, 300, 300,
- 278, 278, 278, 279, 279, 395, 395, 395, 272, 272,
- 64, 64, 64, 301, 301, 301, 301, 66, 66, 67,
- 68, 68, 303, 303, 304, 304, 69, 70, 82, 82,
- 82, 82, 82, 82, 82, 105, 105, 105, 15, 15,
- 15, 15, 78, 78, 78, 14, 14, 65, 65, 72,
- 392, 392, 393, 394, 394, 394, 394, 73, 75, 31,
- 31, 31, 31, 31, 31, 130, 130, 118, 118, 118,
- 118, 118, 118, 118, 118, 118, 118, 118, 118, 125,
- 125, 125, 119, 119, 409, 76, 77, 77, 123, 123,
- 123, 116, 116, 116, 122, 122, 122, 16, 16, 17,
- 258, 258, 18, 18, 127, 127, 129, 129, 129, 129,
- 129, 131, 131, 131, 131, 131, 131, 131, 126, 126,
- 128, 128, 128, 128, 293, 293, 293, 292, 292, 163,
- 163, 165, 164, 164, 166, 166, 167, 167, 167, 167,
- 212, 212, 189, 189, 251, 251, 252, 252, 250, 250,
- 257, 257, 253, 253, 253, 253, 260, 260, 168, 168,
- 168, 168, 176, 176, 177, 177, 178, 178, 302, 302,
- 298, 298, 298, 297, 297, 182, 182, 182, 184, 183,
- 183, 183, 183, 185, 185, 187, 187, 186, 186, 188,
- 193, 193, 192, 192, 190, 190, 190, 190, 191, 191,
- 191, 191, 194, 194, 140, 140, 140, 140, 140, 140,
- 140, 155, 155, 155, 155, 158, 158, 158, 158, 158,
- 158, 158, 158, 158, 158, 158, 241, 241, 146, 146,
- 146, 146, 146, 146, 146, 146, 146, 146, 146, 146,
- 146, 150, 150, 150, 150, 150, 150, 150, 150, 150,
- 150, 150, 150, 150, 150, 150, 150, 150, 150, 150,
- 150, 150, 150, 150, 150, 150, 149, 217, 217, 216,
- 216, 83, 83, 83, 84, 84, 85, 85, 85, 85,
- 85, 86, 86, 86, 86, 86, 141, 141, 88, 88,
- 87, 87, 207, 207, 290, 290, 89, 90, 90, 93,
- 93, 92, 91, 91, 97, 97, 94, 94, 96, 96,
- 95, 98, 98, 99, 100, 100, 273, 273, 195, 195,
- 203, 203, 203, 203, 196, 196, 196, 196, 196, 196,
- 196, 204, 204, 204, 211, 205, 205, 201, 201, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 160, 160, 160, 160, 222,
- 222, 147, 147, 147, 147, 147, 147, 147, 147, 147,
- 147, 147, 147, 147, 147, 147, 148, 148, 161, 161,
- 161, 161, 162, 162, 162, 162, 162, 162, 162, 310,
- 310, 115, 115, 115, 115, 115, 115, 115, 115, 115,
- 115, 115, 115, 114, 114, 114, 114, 114, 114, 114,
- 114, 114, 410, 410, 324, 324, 324, 324, 202, 202,
- 202, 202, 202, 121, 121, 121, 121, 121, 307, 307,
- 307, 311, 311, 311, 309, 309, 309, 309, 309, 309,
- 309, 309, 309, 309, 309, 309, 309, 309, 309, 312,
- 312, 220, 220, 117, 117, 218, 218, 219, 221, 221,
- 213, 213, 213, 213, 215, 215, 198, 198, 198, 223,
- 223, 224, 224, 101, 102, 102, 103, 103, 225, 225,
- 227, 226, 226, 228, 229, 229, 229, 230, 230, 231,
- 231, 231, 47, 47, 47, 47, 47, 42, 42, 42,
- 42, 43, 43, 43, 43, 132, 132, 132, 132, 134,
- 134, 133, 133, 79, 79, 80, 80, 80, 138, 138,
- 139, 139, 139, 136, 136, 137, 137, 248, 248, 232,
- 232, 232, 239, 239, 239, 235, 235, 237, 237, 237,
- 238, 238, 238, 236, 245, 245, 247, 247, 246, 246,
- 242, 242, 243, 243, 244, 244, 244, 240, 240, 197,
- 197, 197, 197, 197, 249, 249, 249, 249, 261, 261,
- 208, 208, 210, 210, 209, 209, 159, 262, 262, 266,
- 263, 263, 267, 267, 267, 267, 255, 255, 255, 264,
- 264, 265, 265, 294, 294, 294, 271, 271, 284, 284,
- 280, 280, 281, 281, 274, 274, 286, 286, 286, 74,
- 206, 206, 362, 362, 359, 289, 289, 291, 291, 295,
- 295, 299, 299, 296, 296, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 406, 407, 305, 306,
- 306, 306,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 258,
+ 381, 382, 382, 256, 256, 33, 72, 35, 35, 34,
+ 34, 37, 37, 36, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 13, 13, 13, 13, 13, 20,
+ 21, 12, 12, 22, 22, 106, 106, 23, 24, 24,
+ 24, 24, 414, 414, 183, 183, 181, 181, 182, 182,
+ 261, 261, 25, 265, 265, 267, 267, 267, 267, 257,
+ 257, 257, 26, 26, 266, 266, 268, 268, 268, 271,
+ 271, 271, 271, 310, 310, 310, 27, 27, 27, 27,
+ 27, 126, 126, 384, 384, 383, 377, 377, 376, 376,
+ 375, 380, 380, 379, 379, 378, 39, 40, 49, 49,
+ 49, 49, 50, 51, 385, 385, 350, 56, 56, 55,
+ 55, 55, 55, 55, 55, 57, 57, 53, 53, 52,
+ 52, 54, 54, 352, 352, 338, 338, 351, 351, 351,
+ 351, 351, 351, 351, 337, 337, 137, 137, 235, 235,
+ 235, 235, 235, 235, 235, 235, 235, 235, 235, 235,
+ 235, 235, 235, 235, 235, 400, 400, 400, 399, 399,
+ 236, 236, 236, 236, 236, 236, 236, 236, 147, 147,
+ 159, 159, 159, 159, 159, 145, 145, 146, 144, 144,
+ 144, 153, 153, 153, 153, 153, 153, 153, 153, 153,
+ 153, 153, 153, 153, 153, 153, 153, 153, 404, 404,
+ 404, 404, 404, 404, 404, 404, 404, 404, 404, 404,
+ 404, 404, 404, 404, 404, 404, 404, 404, 404, 404,
+ 404, 404, 404, 404, 404, 404, 404, 404, 404, 404,
+ 404, 404, 404, 404, 404, 404, 404, 404, 404, 404,
+ 158, 158, 154, 154, 154, 155, 155, 155, 156, 156,
+ 401, 401, 401, 401, 315, 315, 315, 315, 318, 318,
+ 316, 316, 316, 316, 316, 316, 316, 316, 316, 317,
+ 317, 317, 317, 317, 317, 317, 319, 319, 319, 319,
+ 319, 320, 320, 320, 320, 320, 320, 320, 320, 320,
+ 320, 320, 320, 320, 320, 320, 320, 321, 321, 321,
+ 321, 321, 321, 321, 321, 336, 336, 322, 322, 330,
+ 330, 331, 331, 332, 332, 332, 333, 333, 333, 334,
+ 334, 327, 327, 327, 327, 327, 327, 327, 327, 327,
+ 329, 329, 328, 328, 328, 339, 364, 364, 363, 363,
+ 361, 361, 361, 361, 361, 361, 361, 361, 348, 348,
+ 358, 358, 358, 358, 358, 347, 347, 343, 343, 343,
+ 344, 344, 345, 345, 342, 342, 346, 346, 360, 360,
+ 359, 359, 340, 340, 341, 341, 366, 402, 402, 402,
+ 402, 402, 403, 403, 367, 392, 394, 394, 394, 393,
+ 393, 390, 391, 389, 389, 389, 389, 389, 82, 82,
+ 82, 284, 284, 285, 285, 356, 356, 355, 355, 355,
+ 357, 357, 354, 354, 354, 354, 354, 354, 354, 354,
+ 354, 354, 354, 354, 354, 354, 354, 354, 354, 354,
+ 354, 354, 354, 354, 354, 354, 354, 354, 354, 354,
+ 354, 354, 354, 279, 279, 279, 388, 388, 388, 388,
+ 388, 388, 387, 387, 387, 353, 353, 353, 353, 386,
+ 386, 58, 58, 216, 216, 405, 405, 406, 406, 406,
+ 46, 46, 46, 46, 46, 46, 45, 45, 45, 41,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 47, 47, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 108, 108, 109, 109, 109, 109, 111, 111, 111, 369,
+ 369, 59, 59, 3, 3, 171, 173, 174, 174, 172,
+ 172, 172, 172, 172, 172, 61, 61, 60, 60, 176,
+ 175, 177, 177, 177, 1, 1, 2, 2, 4, 4,
+ 374, 374, 374, 374, 374, 374, 374, 374, 374, 374,
+ 374, 374, 374, 374, 374, 374, 374, 374, 374, 374,
+ 374, 374, 335, 335, 335, 368, 368, 370, 110, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 114, 113,
+ 113, 112, 115, 115, 115, 115, 115, 115, 115, 115,
+ 372, 372, 372, 62, 62, 373, 323, 324, 325, 5,
+ 6, 349, 371, 122, 122, 29, 38, 38, 30, 30,
+ 30, 30, 31, 31, 63, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 278, 278, 287,
+ 287, 277, 277, 302, 302, 302, 280, 280, 280, 281,
+ 281, 398, 398, 398, 274, 274, 65, 65, 65, 303,
+ 303, 303, 303, 67, 67, 407, 407, 408, 408, 409,
+ 409, 409, 68, 69, 69, 305, 305, 306, 306, 70,
+ 71, 83, 83, 83, 83, 83, 83, 83, 84, 84,
+ 84, 84, 107, 107, 107, 15, 15, 15, 15, 79,
+ 79, 79, 14, 14, 17, 66, 66, 73, 395, 395,
+ 396, 397, 397, 397, 397, 74, 76, 32, 32, 32,
+ 32, 32, 32, 132, 132, 120, 120, 120, 120, 120,
+ 120, 120, 120, 120, 120, 120, 120, 127, 127, 127,
+ 121, 121, 415, 77, 78, 78, 125, 125, 125, 118,
+ 118, 118, 124, 124, 124, 16, 16, 18, 260, 260,
+ 19, 19, 129, 129, 131, 131, 131, 131, 131, 133,
+ 133, 133, 133, 133, 133, 133, 128, 128, 130, 130,
+ 130, 130, 295, 295, 295, 294, 294, 165, 165, 167,
+ 166, 166, 168, 168, 169, 169, 169, 169, 214, 214,
+ 191, 191, 253, 253, 254, 254, 252, 252, 259, 259,
+ 255, 255, 255, 255, 262, 262, 170, 170, 170, 170,
+ 178, 178, 179, 179, 180, 180, 304, 304, 300, 300,
+ 300, 299, 299, 184, 184, 184, 186, 185, 185, 185,
+ 185, 187, 187, 189, 189, 188, 188, 190, 195, 195,
+ 194, 194, 192, 192, 192, 192, 193, 193, 193, 193,
+ 196, 196, 142, 142, 142, 142, 142, 142, 142, 157,
+ 157, 157, 157, 160, 160, 160, 160, 160, 160, 160,
+ 160, 160, 160, 160, 243, 243, 148, 148, 148, 148,
+ 148, 148, 148, 148, 148, 148, 148, 148, 148, 152,
+ 152, 152, 152, 152, 152, 152, 152, 152, 152, 152,
+ 152, 152, 152, 152, 152, 152, 152, 152, 152, 152,
+ 152, 152, 152, 152, 151, 219, 219, 218, 218, 85,
+ 85, 85, 86, 86, 87, 87, 87, 87, 87, 88,
+ 88, 88, 88, 88, 143, 143, 90, 90, 89, 89,
+ 209, 209, 292, 292, 91, 92, 92, 95, 95, 94,
+ 93, 93, 99, 99, 96, 96, 98, 98, 97, 100,
+ 100, 101, 102, 102, 275, 275, 197, 197, 205, 205,
+ 205, 205, 198, 198, 198, 198, 198, 198, 198, 206,
+ 206, 206, 213, 207, 207, 203, 203, 201, 201, 201,
+ 201, 201, 201, 201, 201, 201, 201, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
+ 202, 202, 202, 162, 162, 162, 162, 224, 224, 149,
+ 149, 149, 149, 149, 149, 149, 149, 149, 149, 149,
+ 149, 149, 149, 149, 150, 150, 163, 163, 163, 163,
+ 164, 164, 164, 164, 164, 164, 164, 312, 312, 117,
+ 117, 117, 117, 117, 117, 117, 117, 117, 117, 117,
+ 117, 116, 116, 116, 116, 116, 116, 116, 116, 116,
+ 416, 416, 326, 326, 326, 326, 204, 204, 204, 204,
+ 204, 123, 123, 123, 123, 123, 309, 309, 309, 313,
+ 313, 313, 311, 311, 311, 311, 311, 311, 311, 311,
+ 311, 311, 311, 311, 311, 311, 311, 314, 314, 222,
+ 222, 119, 119, 220, 220, 221, 223, 223, 215, 215,
+ 215, 215, 217, 217, 200, 200, 200, 225, 225, 226,
+ 226, 103, 104, 104, 105, 105, 227, 227, 229, 228,
+ 228, 230, 231, 231, 231, 232, 232, 233, 233, 233,
+ 48, 48, 48, 48, 48, 43, 43, 43, 43, 44,
+ 44, 44, 44, 134, 134, 134, 134, 136, 136, 135,
+ 135, 80, 80, 81, 81, 81, 140, 140, 141, 141,
+ 141, 138, 138, 139, 139, 250, 250, 250, 250, 250,
+ 250, 250, 234, 234, 234, 241, 241, 241, 237, 237,
+ 239, 239, 239, 240, 240, 240, 238, 247, 247, 249,
+ 249, 248, 248, 244, 244, 245, 245, 246, 246, 246,
+ 242, 242, 199, 199, 199, 199, 199, 251, 251, 251,
+ 251, 263, 263, 210, 210, 212, 212, 211, 211, 161,
+ 264, 264, 272, 269, 269, 270, 270, 296, 296, 296,
+ 273, 273, 286, 286, 282, 282, 283, 283, 276, 276,
+ 288, 288, 288, 75, 208, 208, 365, 365, 362, 291,
+ 291, 293, 293, 297, 297, 301, 301, 298, 298, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289,
+ 289, 289, 289, 289, 289, 289, 289, 289, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290,
+ 290, 290, 290, 290, 290, 290, 290, 290, 412, 413,
+ 307, 308, 308, 308,
}
var yyR2 = [...]int{
0, 3, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
- 0, 1, 1, 1, 2, 3, 2, 3, 0, 1,
- 3, 1, 4, 3, 3, 4, 3, 2, 3, 4,
- 3, 4, 2, 7, 1, 3, 3, 3, 3, 1,
- 2, 1, 1, 3, 2, 3, 3, 2, 5, 7,
- 10, 9, 7, 8, 1, 1, 10, 11, 9, 8,
- 8, 1, 1, 1, 3, 1, 3, 1, 3, 0,
- 4, 3, 5, 4, 1, 3, 3, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
+ 1, 0, 1, 1, 1, 2, 3, 2, 3, 0,
+ 1, 3, 1, 4, 3, 3, 4, 3, 2, 3,
+ 4, 3, 4, 2, 7, 1, 3, 3, 3, 3,
+ 1, 2, 1, 1, 3, 2, 3, 3, 2, 5,
+ 7, 10, 9, 7, 8, 1, 1, 10, 11, 9,
+ 8, 8, 1, 1, 1, 3, 1, 3, 1, 3,
+ 0, 4, 3, 1, 3, 3, 3, 3, 3, 1,
+ 1, 2, 5, 4, 1, 3, 3, 2, 2, 2,
2, 2, 1, 1, 1, 1, 2, 2, 6, 12,
2, 0, 2, 0, 2, 1, 0, 2, 1, 3,
3, 0, 1, 1, 3, 3, 6, 4, 7, 8,
@@ -7620,122 +7654,125 @@ var yyR2 = [...]int{
1, 1, 1, 1, 1, 1, 1, 2, 1, 1,
2, 1, 2, 1, 3, 1, 1, 1, 2, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
- 2, 2, 2, 2, 1, 2, 2, 2, 2, 3,
- 3, 3, 2, 2, 2, 2, 2, 2, 1, 1,
- 1, 1, 1, 5, 5, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 3, 0, 3, 0, 5, 0,
- 3, 5, 0, 1, 1, 0, 1, 0, 3, 3,
- 2, 2, 2, 1, 2, 2, 0, 1, 0, 2,
- 2, 5, 0, 1, 1, 2, 1, 3, 2, 1,
- 1, 3, 3, 3, 0, 1, 4, 3, 3, 4,
- 2, 0, 2, 1, 1, 1, 1, 1, 0, 1,
- 1, 1, 0, 1, 1, 3, 3, 4, 3, 1,
- 3, 1, 7, 6, 7, 7, 8, 8, 0, 1,
- 5, 2, 1, 1, 1, 0, 1, 3, 3, 1,
- 1, 2, 2, 2, 0, 1, 1, 1, 2, 0,
- 1, 0, 1, 1, 3, 2, 1, 2, 3, 3,
- 3, 4, 4, 3, 3, 3, 3, 4, 4, 3,
+ 2, 2, 2, 2, 2, 2, 1, 2, 2, 2,
+ 2, 3, 3, 3, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 5, 5, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 3, 0, 3, 0,
+ 5, 1, 3, 0, 3, 5, 0, 1, 1, 0,
+ 1, 0, 3, 3, 2, 2, 2, 1, 2, 2,
+ 0, 1, 0, 2, 2, 5, 0, 1, 1, 2,
+ 1, 3, 2, 1, 1, 3, 3, 3, 0, 1,
+ 4, 3, 3, 4, 2, 0, 2, 1, 1, 1,
+ 1, 1, 0, 1, 1, 1, 0, 1, 1, 3,
+ 3, 4, 3, 1, 3, 1, 7, 6, 7, 7,
+ 8, 8, 0, 1, 5, 2, 1, 1, 1, 0,
+ 1, 3, 3, 1, 1, 2, 2, 2, 0, 1,
+ 1, 1, 2, 0, 1, 0, 1, 1, 3, 2,
+ 1, 2, 3, 3, 3, 4, 4, 3, 3, 3,
+ 3, 4, 4, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 4, 5, 0,
- 2, 2, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 0, 1, 0, 1, 0, 2,
- 0, 2, 0, 2, 2, 0, 1, 5, 1, 3,
- 7, 1, 3, 3, 1, 2, 2, 2, 5, 5,
- 5, 6, 8, 5, 5, 4, 4, 4, 6, 5,
- 5, 5, 2, 2, 2, 2, 3, 3, 3, 4,
- 3, 3, 1, 3, 5, 1, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 2, 2, 3, 4, 4,
- 2, 11, 3, 6, 8, 6, 6, 6, 13, 8,
- 6, 10, 5, 5, 5, 7, 5, 5, 5, 5,
- 5, 7, 7, 5, 5, 0, 6, 5, 6, 4,
- 5, 0, 8, 9, 0, 3, 0, 1, 0, 3,
- 8, 4, 1, 3, 3, 6, 7, 7, 8, 4,
- 0, 1, 0, 1, 3, 3, 1, 1, 2, 1,
- 1, 0, 2, 0, 2, 5, 3, 7, 4, 4,
- 4, 4, 3, 3, 3, 7, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 2, 0, 2, 2,
- 1, 3, 2, 0, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 3, 1, 3, 3, 0, 2, 2,
- 2, 2, 2, 2, 2, 4, 4, 3, 0, 1,
- 4, 3, 4, 4, 3, 3, 3, 2, 1, 3,
- 3, 3, 5, 7, 7, 6, 5, 3, 2, 3,
- 3, 3, 7, 3, 3, 3, 3, 4, 7, 5,
- 2, 4, 4, 4, 4, 4, 5, 5, 4, 4,
- 4, 4, 4, 4, 4, 4, 2, 2, 4, 4,
- 4, 4, 4, 2, 3, 3, 3, 5, 2, 3,
- 3, 2, 3, 4, 4, 4, 3, 4, 4, 5,
- 3, 0, 1, 0, 1, 1, 1, 0, 2, 2,
- 0, 2, 2, 0, 2, 0, 1, 1, 1, 1,
- 2, 1, 3, 1, 1, 1, 1, 1, 2, 1,
- 1, 5, 0, 1, 0, 1, 2, 3, 0, 3,
- 3, 3, 3, 3, 1, 1, 1, 1, 1, 1,
- 1, 1, 0, 1, 1, 4, 4, 2, 2, 3,
- 1, 3, 2, 1, 2, 1, 2, 2, 4, 3,
- 3, 6, 4, 7, 6, 1, 3, 2, 2, 2,
- 2, 1, 1, 1, 3, 2, 1, 1, 1, 0,
- 1, 1, 0, 3, 0, 2, 0, 2, 1, 2,
- 2, 0, 1, 1, 0, 1, 1, 5, 5, 4,
- 0, 2, 4, 4, 0, 1, 0, 1, 2, 3,
- 4, 1, 1, 1, 1, 1, 1, 1, 1, 3,
- 1, 2, 3, 5, 0, 1, 2, 1, 1, 0,
- 1, 2, 1, 3, 1, 1, 1, 4, 3, 1,
- 1, 2, 3, 7, 0, 3, 0, 1, 1, 3,
- 1, 3, 1, 1, 3, 3, 1, 3, 4, 4,
- 4, 3, 2, 4, 0, 1, 0, 2, 0, 1,
- 0, 1, 2, 1, 1, 1, 2, 2, 1, 2,
- 3, 2, 3, 2, 2, 2, 1, 1, 3, 3,
- 0, 1, 1, 2, 6, 5, 6, 6, 0, 2,
- 3, 3, 0, 2, 3, 3, 3, 2, 3, 1,
- 6, 3, 4, 3, 1, 3, 4, 5, 6, 3,
- 4, 5, 6, 3, 4, 1, 1, 1, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 1, 1, 1, 1, 1, 3, 1, 1, 1, 2,
- 2, 2, 2, 1, 1, 2, 7, 7, 6, 6,
- 2, 2, 1, 6, 3, 3, 3, 1, 3, 1,
- 3, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 2, 2, 2, 2, 2, 1, 1, 0, 1,
- 2, 5, 0, 3, 0, 1, 4, 4, 2, 0,
- 1, 1, 2, 2, 1, 1, 2, 2, 0, 1,
- 1, 1, 1, 5, 1, 3, 0, 3, 1, 1,
- 1, 2, 1, 2, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 3, 4, 6, 4,
- 4, 8, 6, 8, 6, 5, 4, 10, 2, 2,
- 1, 2, 2, 2, 4, 5, 5, 5, 5, 5,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 8, 8, 8, 6, 5, 4, 4, 4, 4, 4,
- 7, 4, 4, 6, 6, 6, 8, 6, 6, 4,
- 4, 3, 4, 6, 6, 4, 4, 4, 6, 8,
- 6, 4, 6, 6, 8, 10, 7, 8, 8, 9,
- 4, 4, 4, 4, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 4, 4, 6, 5, 9, 6,
- 9, 1, 1, 1, 1, 1, 1, 1, 1, 0,
- 2, 6, 8, 10, 12, 14, 6, 8, 8, 10,
- 12, 14, 6, 8, 10, 12, 6, 8, 4, 4,
- 3, 4, 6, 6, 4, 6, 4, 6, 8, 0,
- 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 3, 4, 5, 0, 2, 2, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 3, 1, 1, 1, 0,
+ 1, 0, 1, 0, 2, 0, 2, 0, 2, 2,
+ 0, 1, 5, 1, 3, 7, 1, 3, 3, 1,
+ 2, 2, 2, 5, 5, 5, 6, 8, 5, 5,
+ 4, 4, 4, 6, 5, 5, 5, 2, 2, 2,
+ 2, 3, 3, 3, 4, 3, 3, 1, 3, 5,
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 3, 4, 4, 2, 11, 3, 6, 8,
+ 6, 6, 6, 13, 8, 6, 10, 5, 5, 5,
+ 7, 5, 5, 5, 5, 5, 7, 7, 5, 5,
+ 0, 6, 5, 6, 4, 5, 0, 8, 9, 0,
+ 3, 0, 1, 0, 3, 8, 4, 1, 3, 3,
+ 6, 7, 7, 8, 4, 0, 1, 0, 1, 3,
+ 3, 1, 1, 2, 1, 1, 0, 2, 0, 2,
+ 5, 3, 7, 4, 4, 4, 4, 3, 3, 3,
+ 7, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 2, 0, 2, 2, 1, 3, 2, 0, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 1,
+ 3, 3, 0, 2, 2, 2, 2, 2, 2, 2,
+ 4, 4, 3, 0, 1, 4, 3, 4, 4, 3,
+ 3, 3, 2, 1, 3, 3, 3, 5, 7, 7,
+ 6, 5, 3, 2, 3, 3, 3, 7, 3, 3,
+ 3, 3, 4, 7, 5, 2, 4, 4, 4, 4,
+ 4, 5, 5, 4, 4, 4, 4, 4, 4, 4,
+ 4, 2, 2, 4, 4, 4, 4, 4, 2, 3,
+ 3, 3, 3, 5, 2, 3, 3, 2, 3, 4,
+ 4, 4, 3, 4, 4, 5, 3, 0, 1, 0,
+ 1, 1, 1, 0, 2, 2, 0, 2, 2, 0,
+ 2, 0, 1, 1, 1, 1, 2, 1, 3, 1,
+ 1, 1, 1, 1, 3, 0, 1, 1, 3, 3,
+ 2, 2, 1, 1, 5, 0, 1, 0, 1, 2,
+ 3, 0, 3, 3, 3, 3, 3, 1, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
+ 1, 1, 4, 4, 4, 2, 2, 3, 1, 3,
+ 2, 1, 2, 1, 2, 2, 4, 3, 3, 6,
+ 4, 7, 6, 1, 3, 2, 2, 2, 2, 1,
+ 1, 1, 3, 2, 1, 1, 1, 0, 1, 1,
+ 0, 3, 0, 2, 0, 2, 1, 2, 2, 0,
+ 1, 1, 0, 1, 1, 5, 5, 4, 0, 2,
+ 4, 4, 0, 1, 0, 1, 2, 3, 4, 1,
+ 1, 1, 1, 1, 1, 1, 1, 3, 1, 2,
+ 3, 5, 0, 1, 2, 1, 1, 0, 1, 2,
+ 1, 3, 1, 1, 1, 4, 3, 1, 1, 2,
+ 3, 7, 0, 3, 0, 1, 1, 3, 1, 3,
+ 1, 1, 3, 3, 1, 3, 4, 4, 4, 3,
+ 2, 4, 0, 1, 0, 2, 0, 1, 0, 1,
+ 2, 1, 1, 1, 2, 2, 1, 2, 3, 2,
+ 3, 2, 2, 2, 1, 1, 3, 3, 0, 1,
+ 1, 2, 6, 5, 6, 6, 0, 2, 3, 3,
+ 0, 2, 3, 3, 3, 2, 3, 1, 6, 3,
+ 4, 3, 1, 3, 4, 5, 6, 3, 4, 5,
+ 6, 3, 4, 1, 1, 1, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 1, 1,
+ 1, 1, 1, 3, 1, 1, 1, 2, 2, 2,
+ 2, 1, 1, 2, 7, 7, 6, 6, 2, 2,
+ 1, 6, 3, 3, 3, 1, 3, 1, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 1, 1, 0, 1, 2, 5,
+ 0, 3, 0, 1, 4, 4, 2, 0, 1, 1,
+ 2, 2, 1, 1, 2, 2, 0, 1, 1, 1,
+ 1, 5, 1, 3, 0, 3, 1, 1, 1, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 3, 4, 6, 4, 4, 8,
+ 6, 8, 6, 5, 4, 10, 2, 2, 1, 2,
+ 2, 2, 4, 5, 5, 5, 5, 5, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 8, 8,
+ 8, 6, 5, 4, 4, 4, 4, 4, 7, 4,
+ 4, 6, 6, 6, 8, 6, 6, 4, 4, 3,
+ 4, 6, 6, 4, 4, 4, 6, 8, 6, 4,
+ 6, 6, 8, 10, 7, 8, 8, 9, 4, 4,
+ 4, 4, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 4, 4, 6, 5, 9, 6, 9, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 2, 6,
+ 8, 10, 12, 14, 6, 8, 8, 10, 12, 14,
+ 6, 8, 10, 12, 6, 8, 4, 4, 3, 4,
+ 6, 6, 4, 6, 4, 6, 8, 0, 2, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 2, 0, 2, 3, 3, 4, 4, 4, 4,
+ 4, 0, 3, 4, 7, 3, 1, 1, 1, 0,
+ 5, 5, 2, 3, 1, 2, 2, 1, 2, 1,
+ 2, 2, 1, 2, 2, 1, 1, 0, 1, 0,
+ 1, 0, 2, 1, 2, 4, 0, 2, 1, 1,
+ 3, 5, 1, 1, 1, 2, 2, 0, 3, 0,
+ 2, 2, 1, 3, 0, 1, 0, 1, 3, 1,
+ 3, 2, 0, 1, 1, 0, 1, 2, 4, 4,
+ 0, 2, 2, 1, 1, 3, 3, 3, 3, 3,
+ 3, 3, 3, 0, 3, 3, 3, 0, 3, 1,
+ 1, 0, 4, 0, 1, 1, 0, 3, 1, 3,
+ 2, 1, 1, 0, 1, 2, 3, 4, 2, 3,
+ 4, 4, 9, 3, 5, 0, 3, 3, 0, 1,
+ 0, 2, 2, 0, 2, 2, 2, 0, 2, 1,
+ 2, 3, 3, 0, 2, 1, 2, 3, 4, 3,
+ 0, 1, 2, 1, 5, 4, 4, 1, 3, 3,
+ 5, 0, 5, 1, 3, 1, 2, 3, 4, 1,
+ 1, 3, 3, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 0, 1, 0, 2, 0, 3, 0, 1,
+ 0, 1, 1, 5, 0, 1, 0, 1, 2, 1,
+ 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 0, 2, 0, 2, 3, 3, 4, 4,
- 4, 4, 4, 0, 3, 4, 7, 3, 1, 1,
- 1, 0, 5, 5, 2, 3, 1, 2, 2, 1,
- 2, 1, 2, 2, 1, 2, 2, 1, 1, 0,
- 1, 0, 1, 0, 2, 1, 2, 4, 0, 2,
- 1, 1, 3, 5, 1, 1, 1, 2, 2, 0,
- 3, 0, 2, 2, 1, 3, 0, 1, 0, 1,
- 3, 1, 3, 2, 0, 1, 1, 0, 1, 2,
- 4, 4, 0, 2, 2, 1, 1, 3, 3, 3,
- 3, 3, 3, 3, 3, 0, 3, 3, 3, 0,
- 3, 1, 1, 0, 4, 0, 1, 1, 0, 3,
- 1, 3, 2, 1, 1, 0, 1, 2, 4, 9,
- 3, 5, 0, 3, 3, 0, 1, 0, 2, 2,
- 0, 2, 2, 2, 0, 2, 1, 2, 3, 3,
- 0, 2, 1, 2, 3, 4, 3, 0, 1, 2,
- 1, 5, 4, 4, 1, 3, 3, 5, 0, 5,
- 1, 3, 1, 2, 3, 4, 1, 1, 3, 3,
- 1, 3, 3, 3, 3, 3, 1, 1, 2, 1,
- 2, 1, 1, 1, 1, 1, 1, 1, 0, 1,
- 0, 2, 0, 3, 0, 1, 0, 1, 1, 5,
- 0, 1, 0, 1, 2, 1, 1, 1, 1, 1,
- 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -7789,482 +7826,485 @@ var yyR2 = [...]int{
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
- 1, 1,
+ 0, 0, 1, 1,
}
var yyChk = [...]int{
- -1000, -404, -76, -409, -7, -11, -19, -20, -21, -22,
- -23, -24, -25, -26, -27, -28, -29, -30, -62, -63,
- -64, -66, -67, -68, -69, -70, -14, -65, -31, -32,
- -71, -72, -73, -74, -75, -16, -17, -18, -9, -8,
- -13, 10, 11, -104, -33, 33, -38, -48, 225, -49,
- -39, 226, -50, 228, 227, 264, 229, 257, 75, 311,
- 312, 314, 315, 316, 317, -105, 262, 263, 231, 37,
- 46, 34, 35, 38, 235, 270, 271, 234, -10, -34,
- 9, -406, 12, 443, 259, 258, 29, -12, 505, 87,
- -77, -405, 657, -248, -232, 23, 34, 30, -231, -227,
- -123, -232, 21, 19, 8, -76, -76, -76, 13, 14,
- -76, -347, -349, 87, 159, 87, -76, -55, -54, -52,
- -51, -53, -56, 32, -45, -46, -371, -44, -41, 230,
- 227, 274, 123, 124, 264, 265, 266, 229, 248, 263,
- 267, 262, 283, -40, 82, 34, 505, 508, -354, 226,
- 232, 233, 228, 444, 126, 125, 76, -351, 366, 538,
- 627, -56, 629, 101, 104, 628, 45, 238, 630, 631,
- 632, 545, 633, 247, 634, 635, 636, 637, 643, 586,
- 644, 645, 646, 127, 8, -76, -299, -295, 91, -288,
- 502, 250, 536, 537, 299, 82, 42, 511, 363, 366,
- 538, 473, 627, 311, 325, 319, 478, 479, 480, 346,
- 338, 503, 539, 512, 302, 251, 287, 621, 336, 135,
- 629, 306, 540, 265, 371, 372, 541, 373, 101, 314,
- 410, 642, 305, 542, 640, 104, 628, 80, 472, 52,
- 624, 45, 260, 334, 234, 330, 630, 288, 543, 514,
- 281, 126, 123, 649, 37, 328, 51, 31, 639, 125,
- 50, 631, 150, 544, 632, 545, 375, 353, 615, 49,
- 376, 266, 546, 85, 271, 507, 309, 623, 377, 492,
- 329, 378, 298, 638, 231, 547, 606, 598, 599, 379,
- 380, 616, 358, 354, 359, 494, 548, 402, 477, 381,
- 602, 603, 656, 53, 549, 550, 617, 124, 551, 79,
- 633, 81, 323, 324, 552, 296, 249, 497, 498, 404,
- 350, 455, 462, 463, 111, 112, 458, 113, 464, 114,
- 465, 466, 467, 456, 115, 108, 457, 468, 469, 351,
- 352, 116, 470, 110, 109, 459, 461, 117, 471, 247,
- 36, 382, 504, 300, 59, 304, 275, 405, 47, 356,
- 653, 46, 611, 499, 553, 614, 349, 345, 452, 54,
- 554, 555, 556, 557, 474, 634, 348, 322, 344, 648,
- 4, 293, 475, 635, 63, 233, 361, 360, 362, 282,
- 401, 341, 558, 559, 560, 254, 83, 561, 331, 22,
- 562, 563, 383, 289, 564, 57, 565, 566, 408, 263,
- 567, 55, 636, 40, 568, 268, 650, 637, 569, 570,
- 571, 572, 270, 573, 385, 574, 600, 601, 384, 355,
- 357, 500, 277, 386, 506, 575, 310, 327, 267, 641,
- 576, 255, 488, 489, 490, 491, 622, 496, 495, 269,
- 274, 262, 409, 256, 577, 578, 579, 580, 581, 303,
- 597, 582, 583, 315, 643, 453, 44, 584, 585, 586,
- 587, 588, 297, 292, 403, 412, 62, 84, 368, 589,
- 590, 620, 321, 290, 591, 312, 56, 644, 645, 646,
- 284, 647, 481, 482, 483, 484, 10, 654, 655, 476,
- 388, 127, 294, 295, 48, 342, 276, 592, 307, 593,
- 332, 333, 347, 320, 343, 313, 607, 278, 389, 454,
- 264, 594, 411, 291, 364, 369, 308, 510, 493, 283,
- 390, 619, 509, 485, 486, 340, 337, 285, 487, 595,
- 391, 239, 279, 280, 596, 608, 392, 393, 301, 394,
- 395, 396, 397, 398, 400, 399, 604, 605, 286, 508,
- 318, 335, 370, 424, 425, 426, 427, 428, 429, 430,
- 431, 432, 433, 434, 435, 436, 437, 438, 439, 440,
- 441, 451, 237, -76, 237, -186, -295, 237, -264, 372,
- -285, 374, 387, 382, 392, 380, -276, 383, 385, 277,
- -395, 402, 237, 389, 225, 375, 384, 393, 394, 301,
- 400, 395, 399, 286, 396, 397, 398, -378, 177, 632,
- 647, 135, 339, 379, 377, 403, 611, 91, -301, 91,
- 92, 93, -288, 313, -303, 318, -289, -378, -288, 316,
- -76, -305, -305, -125, 611, 613, -205, -140, 143, -155,
- -158, -146, -150, -199, -200, -201, -202, -156, -215, -254,
- 166, 167, 174, 144, -211, -159, 27, 501, 445, 444,
- 177, 32, -149, 220, 69, 70, 447, 146, 58, 12,
- 420, 421, -157, 415, 422, 417, 472, 474, 475, 476,
- 473, 478, 479, 480, 481, 482, 483, 484, 485, 486,
- 487, 477, 449, 450, 118, 451, 108, 110, 109, 452,
- 453, 454, 336, 499, 500, 494, 497, 498, 496, 495,
- 351, 352, 455, 456, 457, 111, 112, 113, 114, 115,
- 116, 117, 458, 461, 459, 460, 462, 463, 464, 469,
- 470, 465, 466, 467, 468, 471, -85, -97, 527, 526,
- -98, -147, -148, -161, -162, -289, -295, 242, 414, 236,
- 172, 443, -151, -144, -213, 107, 92, 93, -8, -209,
- 413, 418, 419, 423, 416, 513, 515, 530, 531, 533,
- 518, 523, 522, 525, 488, 489, 490, 491, 492, 493,
- 598, 599, 600, 601, 602, 603, 604, 605, -378, -288,
- 91, -153, -152, -195, 94, 99, 102, 103, 105, -401,
- 260, 332, 333, 119, -406, 625, 90, 95, 96, 97,
- 98, 120, 121, 178, 179, 180, 181, 182, 183, 184,
- 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
- 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
- 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
- 215, 216, 217, 218, 219, 45, 388, 388, -186, -76,
- -76, -76, -76, -225, -123, -227, -10, -8, -406, 9,
- -76, -8, -9, -13, -34, -36, 532, -35, -295, 100,
- -232, -248, 13, 162, 43, 51, -230, -231, -12, -8,
- -140, 20, 24, 25, -128, 168, -140, -295, -128, -274,
- 241, -76, -76, -263, -308, 313, -267, 403, 611, 402,
- -255, -265, 91, -254, -264, 401, -348, 159, -334, -338,
- -289, 252, -364, 248, -186, -357, -356, -289, -406, -124,
- -284, 238, 246, 245, 136, -382, 139, 294, 414, 236,
- -51, -52, -53, -264, 176, 631, -106, 269, 273, 88,
- 88, -338, -337, -336, -383, 273, 252, -363, -355, 244,
- 253, -344, 245, 246, -339, 238, 137, -383, -339, 243,
- 253, 248, 252, 273, 273, 127, 273, 127, 273, 273,
- 273, 273, 273, 273, 273, 273, 273, 268, -345, 151,
- -345, 509, 509, -351, -383, 248, 238, -383, -383, 244,
- -286, -339, 240, 26, 240, 36, 36, -345, -345, -345,
- -264, 176, -345, -345, -345, -345, 281, 281, -345, -345,
- -345, -345, -345, -345, -345, -345, -345, -345, -345, -345,
- -345, -345, -345, -345, -345, 237, -382, -132, 399, 301,
- 82, -54, 283, -37, -186, -284, 238, 239, -382, 270,
- -186, 221, -186, -278, 159, 16, -278, -275, 388, 386,
- 373, 378, -278, -278, -278, -278, 284, 371, -340, 238,
- 36, 249, 388, 284, 371, 284, 285, 284, 285, 381,
- 391, 284, -300, 15, 162, 414, 376, 380, 277, 237,
- 278, 239, 390, 285, -300, 90, -279, 159, 388, 280,
- -278, -278, -306, -406, -291, -289, -287, 230, 24, 142,
- 26, 28, 145, 177, 130, 20, 146, 38, 232, 339,
- 248, 176, 244, 444, 225, 73, 513, 415, 417, 413,
- 420, 446, 447, 414, 374, 32, 14, 515, 29, 258,
- 25, 39, 170, 227, 149, 516, 261, 27, 259, 118,
- 121, 518, 23, 76, 253, 15, 246, 41, 17, 519,
- 520, 18, 242, 241, 162, 238, 71, 12, 220, 30,
- 158, 67, 521, 137, 522, 523, 524, 525, 131, 69,
- 159, 21, 651, 418, 419, 34, 612, 501, 272, 172,
- 74, 60, 613, 143, 416, 526, 527, 119, 528, 122,
- 77, 618, 139, 19, 72, 43, 529, 273, 530, 243,
- 652, 531, 406, 532, 160, 228, 443, 70, 161, 625,
- 533, 626, 236, 387, 9, 448, 33, 257, 245, 129,
- 68, 534, 237, 148, 449, 450, 240, 132, 120, 8,
- 136, 35, 13, 75, 78, 421, 422, 423, 58, 128,
- 505, 147, 16, 535, 407, 141, -378, 614, -306, -306,
- 33, 92, 240, -289, -186, -82, 606, 229, -130, 388,
- -118, 177, 632, 615, 616, 617, 614, 385, 622, 620,
- 618, 284, 619, 88, 139, 141, 142, 4, -140, 158,
- -196, 151, 152, 153, 154, 155, 156, 157, 162, 143,
- 145, 159, -241, 140, 163, 164, 165, 166, 167, 168,
- 169, 171, 170, 172, 173, 160, 161, 176, 223, 224,
- -150, -150, -150, -150, -211, -217, -216, -406, -213, -378,
- -288, -295, -406, -406, -150, -273, -406, -406, -150, -406,
- -406, -406, -220, -140, -406, -406, -410, -406, -410, -410,
- -324, -406, -324, -406, -406, -406, -406, -406, -406, -406,
- -406, -406, -406, -406, -406, -406, -406, -406, -406, -406,
- -406, -406, -406, -406, -406, -406, -406, -406, -406, -406,
- -406, -406, -406, -406, -406, -406, -406, -406, -406, -406,
- -406, -406, -406, -406, -406, -406, -406, -406, -406, -406,
- -406, -406, -406, -406, -406, -406, -406, -406, -406, -406,
- -406, -406, -406, -406, -406, -406, -406, -406, -406, 221,
- -406, -406, -406, -406, -406, -324, -324, -324, -324, -324,
- -406, -406, -406, -406, -406, -406, -406, -406, -406, -406,
- -406, -406, -406, -406, 103, 99, 102, 94, -215, 105,
- 90, 90, 90, 90, -8, -9, -205, -406, -305, -392,
- -393, -189, -186, -406, 301, -289, -289, 270, -230, -12,
- -8, -225, -231, -227, -8, -76, -116, -129, 64, 65,
- -131, 25, 39, 68, 66, 24, -407, 89, -407, -248,
- -407, 88, -36, -251, 87, 62, 44, 90, 90, 88,
- 22, -226, -228, -140, 15, -293, 4, -292, 26, -289,
- 90, 221, 15, -187, 30, -186, -274, -274, 88, 313,
- 91, -269, -268, 404, 406, 151, -294, -289, 90, 32,
- 89, 88, -186, -313, -316, -318, -317, -319, -314, -315,
- 336, 337, 177, 340, 342, 343, 344, 345, 346, 347,
- 348, 349, 350, 353, 33, 260, 332, 333, 334, 335,
- 354, 355, 356, 357, 359, 360, 361, 362, 319, 338,
- 503, 320, 321, 322, 323, 324, 325, 327, 328, 329,
- 330, 331, -379, -378, 87, 89, 88, -320, 87, -140,
- -132, 237, -378, 238, 238, 238, -76, 443, -345, -345,
- -345, 268, 20, -44, -41, -371, 19, -40, -41, 230,
- 123, 124, 227, 87, -334, 87, -343, -379, -378, 87,
- 137, 243, 136, -342, -339, -342, -343, -378, -213, -378,
- 137, 137, -378, -378, -260, -289, -260, -260, 24, -260,
- 24, -260, 24, 96, -289, -260, 24, -260, 24, -260,
- 24, -260, 24, -260, 24, 32, 79, 80, 81, 32,
- 83, 84, 85, -213, -378, -378, -213, -334, -213, -186,
- -378, -264, 96, 96, 96, -345, -345, 96, 90, 90,
- 90, -345, -345, 96, 90, -297, -295, 90, 90, -384,
- 254, 298, 300, 96, 96, 96, 96, 32, 90, -385,
- 32, 639, 638, 640, 641, 642, 90, 96, 32, 96,
- 32, 96, -289, 87, -186, -138, 288, 225, 227, 230,
- 77, 90, 304, 305, 302, 307, 308, 151, 45, 88,
- 240, 237, -378, -280, 242, -280, -289, -296, -295, -287,
- 90, -140, -341, 15, 162, -300, -300, -278, -186, -341,
- -300, -278, -186, -278, -278, -278, -278, -300, -300, -300,
- -278, -295, -295, -186, -186, -186, -186, -186, -186, -186,
- -306, -279, -278, 614, 90, -272, 15, 77, -306, -306,
- -304, 316, -78, -289, 90, -15, -11, -22, -21, -23,
- 151, 88, 505, -179, -186, 614, 614, 614, 614, 614,
- 614, -140, -140, -140, -140, 528, -203, 119, 143, 120,
- 121, -158, -204, -209, -211, 106, 162, 145, 159, -241,
- -146, -150, -146, -146, -146, -146, -146, -146, -146, -146,
- -146, -146, -146, -146, -146, -307, -289, 90, 177, -154,
- -153, 105, -401, -154, 502, 88, -216, 221, -140, -140,
- -378, -140, -289, -126, -128, -126, -140, -218, -219, 147,
- -213, -140, -407, -407, 96, 105, 168, -122, 25, 39,
- -122, -122, -122, -122, -140, -140, -140, -140, -140, -140,
- -140, -140, -140, -140, -122, -289, -289, -115, -114, 425,
- 426, 427, 428, 430, 431, 432, 435, 436, 440, 441,
- 424, 442, 429, 434, 437, 438, 439, 433, 335, -140,
- -140, -140, -140, -140, -140, -83, -140, 130, 131, 132,
- -205, -140, -146, -140, -140, -140, -407, -140, -140, -140,
- -206, -205, -377, -376, -375, -140, -140, -140, -140, -140,
- -140, -140, -140, -140, -140, -140, -140, -140, -140, -140,
- -140, -140, -140, -140, -140, -140, -140, -140, -407, -140,
- -160, -144, 96, -256, 105, 92, -140, -140, -127, -126,
- -291, -296, -287, -288, -126, -127, -127, -126, -126, -140,
- -140, -140, -140, -140, -140, -140, -140, -407, -140, -140,
- -140, -140, -140, -248, -407, -205, 88, -394, 406, 407,
- 612, -298, 273, -297, 26, -206, 90, 15, -258, 78,
- -289, -230, -230, 64, 65, 60, -126, -131, -407, -35,
- 26, -250, -289, 63, 90, -325, -264, 363, 364, 177,
- -140, -140, 88, -229, 28, 29, -186, -292, 168, -296,
- -186, -259, 273, -186, -164, -166, -167, -168, -189, -212,
- -406, -169, -8, 524, 521, 15, -179, -180, -188, -295,
- -267, -308, -269, 88, 405, 407, 408, 77, 122, -140,
- -326, 176, -353, -352, -351, -334, -336, -337, -338, 89,
- -326, -330, 369, 368, -320, -320, -320, -320, -320, -325,
- -325, -325, -325, 87, 87, -320, -320, -320, -320, -328,
- 87, -328, -328, -329, 87, -329, -364, -140, -361, -360,
- -358, -359, 247, 101, 596, 552, 505, 545, 586, 78,
- -356, -229, 96, -407, -138, -281, 242, -362, -359, -378,
- -378, -378, -281, 91, 90, 91, 90, 91, 90, -107,
- -58, -1, 651, 652, 653, 88, 20, -335, -334, -57,
- 298, -367, -368, 273, -363, -357, -343, 137, -342, -343,
- -343, -378, 88, 30, 127, 127, 127, 127, 505, 227,
- 33, -282, 544, 143, 596, 552, -334, -57, 240, 240,
- -307, -307, -307, 90, 90, -277, 647, -179, -134, 290,
- 151, 279, 279, 237, 237, 292, -186, 303, 306, 304,
- 305, 302, 307, 308, 24, 24, 24, 24, 24, 291,
- 293, 295, 281, -186, -186, -280, 77, -181, -186, 27,
- -295, -186, -278, -278, -186, -278, -278, -186, -289, 350,
- 607, 608, 610, 609, -118, 406, 88, 505, 23, -119,
- 23, -406, 119, 120, 121, -204, -146, -150, -146, 142,
- 261, -406, -213, -407, -291, 26, 88, 78, -407, 88,
- 88, -407, -407, 88, 15, -221, -219, 149, -140, -407,
- 88, -407, -407, -407, -205, -140, -140, -140, -140, -407,
- -407, -407, -407, -407, -407, -407, -407, -407, -407, -205,
- 88, 88, 15, -311, 26, -407, -407, -407, -407, -407,
- -220, -407, 15, -407, 78, 88, 162, 88, -407, -407,
- -407, 88, 88, -407, -407, 88, 88, -407, 88, 88,
- 88, -407, 88, 88, 88, 88, -407, -407, -407, -407,
- 88, 88, 88, 88, 88, 88, 88, 88, 88, 88,
- -407, -90, 529, -407, -407, 88, -407, 88, -407, -406,
- 221, -407, -407, -407, -407, -407, 88, 88, 88, 88,
- 88, 88, -407, -407, -407, 88, 88, -407, 88, -407,
- 88, -407, -393, 611, 407, -193, -192, -190, 75, 241,
- 76, -406, -297, -407, -154, -256, -257, -256, -198, -289,
- 96, 105, -232, -163, -165, 15, -131, -211, 89, 88,
- -325, -236, -242, -275, -289, 90, 177, -327, 177, -327,
- 363, 364, -228, 221, -194, 16, -197, 33, 58, -11,
- -406, -406, 33, 88, -182, -184, -183, -185, 67, 71,
- 73, 68, 69, 70, 74, -302, 26, -8, -164, -8,
- -406, -186, -179, -408, 15, 78, -408, 88, 221, -268,
- -270, 409, 406, 412, -378, 90, -106, 88, -351, -338,
- -233, -135, 41, -331, 370, -325, 512, -325, -333, 90,
- -333, 96, 96, 89, -47, -42, -43, 34, 82, -358,
- -345, 90, 40, -345, -345, -289, 89, -229, -134, -186,
- 143, 77, -362, -362, -362, -295, -2, 650, 656, 137,
- 87, 373, 19, -250, 88, 89, -214, 299, 89, -108,
- -289, 89, 87, -343, -343, -289, -406, 237, 32, 32,
- 596, 552, 544, -57, -214, -213, -378, -326, 649, 648,
- 89, 239, 297, -139, 420, -136, 90, 91, -186, -186,
- -186, -186, -186, 230, 227, 396, -402, 309, -402, 282,
- 240, -179, -186, 88, -81, 256, 251, -300, -300, 34,
- -186, 406, 623, 621, -140, 142, 261, -158, -150, -146,
- -309, 177, 336, 260, 334, 330, 350, 341, 368, 332,
- 369, 329, 328, 327, -309, -307, -205, -128, -140, -140,
- 150, -140, 148, -140, -407, -407, -407, -407, -407, -225,
- -140, -140, -140, -407, 177, 336, 15, -140, -307, -140,
- -140, -140, -140, -140, -375, -140, -205, -140, -205, -140,
- -140, -140, -140, -140, -376, -376, -376, -376, -376, -205,
- -205, -205, -205, -406, -289, -93, -92, -91, 579, 241,
- -90, -160, -93, -160, -127, -291, -140, -140, -140, -140,
- -140, -140, -140, -140, -140, -140, -190, -339, -339, -339,
- -260, 88, -271, 23, 15, 58, 58, -163, -194, -164,
- -131, -289, -239, 606, -245, 47, -243, -244, 48, -240,
- 49, 57, -327, -327, 168, -230, -140, -261, 77, -262,
- -266, -213, -208, -210, -209, -406, -249, -407, -289, -260,
- -262, -166, -167, -167, -166, -167, 67, 67, 67, 72,
- 67, 72, 67, -183, -295, -407, -140, -298, 78, -164,
- -164, -188, -295, 168, 406, 410, 411, -351, -400, 119,
- 143, 32, 77, 366, 101, -398, 176, 541, 591, 596,
- 552, 545, 586, -399, 243, 136, 137, 255, 26, 42,
- 89, 88, 89, 88, 89, 88, -283, -282, -43, -42,
- -345, -345, 96, -378, 90, 90, 239, 27, -186, 77,
- 77, 77, -109, 654, 96, 87, -3, 82, -140, 87,
- 20, -334, -213, -369, -321, -370, -322, -323, -5, -6,
- -346, -112, 58, 101, -61, 45, 238, 634, 635, 127,
- -406, 647, -361, -250, -365, -367, -186, -143, -406, -142,
- -144, -151, 166, 167, 260, 332, 333, -214, -186, -133,
- 288, 296, 87, -137, 92, -381, 78, 279, 366, 279,
- 90, -403, 310, 90, -403, -186, -81, -47, -186, -278,
- -278, 34, -378, -407, -158, -150, -121, 162, 505, -312,
- 511, -320, -320, -320, -329, -320, 324, -320, 324, -320,
- -407, -407, -407, 88, -407, 23, -407, -140, 88, -117,
- 448, 88, 88, -407, 87, 87, -140, -407, -407, -407,
- 88, -407, -407, -407, -407, -407, 88, -407, -407, -407,
- 88, -310, 597, -407, -407, -407, -407, -407, -407, -407,
- -407, -407, -407, -89, -290, -289, -90, 561, 561, -407,
- -90, -222, 88, -407, -407, 88, -407, 88, 88, -407,
- 88, -407, 88, -407, -407, -407, -407, 88, -191, 23,
- -191, -191, -407, -256, -186, -194, -223, 17, -236, 52,
- 342, -247, -246, 56, 48, -244, 20, 50, 20, 31,
- -261, 88, 151, 88, -407, -407, 88, 58, 221, -407,
- -194, -177, -176, 77, 78, -178, 77, -176, 67, 67,
- -251, 88, -259, -164, -194, -194, 221, 119, -406, -145,
- -157, -143, 13, 90, 90, -378, -397, 638, 639, 32,
- 96, -345, -345, 137, 137, -186, 87, -325, 90, -325,
- 96, 96, 32, 83, 84, 85, 32, 79, 80, 81,
- -186, -186, -186, -186, -366, 87, 20, -140, 87, 151,
- 89, -250, -250, 275, 162, -345, 632, 281, 281, -345,
- -345, -345, -111, -110, 654, 89, -407, 88, -332, 505,
- 508, -140, -152, -152, -251, 89, -374, 505, -380, -289,
- -289, -289, -289, 96, 98, -407, 503, 74, 506, -407,
- -325, -140, -140, -140, -230, 90, -140, -140, 96, 96,
- -407, -140, -205, -140, -407, -174, -173, -175, 615, 119,
- 32, -309, -407, -207, 273, -96, -95, -94, 15, -407,
- -140, -140, -140, -140, -140, -140, -140, -406, 67, 19,
- 17, -406, -406, -298, -223, -224, 18, 20, -237, 54,
- -235, 53, -235, -246, 20, 20, 90, 20, 90, 137,
- -266, -140, -210, 58, -11, -289, -208, -289, -225, -140,
- 87, -140, -154, -194, -194, -140, -200, 472, 474, 475,
- 476, 473, 478, 479, 480, 481, 482, 483, 484, 485,
- 486, 487, 477, 451, 108, 110, 109, 452, 453, 454,
- 336, 499, 500, 494, 497, 498, 496, 495, 351, 352,
- 455, 456, 457, 111, 112, 113, 114, 115, 116, 117,
- 458, 461, 459, 462, 463, 464, 469, 470, 465, 466,
- 467, 468, 471, 488, 489, 490, 491, 492, 493, 598,
- 599, 600, 601, 602, 603, 604, 605, 90, 90, 87,
- -140, 89, 89, -251, -365, -58, 89, -252, -250, 96,
- 89, 276, -209, -406, 90, -345, -345, -345, 96, 96,
- -297, -407, 88, -289, -399, -367, 509, 509, -407, 26,
- -373, -372, -291, 87, 78, 63, 504, 507, -407, -407,
- 88, -407, -407, -407, 89, 89, -407, -407, -407, 88,
- -407, -173, -175, -407, 77, -154, -225, 20, -93, 298,
- 300, -93, -407, 88, -407, -407, 88, -407, 88, -407,
- -407, -253, -407, -289, 243, 20, 20, -253, -253, -193,
- -224, -103, -102, -101, 535, -140, -205, -238, 55, 77,
- 122, 90, 90, 90, 13, -208, 221, -230, -250, -171,
- 373, -225, -407, -250, 89, 26, 89, 656, 137, 89,
- -209, -120, -406, 272, -297, 90, 90, -110, -113, -11,
- 88, 151, -250, -186, 63, -140, -205, -407, 77, 516,
- 615, -88, -87, -84, 626, 652, -205, -90, -90, -140,
- -140, -140, 88, -407, -407, -407, -103, 88, -100, -99,
- -289, 77, 122, -262, -289, 89, -407, -406, -230, 89,
- -234, -11, 87, -3, 272, -321, -370, -322, -323, -5,
- -6, -346, -79, 505, -372, -350, -291, 90, 96, 89,
- 505, -407, -407, -86, 145, 624, 594, -141, -152, -149,
- 220, -407, 88, -407, 88, -407, 88, -289, 243, -101,
- 88, 26, -298, -172, -170, -289, 558, -390, -389, 501,
- -400, -396, 119, 143, 101, -398, 596, 552, 128, 129,
- -79, -140, 87, -407, -80, 287, 611, -381, 506, -86,
- 625, 572, 547, 572, 547, -140, -140, -140, -99, -406,
- -407, 88, 23, -313, -60, 569, -387, -388, 77, -391,
- 379, 568, 589, 119, 90, 89, -250, 248, -374, 507,
- 142, -407, 88, -407, 88, -407, -89, -170, 565, -326,
- -154, -388, 77, -387, 77, 14, 13, -4, 655, 89,
- 289, -86, -140, -140, -407, -59, 27, -171, -386, 256,
- 251, 254, 33, -386, 96, -4, -407, -407, 569, 250,
- 32, 119, -154, -174, -173, -173,
+ -1000, -410, -77, -415, -7, -11, -20, -21, -22, -23,
+ -24, -25, -26, -27, -28, -29, -30, -31, -63, -64,
+ -65, -67, -68, -69, -70, -71, -14, -17, -66, -32,
+ -33, -72, -73, -74, -75, -76, -16, -18, -19, -9,
+ -8, -13, 10, 11, -106, -34, 33, -39, -49, 225,
+ -50, -40, 226, -51, 228, 227, 265, 229, 258, 75,
+ 313, 314, 316, 317, 318, 319, -107, 617, 263, 264,
+ 231, 37, 46, 34, 35, 38, 235, 271, 272, 234,
+ -10, -35, 9, -412, 12, 449, 260, 259, 29, -12,
+ 511, 87, -78, -411, 665, -250, -234, 23, 34, 30,
+ -233, -229, -125, -234, 21, 19, 8, -77, -77, -77,
+ 13, 14, -77, -350, -352, 87, 159, 87, -77, -56,
+ -55, -53, -52, -54, -57, 32, -46, -47, -374, -45,
+ -42, 230, 227, 275, 123, 124, 265, 266, 267, 229,
+ 249, 264, 268, 263, 284, -41, 82, 34, 511, 514,
+ -357, 226, 232, 233, 228, 450, 126, 125, 76, -354,
+ 372, 544, 635, -57, 637, 101, 104, 636, 45, 239,
+ 638, 639, 640, 551, 641, 248, 642, 643, 644, 645,
+ 651, 592, 652, 653, 654, 127, 8, -77, -301, -297,
+ 91, -290, 508, 251, 542, 543, 300, 82, 42, 517,
+ 369, 372, 544, 479, 635, 313, 329, 323, 484, 485,
+ 486, 352, 344, 509, 545, 518, 303, 252, 288, 629,
+ 342, 135, 637, 307, 546, 266, 377, 378, 547, 379,
+ 101, 316, 416, 650, 306, 548, 648, 104, 636, 321,
+ 80, 478, 52, 632, 45, 261, 340, 234, 336, 638,
+ 289, 549, 520, 282, 126, 123, 657, 37, 332, 51,
+ 31, 647, 125, 50, 639, 150, 550, 640, 551, 381,
+ 359, 623, 49, 382, 267, 552, 85, 272, 513, 310,
+ 631, 383, 498, 333, 384, 299, 646, 231, 553, 612,
+ 604, 605, 385, 386, 624, 364, 360, 365, 500, 554,
+ 408, 483, 387, 608, 609, 664, 53, 555, 556, 625,
+ 124, 557, 79, 641, 81, 327, 328, 558, 297, 250,
+ 503, 504, 410, 356, 461, 468, 469, 111, 112, 464,
+ 113, 470, 114, 471, 472, 473, 462, 115, 108, 463,
+ 474, 475, 357, 358, 116, 476, 110, 109, 465, 467,
+ 117, 477, 248, 36, 388, 510, 301, 59, 305, 276,
+ 411, 47, 362, 661, 46, 619, 505, 559, 622, 355,
+ 351, 458, 54, 560, 561, 562, 563, 480, 642, 354,
+ 326, 350, 656, 4, 294, 481, 643, 63, 233, 367,
+ 366, 368, 283, 407, 347, 564, 565, 566, 255, 83,
+ 567, 337, 22, 568, 569, 389, 290, 570, 57, 571,
+ 572, 414, 264, 573, 55, 644, 40, 574, 269, 658,
+ 645, 575, 576, 577, 618, 578, 271, 579, 391, 580,
+ 606, 607, 390, 361, 363, 506, 278, 392, 236, 512,
+ 581, 311, 331, 268, 649, 582, 256, 494, 495, 496,
+ 497, 630, 502, 501, 270, 275, 263, 415, 257, 583,
+ 584, 585, 586, 587, 304, 603, 588, 589, 317, 651,
+ 459, 44, 590, 591, 592, 593, 594, 298, 293, 409,
+ 418, 62, 84, 374, 595, 596, 628, 325, 322, 291,
+ 597, 314, 56, 652, 653, 654, 285, 655, 487, 488,
+ 489, 490, 10, 662, 663, 482, 394, 127, 295, 296,
+ 48, 348, 277, 598, 308, 599, 338, 339, 353, 324,
+ 349, 615, 315, 613, 279, 395, 460, 265, 600, 417,
+ 292, 370, 375, 309, 516, 499, 284, 396, 627, 515,
+ 491, 492, 346, 343, 286, 493, 601, 617, 397, 240,
+ 280, 281, 602, 614, 398, 399, 302, 400, 401, 402,
+ 403, 404, 406, 312, 405, 616, 610, 611, 287, 514,
+ 320, 341, 376, 430, 431, 432, 433, 434, 435, 436,
+ 437, 438, 439, 440, 441, 442, 443, 444, 445, 446,
+ 447, 457, 238, -77, 238, -188, -297, 238, -269, 378,
+ -287, 380, 393, 388, 398, 386, -278, 389, 391, 278,
+ -398, 408, 238, 395, 225, 381, 390, 399, 400, 302,
+ 406, 401, 312, 405, 287, 402, 403, 404, -381, 177,
+ 640, 655, 135, 345, 385, 383, 409, 619, 91, -303,
+ 91, 92, 93, -290, 315, -305, 320, -291, -381, -290,
+ 318, -77, -77, -307, -307, -127, 619, 621, -207, -142,
+ 143, -157, -160, -148, -152, -201, -202, -203, -204, -158,
+ -217, -256, 166, 167, 174, 144, -213, -161, 27, 507,
+ 451, 450, 177, 32, -151, 220, 69, 70, 453, 146,
+ 58, 12, 426, 427, -159, 421, 428, 423, 478, 480,
+ 481, 482, 479, 484, 485, 486, 487, 488, 489, 490,
+ 491, 492, 493, 483, 455, 456, 118, 457, 108, 110,
+ 109, 458, 459, 460, 342, 505, 506, 500, 503, 504,
+ 502, 501, 357, 358, 461, 462, 463, 111, 112, 113,
+ 114, 115, 116, 117, 464, 467, 465, 466, 468, 469,
+ 470, 475, 476, 471, 472, 473, 474, 477, -87, -99,
+ 533, 532, -100, -149, -150, -163, -164, -291, -297, 243,
+ 420, 237, 172, 449, -153, -146, -215, 107, 92, 93,
+ -8, -211, 419, 424, 425, 429, 422, 519, 521, 536,
+ 537, 539, 524, 529, 528, 531, 494, 495, 496, 497,
+ 498, 499, 604, 605, 606, 607, 608, 609, 610, 611,
+ -381, -290, 91, -155, -154, -197, 94, 99, 102, 103,
+ 105, -404, 261, 338, 339, 119, -412, 633, 90, 95,
+ 96, 97, 98, 120, 121, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 45, 394, 394,
+ -188, -77, -77, -77, -77, -227, -125, -229, -10, -8,
+ -412, 9, -77, -8, -9, -13, -35, -37, 538, -36,
+ -297, 100, -234, -250, 13, 62, 162, 43, 51, -232,
+ -233, -12, -8, -142, 20, 24, 25, -130, 168, -142,
+ -297, -130, -276, 242, -77, -77, -265, -310, 315, -267,
+ 409, 619, 408, -257, -270, 91, -256, -269, 407, -351,
+ 159, -337, -341, -291, 253, -367, 249, -188, -360, -359,
+ -291, -412, -126, -286, 239, 247, 246, 136, -385, 139,
+ 295, 420, 237, -52, -53, -54, -269, 176, 639, -108,
+ 270, 274, 88, 88, -341, -340, -339, -386, 274, 253,
+ -366, -358, 245, 254, -347, 246, 247, -342, 239, 137,
+ -386, -342, 244, 254, 249, 253, 274, 274, 127, 274,
+ 127, 274, 274, 274, 274, 274, 274, 274, 274, 274,
+ 269, -348, 151, -348, 515, 515, -354, -386, 249, 239,
+ -386, -386, 245, -288, -342, 241, 26, 241, 36, 36,
+ -348, -348, -348, -269, 176, -348, -348, -348, -348, 282,
+ 282, -348, -348, -348, -348, -348, -348, -348, -348, -348,
+ -348, -348, -348, -348, -348, -348, -348, -348, 238, -385,
+ -134, 405, 302, 82, -55, 284, -38, -188, -286, 239,
+ 240, -385, 271, -188, 221, -188, -280, 159, 16, -280,
+ -277, 394, 392, 379, 384, -280, -280, -280, -280, 285,
+ 377, -343, 239, 36, 250, 394, 285, 377, 285, 286,
+ 285, 286, 387, 397, 285, -302, 15, 162, 420, 382,
+ 386, 278, 238, 279, 240, 396, 286, -302, 90, -281,
+ 159, 285, 394, 281, -280, -280, -308, -412, -293, -291,
+ -289, 230, 24, 142, 26, 28, 145, 177, 130, 20,
+ 146, 38, 232, 345, 249, 176, 245, 450, 225, 73,
+ 519, 421, 423, 419, 426, 452, 453, 420, 380, 32,
+ 14, 521, 29, 259, 25, 39, 170, 227, 149, 522,
+ 262, 27, 260, 118, 121, 524, 23, 76, 254, 15,
+ 247, 41, 17, 525, 526, 18, 243, 242, 162, 239,
+ 71, 12, 220, 30, 158, 67, 527, 137, 528, 529,
+ 530, 531, 131, 69, 159, 21, 659, 424, 425, 34,
+ 620, 507, 273, 172, 74, 60, 621, 143, 422, 532,
+ 533, 119, 534, 122, 77, 626, 139, 19, 72, 43,
+ 535, 274, 536, 244, 660, 537, 412, 538, 160, 228,
+ 449, 70, 161, 633, 539, 634, 237, 393, 9, 454,
+ 33, 258, 246, 129, 68, 540, 238, 148, 455, 456,
+ 241, 132, 120, 8, 136, 35, 13, 75, 78, 427,
+ 428, 429, 58, 128, 511, 147, 16, 541, 413, 141,
+ -381, 622, -308, -308, 33, 92, -407, -408, -409, 511,
+ 412, 241, -291, -188, -83, 612, 229, -84, 618, 24,
+ 236, -132, 394, -120, 177, 640, 623, 624, 625, 622,
+ 391, 630, 628, 626, 285, 627, 88, 139, 141, 142,
+ 4, -142, 158, -198, 151, 152, 153, 154, 155, 156,
+ 157, 162, 143, 145, 159, -243, 140, 163, 164, 165,
+ 166, 167, 168, 169, 171, 170, 172, 173, 160, 161,
+ 176, 223, 224, -152, -152, -152, -152, -213, -219, -218,
+ -412, -215, -381, -290, -297, -412, -412, -152, -275, -412,
+ -412, -148, -412, -412, -412, -222, -142, -412, -412, -416,
+ -412, -416, -416, -326, -412, -326, -412, -412, -412, -412,
+ -412, -412, -412, -412, -412, -412, -412, -412, -412, -412,
+ -412, -412, -412, -412, -412, -412, -412, -412, -412, -412,
+ -412, -412, -412, -412, -412, -412, -412, -412, -412, -412,
+ -412, -412, -412, -412, -412, -412, -412, -412, -412, -412,
+ -412, -412, -412, -412, -412, -412, -412, -412, -412, -412,
+ -412, -412, -412, -412, -412, -412, -412, -412, -412, -412,
+ -412, -412, 221, -412, -412, -412, -412, -412, -326, -326,
+ -326, -326, -326, -412, -412, -412, -412, -412, -412, -412,
+ -412, -412, -412, -412, -412, -412, -412, 103, 99, 102,
+ 94, -217, 105, 90, 90, 90, 90, -8, -9, -207,
+ -412, -307, -395, -396, -191, -188, -412, 302, -291, -291,
+ 271, -232, -12, -8, -227, -233, -229, -8, -77, -118,
+ -131, 64, 65, -133, 25, 39, 68, 66, 24, -413,
+ 89, -413, -250, -413, 88, -37, -253, 87, 566, 596,
+ 566, 596, 62, 44, 90, 90, 88, 22, -228, -230,
+ -142, 15, -295, 4, -294, 26, -291, 90, 221, 15,
+ -189, 30, -188, -276, -276, 88, 91, 315, -266, -268,
+ 410, 412, 151, -296, -291, 90, 32, 89, 88, -188,
+ -315, -318, -320, -319, -321, -316, -317, 342, 343, 177,
+ 346, 348, 349, 350, 351, 352, 353, 354, 355, 356,
+ 359, 33, 261, 338, 339, 340, 341, 360, 361, 362,
+ 363, 365, 366, 367, 368, 323, 344, 509, 324, 325,
+ 326, 327, 328, 329, 331, 332, 335, 333, 334, 336,
+ 337, -382, -381, 87, 89, 88, -322, 87, -142, -134,
+ 238, -381, 239, 239, 239, -77, 449, -348, -348, -348,
+ 269, 20, -45, -42, -374, 19, -41, -42, 230, 123,
+ 124, 227, 87, -337, 87, -346, -382, -381, 87, 137,
+ 244, 136, -345, -342, -345, -346, -381, -215, -381, 137,
+ 137, -381, -381, -262, -291, -262, -262, 24, -262, 24,
+ -262, 24, 96, -291, -262, 24, -262, 24, -262, 24,
+ -262, 24, -262, 24, 32, 79, 80, 81, 32, 83,
+ 84, 85, -215, -381, -381, -215, -337, -215, -188, -381,
+ -269, 96, 96, 96, -348, -348, 96, 90, 90, 90,
+ -348, -348, 96, 90, -299, -297, 90, 90, -387, 255,
+ 299, 301, 96, 96, 96, 96, 32, 90, -388, 32,
+ 647, 646, 648, 649, 650, 90, 96, 32, 96, 32,
+ 96, -291, 87, -188, -140, 289, 225, 227, 230, 77,
+ 90, 305, 306, 303, 308, 309, 151, 45, 88, 241,
+ 238, -381, -282, 243, -282, -291, -298, -297, -289, 90,
+ -142, -344, 15, 162, -302, -302, -280, -188, -344, -302,
+ -280, -188, -280, -280, -280, -280, -302, -302, -302, -280,
+ -297, -297, -188, -188, -188, -188, -188, -188, -188, -308,
+ -281, -280, 622, 90, -274, 15, 77, -308, -308, 88,
+ 321, 413, 414, -306, 318, -79, -291, 90, -15, -11,
+ -23, -22, -24, 151, -15, 88, 511, -181, -188, 622,
+ 622, 622, 622, 622, 622, -142, -142, -142, -142, 534,
+ -205, 119, 143, 120, 121, -160, -206, -211, -213, 106,
+ 162, 145, 159, -243, -148, -152, -148, -148, -148, -148,
+ -148, -148, -148, -148, -148, -148, -148, -148, -148, -309,
+ -291, 90, 177, -156, -155, 105, -404, -156, 508, 88,
+ -218, 221, -142, -142, -381, -142, -291, -128, -130, -128,
+ -142, -220, -221, 147, -215, -142, -413, -413, 96, 105,
+ 168, -124, 25, 39, -124, -124, -124, -124, -142, -142,
+ -142, -142, -142, -142, -142, -142, -142, -142, -124, -291,
+ -291, -117, -116, 431, 432, 433, 434, 436, 437, 438,
+ 441, 442, 446, 447, 430, 448, 435, 440, 443, 444,
+ 445, 439, 341, -142, -142, -142, -142, -142, -142, -85,
+ -142, 130, 131, 132, -207, -142, -148, -142, -142, -142,
+ -413, -142, -142, -142, -208, -207, -380, -379, -378, -142,
+ -142, -142, -142, -142, -142, -142, -142, -142, -142, -142,
+ -142, -142, -142, -142, -142, -142, -142, -142, -142, -142,
+ -142, -142, -413, -142, -162, -146, 96, -258, 105, 92,
+ -142, -142, -129, -128, -293, -298, -289, -290, -128, -129,
+ -129, -128, -128, -142, -142, -142, -142, -142, -142, -142,
+ -142, -413, -142, -142, -142, -142, -142, -250, -413, -207,
+ 88, -397, 412, 413, 620, -300, 274, -299, 26, -208,
+ 90, 15, -260, 78, -291, -232, -232, 64, 65, 60,
+ -128, -133, -413, -36, 26, -252, -291, 559, 559, 63,
+ 90, -327, -269, 369, 370, 177, -142, -142, 88, -231,
+ 28, 29, -188, -294, 168, -298, -188, -261, 274, -188,
+ -166, -168, -169, -170, -191, -214, -412, -171, -8, 530,
+ 527, 15, -181, -182, -190, -297, -267, -310, -266, 88,
+ 411, 413, 414, 77, 122, -142, -328, 176, -356, -355,
+ -354, -337, -339, -340, -341, 89, -328, -333, 375, 374,
+ -322, -322, -322, -322, -322, -327, -327, -327, -327, 87,
+ 87, -322, -322, -322, -322, -330, 87, -330, -330, -331,
+ -330, 87, -331, -332, 87, -332, -367, -142, -364, -363,
+ -361, -362, 248, 101, 602, 558, 511, 551, 592, 78,
+ -359, -231, 96, -413, -140, -283, 243, -365, -362, -381,
+ -381, -381, -283, 91, 90, 91, 90, 91, 90, -109,
+ -59, -1, 659, 660, 661, 88, 20, -338, -337, -58,
+ 299, -370, -371, 274, -366, -360, -346, 137, -345, -346,
+ -346, -381, 88, 30, 127, 127, 127, 127, 511, 227,
+ 33, -284, 550, 143, 602, 558, -337, -58, 241, 241,
+ -309, -309, -309, 90, 90, -279, 655, -181, -136, 291,
+ 151, 280, 280, 238, 238, 293, -188, 304, 307, 305,
+ 306, 303, 308, 309, 24, 24, 24, 24, 24, 292,
+ 294, 296, 282, -188, -188, -282, 77, -183, -188, 27,
+ -297, -188, -280, -280, -188, -280, -280, -188, -409, 322,
+ -291, 356, 613, 614, 616, 615, -120, 412, 88, 511,
+ 23, -121, 23, -412, 119, 120, 121, -206, -148, -152,
+ -148, 142, 262, -412, -215, -413, -293, 26, 88, 78,
+ -413, 88, 88, -413, -413, 88, 15, -223, -221, 149,
+ -142, -413, 88, -413, -413, -413, -207, -142, -142, -142,
+ -142, -413, -413, -413, -413, -413, -413, -413, -413, -413,
+ -413, -207, 88, 88, 15, -313, 26, -413, -413, -413,
+ -413, -413, -222, -413, 15, -413, 78, 88, 162, 88,
+ -413, -413, -413, 88, 88, -413, -413, 88, 88, -413,
+ 88, 88, 88, -413, 88, 88, 88, 88, -413, -413,
+ -413, -413, 88, 88, 88, 88, 88, 88, 88, 88,
+ 88, 88, -413, -92, 535, -413, -413, 88, -413, 88,
+ -413, -412, 221, -413, -413, -413, -413, -413, 88, 88,
+ 88, 88, 88, 88, -413, -413, -413, 88, 88, -413,
+ 88, -413, 88, -413, -396, 619, 413, -195, -194, -192,
+ 75, 242, 76, -412, -299, -413, -156, -258, -259, -258,
+ -200, -291, 96, 105, -234, -165, -167, 15, -133, -213,
+ 89, 88, -327, -238, -244, -277, -291, 90, 177, -329,
+ 177, -329, 369, 370, -230, 221, -196, 16, -199, 33,
+ 58, -11, -412, -412, 33, 88, -184, -186, -185, -187,
+ 67, 71, 73, 68, 69, 70, 74, -304, 26, -8,
+ -166, -8, -412, -188, -181, -414, 15, 78, -414, 88,
+ 221, -268, -271, 415, 412, 418, -381, 90, -108, 88,
+ -354, -341, -235, -137, 41, -334, 376, -327, 518, -327,
+ -336, 90, -336, 96, 96, 96, 89, -48, -43, -44,
+ 34, 82, -361, -348, 90, 40, -348, -348, -291, 89,
+ -231, -136, -188, 143, 77, -365, -365, -365, -297, -2,
+ 658, 664, 137, 87, 379, 19, -252, 88, 89, -216,
+ 300, 89, -110, -291, 89, 87, -346, -346, -291, -412,
+ 238, 32, 32, 602, 558, 550, -58, -216, -215, -381,
+ -328, 657, 656, 89, 240, 298, -141, 426, -138, 90,
+ 91, -188, -188, -188, -188, -188, 230, 227, 402, -405,
+ 310, -405, 283, 241, -181, -188, 88, -82, 257, 252,
+ -302, -302, 34, -188, 412, 631, 629, -142, 142, 262,
+ -160, -152, -148, -311, 177, 342, 261, 340, 336, 356,
+ 347, 374, 338, 375, 333, 332, 331, -311, -309, -207,
+ -130, -142, -142, 150, -142, 148, -142, -413, -413, -413,
+ -413, -413, -227, -142, -142, -142, -413, 177, 342, 15,
+ -142, -309, -142, -142, -142, -142, -142, -378, -142, -207,
+ -142, -207, -142, -142, -142, -142, -142, -379, -379, -379,
+ -379, -379, -207, -207, -207, -207, -412, -291, -95, -94,
+ -93, 585, 242, -92, -162, -95, -162, -129, -293, -142,
+ -142, -142, -142, -142, -142, -142, -142, -142, -142, -192,
+ -342, -342, -342, -262, 88, -273, 23, 15, 58, 58,
+ -165, -196, -166, -133, -291, -241, 612, -247, 47, -245,
+ -246, 48, -242, 49, 57, -329, -329, 168, -232, -142,
+ -263, 77, -264, -272, -215, -210, -212, -211, -412, -251,
+ -413, -291, -262, -264, -168, -169, -169, -168, -169, 67,
+ 67, 67, 72, 67, 72, 67, -185, -297, -413, -142,
+ -300, 78, -166, -166, -190, -297, 168, 412, 416, 417,
+ -354, -403, 119, 143, 32, 77, 372, 101, -401, 176,
+ 547, 597, 602, 558, 551, 592, -402, 244, 136, 137,
+ 256, 26, 42, 89, 88, 89, 88, 89, 89, 88,
+ -285, -284, -44, -43, -348, -348, 96, -381, 90, 90,
+ 240, 27, -188, 77, 77, 77, -111, 662, 96, 87,
+ -3, 82, -142, 87, 20, -337, -215, -372, -323, -373,
+ -324, -325, -5, -6, -349, -114, 58, 101, -62, 45,
+ 239, 642, 643, 127, -412, 655, -364, -252, -368, -370,
+ -188, -145, -412, -144, -146, -153, 166, 167, 261, 338,
+ 339, -216, -188, -135, 289, 297, 87, -139, 92, -384,
+ 78, 280, 372, 280, 90, -406, 311, 90, -406, -188,
+ -82, -48, -188, -280, -280, 34, -381, -413, -160, -152,
+ -123, 162, 511, -314, 517, -322, -322, -322, -332, -322,
+ 328, -322, 328, -322, -413, -413, -413, 88, -413, 23,
+ -413, -142, 88, -119, 454, 88, 88, -413, 87, 87,
+ -142, -413, -413, -413, 88, -413, -413, -413, -413, -413,
+ 88, -413, -413, -413, 88, -312, 603, -413, -413, -413,
+ -413, -413, -413, -413, -413, -413, -413, -91, -292, -291,
+ -92, 567, 567, -413, -92, -224, 88, -413, -413, 88,
+ -413, 88, 88, -413, 88, -413, 88, -413, -413, -413,
+ -413, 88, -193, 23, -193, -193, -413, -258, -188, -196,
+ -225, 17, -238, 52, 348, -249, -248, 56, 48, -246,
+ 20, 50, 20, 31, -263, 88, 151, 88, -413, -413,
+ 88, 58, 221, -413, -196, -179, -178, 77, 78, -180,
+ 77, -178, 67, 67, -253, 88, -261, -166, -196, -196,
+ 221, 119, -412, -147, -159, -145, 13, 90, 90, -381,
+ -400, 646, 647, 32, 96, -348, -348, 137, 137, -188,
+ 87, -327, 90, -327, 96, 96, 32, 83, 84, 85,
+ 32, 79, 80, 81, -188, -188, -188, -188, -369, 87,
+ 20, -142, 87, 151, 89, -252, -252, 276, 162, -348,
+ 640, 282, 282, -348, -348, -348, -113, -112, 662, 89,
+ -413, 88, -335, 511, 514, -142, -154, -154, -253, 89,
+ -377, 511, -383, -291, -291, -291, -291, 96, 98, -413,
+ 509, 74, 512, -413, -327, -142, -142, -142, -232, 90,
+ -142, -142, 96, 96, -413, -142, -207, -142, -413, -176,
+ -175, -177, 623, 119, 32, -311, -413, -209, 274, -98,
+ -97, -96, 15, -413, -142, -142, -142, -142, -142, -142,
+ -142, -412, 67, 19, 17, -412, -412, -300, -225, -226,
+ 18, 20, -239, 54, -237, 53, -237, -248, 20, 20,
+ 90, 20, 90, 137, -272, -142, -212, 58, -11, -291,
+ -210, -291, -227, -142, 87, -142, -156, -196, -196, -142,
+ -202, 478, 480, 481, 482, 479, 484, 485, 486, 487,
+ 488, 489, 490, 491, 492, 493, 483, 457, 108, 110,
+ 109, 458, 459, 460, 342, 505, 506, 500, 503, 504,
+ 502, 501, 357, 358, 461, 462, 463, 111, 112, 113,
+ 114, 115, 116, 117, 464, 467, 465, 468, 469, 470,
+ 475, 476, 471, 472, 473, 474, 477, 494, 495, 496,
+ 497, 498, 499, 604, 605, 606, 607, 608, 609, 610,
+ 611, 90, 90, 87, -142, 89, 89, -253, -368, -59,
+ 89, -254, -252, 96, 89, 277, -211, -412, 90, -348,
+ -348, -348, 96, 96, -299, -413, 88, -291, -402, -370,
+ 515, 515, -413, 26, -376, -375, -293, 87, 78, 63,
+ 510, 513, -413, -413, 88, -413, -413, -413, 89, 89,
+ -413, -413, -413, 88, -413, -175, -177, -413, 77, -156,
+ -227, 20, -95, 299, 301, -95, -413, 88, -413, -413,
+ 88, -413, 88, -413, -413, -255, -413, -291, 244, 20,
+ 20, -255, -255, -195, -226, -105, -104, -103, 541, -142,
+ -207, -240, 55, 77, 122, 90, 90, 90, 13, -210,
+ 221, -232, -252, -173, 379, -227, -413, -252, 89, 26,
+ 89, 664, 137, 89, -211, -122, -412, 273, -299, 90,
+ 90, -112, -115, -11, 88, 151, -252, -188, 63, -142,
+ -207, -413, 77, 522, 623, -90, -89, -86, 634, 660,
+ -207, -92, -92, -142, -142, -142, 88, -413, -413, -413,
+ -105, 88, -102, -101, -291, 77, 122, -264, -291, 89,
+ -413, -412, -232, 89, -236, -11, 87, -3, 273, -323,
+ -373, -324, -325, -5, -6, -349, -80, 511, -375, -353,
+ -297, -293, 90, 96, 89, 511, -413, -413, -88, 145,
+ 632, 600, -143, -154, -151, 220, -413, 88, -413, 88,
+ -413, 88, -291, 244, -103, 88, 26, -300, -174, -172,
+ -291, 564, -393, -392, 507, -403, -399, 119, 143, 101,
+ -401, 602, 558, 128, 129, -80, -142, 87, -413, -81,
+ 288, 619, 221, -384, 512, -88, 633, 578, 553, 578,
+ 553, -142, -142, -142, -101, -412, -413, 88, 23, -315,
+ -61, 575, -390, -391, 77, -394, 385, 574, 595, 119,
+ 90, 89, -252, 249, -298, -377, 513, 142, -413, 88,
+ -413, 88, -413, -91, -172, 571, -328, -156, -391, 77,
+ -390, 77, 14, 13, -4, 663, 89, 290, -88, -142,
+ -142, -413, -60, 27, -173, -389, 257, 252, 255, 33,
+ -389, 96, -4, -413, -413, 575, 251, 32, 119, -156,
+ -176, -175, -175,
}
var yyDef = [...]int{
- 844, -2, -2, 846, 2, 4, 5, 6, 7, 8,
+ 872, -2, -2, 874, 2, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
- 29, 30, 31, 32, 33, 34, 35, 36, 69, 71,
- 72, 844, 844, 844, 0, 844, 0, 0, 844, -2,
- -2, 844, 1461, 0, 844, 0, 0, -2, 771, 777,
- 0, 779, -2, 0, 0, 844, 2008, 2008, 839, 0,
- 0, 0, 0, 0, 844, 844, 844, 844, 1318, 49,
- 844, 0, 84, 85, 795, 796, 797, 64, 0, 2006,
- 845, 1, 3, 70, 74, 0, 0, 0, 57, 1327,
- 0, 77, 0, 0, 848, 0, 0, 1444, 844, 844,
- 0, 116, 117, 0, 0, 0, -2, 120, -2, 149,
- 150, 151, 0, 156, 585, 508, 560, 506, 545, -2,
- 494, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 511, 384, 384, 0, 0, -2, 494,
- 494, 494, 1446, 0, 0, 0, 542, 446, 384, 384,
- 384, 0, 384, 384, 384, 384, 0, 0, 384, 384,
- 384, 384, 384, 384, 384, 384, 384, 384, 384, 384,
- 384, 384, 384, 384, 384, 1345, 155, 1462, 1459, 1460,
- 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623,
- 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633,
- 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643,
- 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653,
- 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663,
- 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673,
- 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683,
- 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693,
- 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703,
- 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713,
- 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723,
- 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733,
- 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743,
- 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753,
- 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763,
- 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773,
- 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783,
- 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793,
- 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803,
- 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813,
- 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823,
- 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833,
- 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843,
- 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853,
- 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863,
- 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873,
- 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883,
- 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893,
- 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903,
- 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913,
- 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923,
- 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933,
- 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943,
- 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953,
- 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963,
- 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973,
- 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983,
- 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993,
- 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
- 2004, 2005, 0, 1438, 0, 698, 947, 0, 760, 760,
- 0, 760, 760, 760, 760, 0, 0, 0, 710, 0,
- 0, 0, 0, 757, 0, 726, 727, 0, 757, 0,
- 733, 763, 0, 738, 760, 760, 741, 2009, 0, 2009,
- 2009, 1429, 0, 754, 752, 766, 767, 39, 770, 773,
- 774, 775, 776, 778, 0, 783, 786, 1455, 1456, 0,
- 788, 807, 808, 0, 840, 841, 44, 1095, 0, 969,
- 974, 985, 1000, 1001, 1002, 1003, 1004, 1006, 1007, 1008,
- 0, 0, 0, 0, 1013, 1014, 0, 0, 0, 0,
- 0, 1076, 1022, 0, 0, 0, 0, 1291, 0, 0,
- 1252, 1252, 1110, 1252, 1254, 1254, 1662, 1798, 1806, 1923,
- 1625, 1630, 1631, 1632, 1916, 1917, 1918, 1919, 1957, 1958,
- 1962, 1722, 0, 0, 0, 2005, 1759, 1767, 1768, 1792,
- 1889, 1943, 1642, 1787, 1855, 1719, 1741, 1742, 1871, 1872,
- 1763, 1764, 1745, 1757, 1760, 1748, 1749, 1751, 1753, 1758,
- 1765, 1771, 1750, 1770, 1769, 0, 1746, 1747, 1752, 1762,
- 1766, 1754, 1755, 1756, 1761, 1772, 0, 0, 0, 0,
- 0, 1191, 1192, 1193, 1194, 0, 0, 0, 0, 0,
- 0, 0, 280, 281, 1304, 1305, 42, 43, 1094, 1416,
- 1254, 1254, 1254, 1254, 1254, 1036, 1037, 1038, 1039, 1040,
- 1064, 1065, 1071, 1072, 1866, 1867, 1868, 1869, 1703, 1952,
- 1711, 1712, 1850, 1851, 1724, 1725, 1980, 1981, -2, -2,
- -2, 221, 222, 223, 224, 225, 226, 227, 228, 0,
- 1666, 1934, 1935, 217, 0, 0, 285, 286, 282, 283,
- 284, 1078, 1079, 238, 239, 240, 241, 242, 243, 244,
- 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
- 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
- 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
- 275, 276, 277, 278, 279, 2008, 0, 817, 0, 0,
- 0, 0, 0, 1327, 0, 1319, 1318, 62, 0, 844,
- -2, 0, 0, 0, 0, 46, 0, 51, 904, 847,
- 76, 75, 1367, 0, 0, 0, 58, 1328, 66, 68,
- 1329, 0, 849, 850, 0, 880, 884, 0, 0, 0,
- 1445, 1444, 1444, 101, 0, 0, 1420, 113, 114, 115,
- 0, 0, 1426, 1427, 1431, 1432, 0, 0, 167, 168,
- 0, 40, 411, 0, 163, 0, 404, 345, 0, 1345,
- 0, 0, 0, 0, 0, 844, 0, 1439, 144, 145,
- 152, 153, 154, 384, 384, 384, 557, 0, 0, 155,
- 155, 515, 516, 517, 0, 0, -2, 409, 0, 495,
- 0, 0, 398, 398, 402, 400, 401, 0, 0, 0,
- 0, 0, 0, 0, 0, 534, 0, 535, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 646, 0, 385,
- 0, 555, 556, 447, 0, 0, 0, 0, 0, 0,
- 0, 0, 1447, 1448, 0, 532, 533, 0, 0, 0,
- 384, 384, 0, 0, 0, 0, 384, 384, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 143, 1358, 0, 0,
- 0, -2, 0, 690, 0, 0, 0, 1440, 1440, 0,
- 697, 0, 699, 700, 0, 0, 701, 0, 757, 757,
- 755, 756, 703, 704, 705, 706, 760, 0, 0, 393,
- 394, 395, 757, 760, 0, 760, 760, 760, 760, 757,
- 757, 757, 760, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 2009, 763, 760, 0, 734, 0, 735, 736,
- 739, 740, 742, 2010, 2011, 1457, 1458, 1465, 1466, 1467,
- 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477,
- 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487,
- 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 70,
+ 72, 73, 872, 872, 872, 0, 872, 0, 0, 872,
+ -2, -2, 872, 1485, 0, 872, 0, 0, -2, 787,
+ 793, 0, 802, -2, 0, 0, 872, 872, 2040, 2040,
+ 867, 0, 0, 0, 0, 0, 872, 872, 872, 872,
+ 1346, 50, 872, 0, 85, 86, 822, 823, 824, 65,
+ 0, 2038, 873, 1, 3, 71, 75, 0, 0, 0,
+ 58, 1355, 0, 78, 0, 0, 876, 0, 0, 1468,
+ 872, 872, 0, 126, 127, 0, 0, 0, -2, 130,
+ -2, 159, 160, 161, 0, 166, 600, 523, 575, 521,
+ 560, -2, 509, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 526, 398, 398, 0, 0,
+ -2, 509, 509, 509, 1470, 0, 0, 0, 557, 460,
+ 398, 398, 398, 0, 398, 398, 398, 398, 0, 0,
+ 398, 398, 398, 398, 398, 398, 398, 398, 398, 398,
+ 398, 398, 398, 398, 398, 398, 398, 1373, 165, 1486,
+ 1483, 1484, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645,
+ 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655,
+ 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665,
+ 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675,
+ 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685,
+ 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695,
+ 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705,
+ 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715,
+ 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724, 1725,
+ 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734, 1735,
+ 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1745,
+ 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755,
+ 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765,
+ 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775,
+ 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785,
+ 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795,
+ 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805,
+ 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815,
+ 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825,
+ 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835,
+ 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845,
+ 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855,
+ 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865,
+ 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875,
+ 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885,
+ 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895,
+ 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905,
+ 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915,
+ 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925,
+ 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935,
+ 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945,
+ 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955,
+ 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965,
+ 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975,
+ 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985,
+ 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
+ 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
+ 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015,
+ 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025,
+ 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035,
+ 2036, 2037, 0, 1462, 0, 713, 975, 0, 776, 776,
+ 0, 776, 776, 776, 776, 0, 0, 0, 725, 0,
+ 0, 0, 0, 773, 0, 741, 742, 0, 773, 0,
+ 748, 779, 0, 0, 754, 776, 776, 757, 2041, 0,
+ 2041, 2041, 1453, 0, 770, 768, 782, 783, 40, 786,
+ 789, 790, 791, 792, 795, 0, 806, 809, 1479, 1480,
+ 0, 811, 818, 835, 836, 0, 868, 869, 45, 1123,
+ 0, 997, 1002, 1013, 1028, 1029, 1030, 1031, 1032, 1034,
+ 1035, 1036, 0, 0, 0, 0, 1041, 1042, 0, 0,
+ 0, 0, 0, 1104, 1050, 0, 0, 0, 0, 1319,
+ 0, 0, 1280, 1280, 1138, 1280, 1282, 1282, 1687, 1823,
+ 1831, 1951, 1649, 1654, 1655, 1656, 1944, 1945, 1946, 1947,
+ 1986, 1987, 1991, 1747, 0, 0, 0, 2037, 1784, 1792,
+ 1793, 1817, 1916, 1972, 1666, 1812, 1881, 1744, 1766, 1767,
+ 1898, 1899, 1788, 1789, 1770, 1782, 1785, 1773, 1774, 1776,
+ 1778, 1783, 1790, 1796, 1775, 1795, 1794, 0, 1771, 1772,
+ 1777, 1787, 1791, 1779, 1780, 1781, 1786, 1797, 0, 0,
+ 0, 0, 0, 1219, 1220, 1221, 1222, 0, 0, 0,
+ 0, 0, 0, 0, 290, 291, 1332, 1333, 43, 44,
+ 1122, 1449, 1282, 1282, 1282, 1282, 1282, 1064, 1065, 1066,
+ 1067, 1068, 1092, 1093, 1099, 1100, 1893, 1894, 1895, 1896,
+ 1728, 1981, 1736, 1737, 1876, 1877, 1749, 1750, 2012, 2013,
+ -2, -2, -2, 231, 232, 233, 234, 235, 236, 237,
+ 238, 0, 1691, 1962, 1963, 227, 0, 0, 295, 296,
+ 292, 293, 294, 1106, 1107, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 2040, 0, 845,
+ 0, 0, 0, 0, 0, 1355, 0, 1347, 1346, 63,
+ 0, 872, -2, 0, 0, 0, 0, 47, 0, 52,
+ 932, 875, 77, 76, 1395, 1398, 0, 0, 0, 59,
+ 1356, 67, 69, 1357, 0, 877, 878, 0, 908, 912,
+ 0, 0, 0, 1469, 1468, 1468, 102, 0, 0, 103,
+ 123, 124, 125, 0, 0, 109, 110, 1455, 1456, 0,
+ 0, 177, 178, 0, 41, 425, 0, 173, 0, 418,
+ 357, 0, 1373, 0, 0, 0, 0, 0, 872, 0,
+ 1463, 154, 155, 162, 163, 164, 398, 398, 398, 572,
+ 0, 0, 165, 165, 530, 531, 532, 0, 0, -2,
+ 423, 0, 510, 0, 0, 412, 412, 416, 414, 415,
+ 0, 0, 0, 0, 0, 0, 0, 0, 549, 0,
+ 550, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 661, 0, 399, 0, 570, 571, 461, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1471, 1472, 0, 547, 548,
+ 0, 0, 0, 398, 398, 0, 0, 0, 0, 398,
+ 398, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 153,
+ 1386, 0, 0, 0, -2, 0, 705, 0, 0, 0,
+ 1464, 1464, 0, 712, 0, 714, 715, 0, 0, 716,
+ 0, 773, 773, 771, 772, 718, 719, 720, 721, 776,
+ 0, 0, 407, 408, 409, 773, 776, 0, 776, 776,
+ 776, 776, 773, 773, 773, 776, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2041, 779, 776, 0, 749,
+ 0, 750, 751, 752, 755, 756, 758, 2042, 2043, 1481,
+ 1482, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497,
1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507,
1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517,
1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527,
@@ -8276,233 +8316,239 @@ var yyDef = [...]int{
1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587,
1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597,
1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607,
- 1608, 1609, 1610, 1611, 1612, 1613, 2009, 2009, 746, 750,
- 1430, 772, 784, 787, 802, 48, 1710, 794, 819, 820,
- 825, 0, 0, 0, 0, 831, 832, 833, 0, 0,
- 836, 837, 838, 0, 0, 0, 0, 0, 967, 0,
- 0, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 986, 987, 0, 0, 0,
- 1009, 1010, 1011, 1012, 1015, 0, 1027, 0, 1029, 1300,
- -2, 0, 0, 0, 1020, 1021, 0, 0, 0, 0,
- 0, 0, 0, 1292, 0, 0, 1108, 0, 1109, 1111,
- 1112, 0, 1113, 854, 854, 854, 854, 854, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 854, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1450,
- 131, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 864, 0,
- 0, 864, 864, 0, 0, 210, 211, 212, 213, 214,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 229, 230, 231, 232, 233, 234,
- 287, 235, 236, 237, 1094, 0, 0, 0, 45, 809,
- 810, 0, 930, 1450, 0, 0, 860, 0, 56, 65,
- 67, 1327, 60, 1327, 0, 866, 0, 0, -2, -2,
- 867, 873, 874, 875, 876, 877, 53, 2007, 54, 0,
- 73, 0, 47, 0, 0, 0, 0, 357, 1370, 0,
- 0, 1320, 1321, 1324, 0, 881, 1804, 885, 0, 887,
- 888, 0, 0, 99, 0, 946, 0, 0, 0, 0,
- 1428, 103, 104, 0, 0, 0, 368, 1433, 1434, 1435,
- -2, 391, 0, 368, 352, 295, 296, 297, 345, 299,
- 345, 345, 345, 345, 357, 357, 357, 357, 328, 329,
- 330, 331, 332, 0, 0, 314, 345, 345, 345, 345,
- 335, 336, 337, 338, 339, 340, 341, 342, 300, 301,
- 302, 303, 304, 305, 306, 307, 308, 347, 347, 347,
- 349, 349, 0, 41, 0, 372, 0, 1324, 0, 0,
- 1358, 1442, 1452, 0, 0, 0, 1442, 122, 0, 0,
- 0, 558, 596, 509, 546, 559, 0, 512, 513, -2,
- 0, 0, 494, 0, 496, 0, 392, 0, -2, 0,
- 402, 0, 398, 402, 399, 402, 390, 403, 536, 537,
- 538, 0, 540, 541, 626, 916, 0, 0, 0, 0,
- 0, 632, 633, 634, 0, 636, 637, 638, 639, 640,
- 641, 642, 643, 644, 645, 547, 548, 549, 550, 551,
- 552, 553, 554, 0, 0, 0, 0, 496, 0, 543,
- 0, 0, 448, 449, 450, 0, 0, 453, 454, 455,
- 456, 0, 0, 459, 460, 461, 933, 934, 462, 463,
- 488, 489, 490, 464, 465, 466, 467, 468, 469, 470,
- 482, 483, 484, 485, 486, 487, 471, 472, 473, 474,
- 475, 476, 479, 0, 137, 1349, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1440, 0, 0, 0, 0, 863, 948, 1463, 1464,
- 761, 762, 0, 396, 397, 760, 760, 707, 747, 0,
- 760, 711, 748, 712, 714, 713, 715, 728, 729, 760,
- 718, 758, 759, 719, 720, 721, 722, 723, 724, 725,
- 743, 730, 731, 732, 764, 0, 768, 769, 744, 745,
- 0, 785, 805, 803, 804, 806, 798, 799, 800, 801,
- 0, 0, 0, 822, 95, 827, 828, 829, 830, 842,
- 835, 1096, 964, 965, 966, 0, 968, 971, 0, 1080,
- 1082, 973, 975, 1091, 1092, 1093, 0, 0, 0, 0,
- 0, 979, 983, 988, 989, 990, 991, 992, 993, 994,
- 995, 996, 997, 998, 999, 1005, 1268, 1269, 1270, 1024,
- 288, 289, 0, 1025, 0, 0, 0, 0, 0, 0,
- 0, 1095, 1026, 0, 878, 0, 0, 1298, 1295, 0,
- 0, 0, 1253, 1255, 0, 0, 0, 0, 855, 856,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1231, 1232,
- 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242,
- 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1271,
- 0, 0, 0, 0, 0, 1291, 0, 1031, 1032, 1033,
- 0, 0, 0, 0, 0, 0, 1151, 0, 0, 0,
- 0, 1451, 0, 132, 133, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1195, 1196, 1197, 1198, 38, 0, 0, 0, 865,
- 1302, 0, -2, -2, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1220, 0, 0,
- 0, 0, 0, 0, 1414, 0, 0, 812, 813, 815,
- 0, 950, 0, 931, 0, 0, 818, 0, 859, 0,
- 862, 59, 61, 871, 872, 0, 889, 868, 55, 50,
- 0, 0, 908, 1368, 357, 1390, 0, 366, 366, 363,
- 1330, 1331, 0, 1323, 1325, 1326, 78, 886, 882, 0,
- 962, 0, 0, 945, 0, 892, 894, 895, 896, 928,
- 0, 899, 900, 0, 0, 0, 0, 0, 97, 947,
- 1421, 0, 102, 0, 0, 107, 108, 1422, 1423, 1424,
- 1425, 0, 585, -2, 443, 169, 171, 172, 173, 164,
- -2, 355, 353, 354, 298, 357, 357, 322, 323, 324,
- 325, 326, 327, 0, 0, 315, 316, 317, 318, 309,
- 0, 310, 311, 312, 0, 313, 410, 0, 1332, 373,
- 374, 376, 384, 0, 379, 380, 0, 384, 384, 0,
- 405, 406, 0, 1324, 1349, 0, 0, 0, 1453, 1452,
- 1452, 1452, 0, 157, 158, 159, 160, 161, 162, 621,
- 0, 0, 597, 619, 620, 155, 0, 0, 165, 498,
- 497, 0, 653, 0, 408, 0, 0, 402, 402, 387,
- 388, 539, 0, 0, 628, 629, 630, 631, 0, 0,
- 0, 525, 437, 0, 526, 527, 496, 498, 0, 0,
- 368, 451, 452, 457, 458, 477, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 572, 573, 574,
- 577, 579, 500, 583, 576, 578, 580, 500, 584, 1346,
- 1347, 1348, 0, 0, 691, 0, 0, 434, 93, 1441,
- 696, 757, 717, 749, 757, 709, 716, 737, 781, 789,
- 790, 791, 792, 793, 826, 0, 0, 0, 0, 834,
- 0, 0, 972, 1081, 1083, 976, 0, 980, 984, 0,
- 0, 0, 1030, 1028, 1302, 0, 0, 0, 1077, 0,
- 0, 1099, 1100, 0, 0, 0, 1296, 0, 0, 1106,
- 0, 1256, 1257, 1114, 0, 0, 0, 0, 0, 1120,
- 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1318,
- 0, 0, 0, 0, 0, 1135, 1136, 1137, 1138, 1139,
- 0, 1141, 0, 1142, 0, 0, 0, 0, 1149, 1150,
- 1152, 0, 0, 1155, 1156, 0, 0, 1157, 0, 0,
- 0, 1161, 0, 0, 0, 0, 1170, 1171, 1172, 1173,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1184, 1185, 0, 1059, 0, 0, 1059, 0, 1097, 864,
- 0, 1258, 1259, 1260, 1261, 1262, 0, 0, 0, 0,
- 0, 0, 1218, 1219, 1221, 0, 0, 1224, 0, 1226,
- 0, 1415, 811, 814, 816, 902, 951, 952, 0, 0,
- 0, 0, 932, 1449, 857, 858, 861, 910, 0, 1306,
- 0, 0, 889, 962, 890, 0, 869, 52, 905, 0,
- 1372, 1371, 1384, 1397, 366, 366, 360, 361, 367, 362,
- 364, 365, 1322, 0, 1327, 0, 1408, 0, 0, 1400,
- 0, 0, 0, 0, 0, 0, 0, 0, 935, 0,
- 0, 938, 0, 0, 0, 0, 929, 900, 0, 901,
- 0, -2, 0, 0, 91, 92, 0, 0, 0, 105,
- 106, 0, 0, 112, 369, 370, 146, 155, 445, 170,
- 418, 0, 0, 294, 356, 319, 320, 321, 0, 343,
- 0, 0, 0, 439, 118, 1336, 1335, 384, 384, 375,
- 0, 378, 0, 0, 0, 1454, 346, 407, 0, 136,
- 0, 0, 0, 0, 0, 142, 591, 0, 0, 598,
- 0, 0, 0, 507, 0, 518, 519, 0, 625, -2,
- 687, 372, 0, 386, 389, 917, 0, 0, 520, 0,
- 523, 524, 438, 498, 529, 530, 544, 531, 480, 481,
- 478, 0, 0, 1359, 1360, 1365, 1363, 1364, 123, 565,
- 567, 566, 570, 0, 0, 0, 502, 0, 502, 563,
- 0, 434, 1332, 0, 695, 435, 436, 760, 760, 821,
- 96, 0, 824, 0, 0, 0, 0, 977, 981, 1263,
- 1289, 345, 345, 1276, 345, 349, 1279, 345, 1281, 345,
- 1284, 345, 1287, 1288, 0, 0, 0, 879, 0, 0,
- 1105, 1299, 0, 0, 1115, 1116, 1117, 1118, 1119, 1293,
- 0, 0, 0, 1134, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 134, 135, 0, 0, 0, 0,
- 0, 0, 1229, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1054, 1058, 0, 1060, 1061, 0, 0,
- 1187, 0, 0, 1199, 0, 1303, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 953, 958, 958, 958,
- 0, 0, 0, 1436, 1437, 1307, 1308, 962, 1309, 891,
- 870, 909, 1390, 0, 1383, 0, -2, 1392, 0, 0,
- 0, 1398, 358, 359, 883, 79, 963, 82, 0, 1408,
- 1417, 0, 1399, 1410, 1412, 0, 0, 0, 1404, 0,
- 962, 893, 924, 926, 0, 921, 936, 937, 939, 0,
- 941, 0, 943, 944, 904, 898, 0, 99, 0, 962,
- 962, 98, 0, 949, 109, 110, 111, 444, 174, 179,
- 0, 0, 0, 184, 0, 186, 0, 0, 0, 191,
- 192, 384, 384, 419, 0, 291, 293, 0, 0, 177,
- 357, 0, 357, 0, 350, 0, 420, 440, 1333, 1334,
- 0, 0, 377, 381, 382, 383, 0, 1443, 138, 0,
- 0, 0, 594, 0, 622, 0, 0, 0, 0, 0,
- 0, 166, 499, 654, 655, 656, 657, 658, 659, 660,
- 661, 662, 0, 384, 0, 0, 0, 384, 384, 384,
- 0, 679, 371, 0, 0, 650, 647, 521, 0, 215,
- 216, 218, 0, 0, 0, 0, 0, 528, 904, 1350,
- 1351, 1352, 0, 1362, 1366, 126, 0, 0, 0, 0,
- 575, 581, 0, 501, 582, 692, 693, 694, 94, 702,
- 708, 823, 843, 970, 978, 982, 0, 0, 0, 0,
- 1290, 1274, 357, 1277, 1278, 1280, 1282, 1283, 1285, 1286,
- 1018, 1019, 1023, 0, 1102, 0, 1104, 1297, 0, 1327,
- 0, 0, 0, 1133, 0, 0, 0, 1144, 1143, 1145,
- 0, 1147, 1148, 1153, 1154, 1158, 0, 1160, 1162, 1163,
- 0, 0, 0, 1174, 1175, 1176, 1177, 1178, 1179, 1180,
- 1181, 1182, 1183, 0, 1052, 1055, 1186, 1062, 1063, 1068,
- 1189, 0, 0, 1098, 1201, 0, 1206, 0, 0, 1212,
- 0, 1216, 0, 1222, 1223, 1225, 1227, 0, 0, 0,
- 0, 0, 930, 911, 63, 1309, 1311, 0, 1377, 1375,
- 1375, 1385, 1386, 0, 0, 1393, 0, 0, 0, 0,
- 83, 0, 0, 0, 1413, 0, 0, 0, 0, 100,
- 1318, 918, 925, 0, 0, 919, 0, 920, 940, 942,
- 897, 0, 962, 962, 89, 90, 0, 180, 0, 182,
- 208, 209, 0, 185, 187, 188, 189, 195, 196, 197,
- 190, 0, 0, 290, 292, 0, 0, 333, 344, 334,
- 0, 0, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344,
- 904, 139, 140, 141, 586, 0, 596, 0, 906, 0,
- 589, 0, 510, 0, 0, 0, 384, 384, 384, 0,
- 0, 0, 0, 664, 0, 0, 627, 0, 635, 0,
- 0, 0, 219, 220, 0, 1361, 564, 0, 124, 125,
- 0, 0, 569, 503, 504, 1016, 0, 0, 0, 1017,
- 1275, 0, 0, 0, 0, 1294, 0, 0, 0, 0,
- 1140, 0, 0, 0, 1166, 0, 0, 0, 616, 617,
- 0, 1230, 1057, 1318, 0, 1059, 1069, 1070, 0, 1059,
- 1200, 0, 0, 0, 0, 0, 0, 0, 959, 0,
- 0, 0, 0, 950, 1311, 1316, 0, 0, 1380, 0,
- 1373, 1376, 1374, 1387, 0, 0, 1394, 0, 1396, 0,
- 1418, 1419, 1411, 0, 1403, 1406, 1402, 1405, 1327, 922,
- 0, 927, 0, 1318, 88, 0, 183, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 193, 194, 0,
- 0, 348, 351, 0, 0, 0, 587, 0, 907, 599,
- 590, 0, 677, 0, 681, 0, 0, 0, 684, 685,
- 686, 663, 0, 667, 412, 651, 648, 649, 522, 0,
- 127, 128, 0, 0, 0, 1264, 0, 1267, 1101, 1103,
- 0, 1130, 1131, 1132, 1272, 1273, 1146, 1159, 1164, 0,
- 1167, 0, 0, 1168, 0, 618, 1048, 0, 0, 1066,
- 1067, 0, 1202, 0, 1207, 1208, 0, 1213, 0, 1217,
- 1228, 0, 955, 912, 913, 960, 961, 0, 0, 903,
- 1316, 81, 1317, 1314, 0, 1312, 1310, 1369, 0, 1378,
- 1379, 1388, 1389, 1395, 0, 1401, 0, 86, 0, 0,
- 0, 1327, 181, 0, 200, 0, 595, 0, 598, 588,
- 675, 676, 0, 688, 680, 682, 683, 665, -2, 1353,
- 0, 0, 0, 571, 1265, 0, 0, 1169, 0, 614,
- 615, 1056, 1049, 0, 1034, 1035, 1053, 1188, 1190, 0,
- 0, 0, 0, 954, 956, 957, 80, 0, 1313, 1074,
- 0, 1381, 1382, 1409, 1407, 923, 930, 0, 87, 425,
- 418, 1353, 0, 0, 0, 668, 669, 670, 671, 672,
- 673, 674, 561, 1355, 129, 130, 491, 492, 493, 123,
- 0, 1107, 1165, 1050, 0, 0, 0, 0, 1046, 1047,
- 0, 1203, 0, 1209, 0, 1214, 0, 914, 915, 1315,
- 0, 0, 600, 0, 602, 0, -2, 413, 426, 0,
- 175, 201, 202, 0, 0, 205, 206, 207, 198, 199,
- 119, 0, 0, 689, 0, 1356, 1357, 126, 0, 0,
- 1041, 1042, 1043, 1044, 1045, 0, 0, 0, 1075, 1054,
- 601, 0, 0, 368, 0, 611, 414, 415, 0, 421,
- 422, 423, 424, 203, 204, 623, 0, 0, 568, 1266,
- 0, 1204, 0, 1210, 0, 1215, 0, 603, 604, 612,
- 0, 416, 0, 417, 0, 0, 0, 592, 0, 623,
- 1354, 1051, 0, 0, 1073, 0, 613, 609, 427, 429,
- 430, 0, 0, 428, 624, 593, 1205, 1211, 0, 431,
- 432, 433, 605, 606, 607, 608,
+ 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617,
+ 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627,
+ 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637,
+ 2041, 2041, 762, 766, 1454, 788, 794, 796, 797, 0,
+ 0, 807, 810, 829, 49, 1735, 817, 49, 819, 820,
+ 821, 847, 848, 853, 0, 0, 0, 0, 859, 860,
+ 861, 0, 0, 864, 865, 866, 0, 0, 0, 0,
+ 0, 995, 0, 0, 1112, 1113, 1114, 1115, 1116, 1117,
+ 1118, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1014, 1015,
+ 0, 0, 0, 1037, 1038, 1039, 1040, 1043, 0, 1055,
+ 0, 1057, 1328, -2, 0, 0, 0, 1048, 1049, 0,
+ 0, 0, 0, 0, 0, 0, 1320, 0, 0, 1136,
+ 0, 1137, 1139, 1140, 0, 1141, 882, 882, 882, 882,
+ 882, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 882, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1474, 141, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 892, 0, 0, 892, 892, 0, 0, 220, 221,
+ 222, 223, 224, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 239, 240, 241,
+ 242, 243, 244, 297, 245, 246, 247, 1122, 0, 0,
+ 0, 46, 837, 838, 0, 958, 1474, 0, 0, 888,
+ 0, 57, 66, 68, 1355, 61, 1355, 0, 894, 0,
+ 0, -2, -2, 895, 901, 902, 903, 904, 905, 54,
+ 2039, 55, 0, 74, 0, 48, 0, 0, 1396, 0,
+ 1399, 0, 0, 0, 371, 1403, 0, 0, 1348, 1349,
+ 1352, 0, 909, 1829, 913, 0, 915, 916, 0, 0,
+ 100, 0, 974, 0, 0, 0, 111, 0, 113, 114,
+ 0, 0, 0, 382, 1457, 1458, 1459, -2, 405, 0,
+ 382, 366, 305, 306, 307, 357, 309, 357, 357, 357,
+ 357, 371, 371, 371, 371, 340, 341, 342, 343, 344,
+ 0, 0, 326, 357, 357, 357, 357, 347, 348, 349,
+ 350, 351, 352, 353, 354, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 359, 359, 359, 359, 359, 363,
+ 363, 0, 42, 0, 386, 0, 1352, 0, 0, 1386,
+ 1466, 1476, 0, 0, 0, 1466, 132, 0, 0, 0,
+ 573, 611, 524, 561, 574, 0, 527, 528, -2, 0,
+ 0, 509, 0, 511, 0, 406, 0, -2, 0, 416,
+ 0, 412, 416, 413, 416, 404, 417, 551, 552, 553,
+ 0, 555, 556, 641, 944, 0, 0, 0, 0, 0,
+ 647, 648, 649, 0, 651, 652, 653, 654, 655, 656,
+ 657, 658, 659, 660, 562, 563, 564, 565, 566, 567,
+ 568, 569, 0, 0, 0, 0, 511, 0, 558, 0,
+ 0, 462, 463, 464, 0, 0, 467, 468, 469, 470,
+ 0, 0, 473, 474, 475, 961, 962, 476, 477, 502,
+ 503, 504, 478, 479, 480, 481, 482, 483, 484, 496,
+ 497, 498, 499, 500, 501, 485, 486, 487, 488, 489,
+ 490, 493, 0, 147, 1377, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1464, 0, 0, 0, 0, 891, 976, 1487, 1488, 777,
+ 778, 0, 410, 411, 776, 776, 722, 763, 0, 776,
+ 726, 764, 727, 729, 728, 730, 743, 744, 776, 733,
+ 774, 775, 734, 735, 736, 737, 738, 739, 740, 759,
+ 745, 746, 747, 780, 0, 784, 785, 760, 761, 0,
+ 0, 800, 801, 0, 808, 832, 830, 831, 833, 825,
+ 826, 827, 828, 0, 834, 0, 0, 850, 96, 855,
+ 856, 857, 858, 870, 863, 1124, 992, 993, 994, 0,
+ 996, 999, 0, 1108, 1110, 1001, 1003, 1119, 1120, 1121,
+ 0, 0, 0, 0, 0, 1007, 1011, 1016, 1017, 1018,
+ 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1033,
+ 1296, 1297, 1298, 1052, 298, 299, 0, 1053, 0, 0,
+ 0, 0, 0, 0, 0, 1123, 1054, 0, 906, 0,
+ 0, 1326, 1323, 0, 0, 0, 1281, 1283, 0, 0,
+ 0, 0, 883, 884, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266,
+ 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276,
+ 1277, 1278, 1279, 1299, 0, 0, 0, 0, 0, 1319,
+ 0, 1059, 1060, 1061, 0, 0, 0, 0, 0, 0,
+ 1179, 0, 0, 0, 0, 1475, 0, 142, 143, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1223, 1224, 1225, 1226, 39,
+ 0, 0, 0, 893, 1330, 0, -2, -2, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1248, 0, 0, 0, 0, 0, 0, 1447, 0,
+ 0, 840, 841, 843, 0, 978, 0, 959, 0, 0,
+ 846, 0, 887, 0, 890, 60, 62, 899, 900, 0,
+ 917, 896, 56, 51, 0, 0, 936, 1397, 1400, 1401,
+ 371, 1423, 0, 380, 380, 377, 1358, 1359, 0, 1351,
+ 1353, 1354, 79, 914, 910, 0, 990, 0, 0, 973,
+ 0, 920, 922, 923, 924, 956, 0, 927, 928, 0,
+ 0, 0, 0, 0, 98, 975, 104, 0, 112, 0,
+ 0, 117, 118, 105, 106, 107, 108, 0, 600, -2,
+ 457, 179, 181, 182, 183, 174, -2, 369, 367, 368,
+ 308, 371, 371, 334, 335, 336, 337, 338, 339, 0,
+ 0, 327, 328, 329, 330, 319, 0, 320, 321, 322,
+ 361, 0, 323, 324, 0, 325, 424, 0, 1360, 387,
+ 388, 390, 398, 0, 393, 394, 0, 398, 398, 0,
+ 419, 420, 0, 1352, 1377, 0, 0, 0, 1477, 1476,
+ 1476, 1476, 0, 167, 168, 169, 170, 171, 172, 636,
+ 0, 0, 612, 634, 635, 165, 0, 0, 175, 513,
+ 512, 0, 668, 0, 422, 0, 0, 416, 416, 401,
+ 402, 554, 0, 0, 643, 644, 645, 646, 0, 0,
+ 0, 540, 451, 0, 541, 542, 511, 513, 0, 0,
+ 382, 465, 466, 471, 472, 491, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 587, 588, 589,
+ 592, 594, 515, 598, 591, 593, 595, 515, 599, 1374,
+ 1375, 1376, 0, 0, 706, 0, 0, 448, 94, 1465,
+ 711, 773, 732, 765, 773, 724, 731, 753, 798, 799,
+ 804, 812, 813, 814, 815, 816, 854, 0, 0, 0,
+ 0, 862, 0, 0, 1000, 1109, 1111, 1004, 0, 1008,
+ 1012, 0, 0, 0, 1058, 1056, 1330, 0, 0, 0,
+ 1105, 0, 0, 1127, 1128, 0, 0, 0, 1324, 0,
+ 0, 1134, 0, 1284, 1285, 1142, 0, 0, 0, 0,
+ 0, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156,
+ 1157, 1346, 0, 0, 0, 0, 0, 1163, 1164, 1165,
+ 1166, 1167, 0, 1169, 0, 1170, 0, 0, 0, 0,
+ 1177, 1178, 1180, 0, 0, 1183, 1184, 0, 0, 1185,
+ 0, 0, 0, 1189, 0, 0, 0, 0, 1198, 1199,
+ 1200, 1201, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1212, 1213, 0, 1087, 0, 0, 1087, 0,
+ 1125, 892, 0, 1286, 1287, 1288, 1289, 1290, 0, 0,
+ 0, 0, 0, 0, 1246, 1247, 1249, 0, 0, 1252,
+ 0, 1254, 0, 1448, 839, 842, 844, 930, 979, 980,
+ 0, 0, 0, 0, 960, 1473, 885, 886, 889, 938,
+ 0, 1334, 0, 0, 917, 990, 918, 0, 897, 53,
+ 933, 0, 1405, 1404, 1417, 1430, 380, 380, 374, 375,
+ 381, 376, 378, 379, 1350, 0, 1355, 0, 1441, 0,
+ 0, 1433, 0, 0, 0, 0, 0, 0, 0, 0,
+ 963, 0, 0, 966, 0, 0, 0, 0, 957, 928,
+ 0, 929, 0, -2, 0, 0, 92, 93, 0, 0,
+ 0, 115, 116, 0, 0, 122, 383, 384, 156, 165,
+ 459, 180, 432, 0, 0, 304, 370, 331, 332, 333,
+ 0, 355, 0, 0, 0, 0, 453, 128, 1364, 1363,
+ 398, 398, 389, 0, 392, 0, 0, 0, 1478, 358,
+ 421, 0, 146, 0, 0, 0, 0, 0, 152, 606,
+ 0, 0, 613, 0, 0, 0, 522, 0, 533, 534,
+ 0, 640, -2, 702, 386, 0, 400, 403, 945, 0,
+ 0, 535, 0, 538, 539, 452, 513, 544, 545, 559,
+ 546, 494, 495, 492, 0, 0, 1387, 1388, 1393, 1391,
+ 1392, 133, 580, 582, 581, 585, 0, 0, 0, 517,
+ 0, 517, 578, 0, 448, 1360, 0, 710, 449, 450,
+ 776, 776, 849, 97, 0, 852, 0, 0, 0, 0,
+ 1005, 1009, 1291, 1317, 357, 357, 1304, 357, 363, 1307,
+ 357, 1309, 357, 1312, 357, 1315, 1316, 0, 0, 0,
+ 907, 0, 0, 1133, 1327, 0, 0, 1143, 1144, 1145,
+ 1146, 1147, 1321, 0, 0, 0, 1162, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 144, 145, 0,
+ 0, 0, 0, 0, 0, 1257, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1082, 1086, 0, 1088,
+ 1089, 0, 0, 1215, 0, 0, 1227, 0, 1331, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 981,
+ 986, 986, 986, 0, 0, 0, 1460, 1461, 1335, 1336,
+ 990, 1337, 919, 898, 937, 1423, 0, 1416, 0, -2,
+ 1425, 0, 0, 0, 1431, 372, 373, 911, 80, 991,
+ 83, 0, 1441, 1450, 0, 1432, 1443, 1445, 0, 0,
+ 0, 1437, 0, 990, 921, 952, 954, 0, 949, 964,
+ 965, 967, 0, 969, 0, 971, 972, 932, 926, 0,
+ 100, 0, 990, 990, 99, 0, 977, 119, 120, 121,
+ 458, 184, 189, 0, 0, 0, 194, 0, 196, 0,
+ 0, 0, 201, 202, 398, 398, 433, 0, 301, 303,
+ 0, 0, 187, 371, 0, 371, 0, 362, 364, 0,
+ 434, 454, 1361, 1362, 0, 0, 391, 395, 396, 397,
+ 0, 1467, 148, 0, 0, 0, 609, 0, 637, 0,
+ 0, 0, 0, 0, 0, 176, 514, 669, 670, 671,
+ 672, 673, 674, 675, 676, 677, 0, 398, 0, 0,
+ 0, 398, 398, 398, 0, 694, 385, 0, 0, 665,
+ 662, 536, 0, 225, 226, 228, 0, 0, 0, 0,
+ 0, 543, 932, 1378, 1379, 1380, 0, 1390, 1394, 136,
+ 0, 0, 0, 0, 590, 596, 0, 516, 597, 707,
+ 708, 709, 95, 717, 723, 851, 871, 998, 1006, 1010,
+ 0, 0, 0, 0, 1318, 1302, 371, 1305, 1306, 1308,
+ 1310, 1311, 1313, 1314, 1046, 1047, 1051, 0, 1130, 0,
+ 1132, 1325, 0, 1355, 0, 0, 0, 1161, 0, 0,
+ 0, 1172, 1171, 1173, 0, 1175, 1176, 1181, 1182, 1186,
+ 0, 1188, 1190, 1191, 0, 0, 0, 1202, 1203, 1204,
+ 1205, 1206, 1207, 1208, 1209, 1210, 1211, 0, 1080, 1083,
+ 1214, 1090, 1091, 1096, 1217, 0, 0, 1126, 1229, 0,
+ 1234, 0, 0, 1240, 0, 1244, 0, 1250, 1251, 1253,
+ 1255, 0, 0, 0, 0, 0, 958, 939, 64, 1337,
+ 1339, 0, 1410, 1408, 1408, 1418, 1419, 0, 0, 1426,
+ 0, 0, 0, 0, 84, 0, 0, 0, 1446, 0,
+ 0, 0, 0, 101, 1346, 946, 953, 0, 0, 947,
+ 0, 948, 968, 970, 925, 0, 990, 990, 90, 91,
+ 0, 190, 0, 192, 218, 219, 0, 195, 197, 198,
+ 199, 205, 206, 207, 200, 0, 0, 300, 302, 0,
+ 0, 345, 356, 346, 0, 0, 1365, 1366, 1367, 1368,
+ 1369, 1370, 1371, 1372, 932, 149, 150, 151, 601, 0,
+ 611, 0, 934, 0, 604, 0, 525, 0, 0, 0,
+ 398, 398, 398, 0, 0, 0, 0, 679, 0, 0,
+ 642, 0, 650, 0, 0, 0, 229, 230, 0, 1389,
+ 579, 0, 134, 135, 0, 0, 584, 518, 519, 1044,
+ 0, 0, 0, 1045, 1303, 0, 0, 0, 0, 1322,
+ 0, 0, 0, 0, 1168, 0, 0, 0, 1194, 0,
+ 0, 0, 631, 632, 0, 1258, 1085, 1346, 0, 1087,
+ 1097, 1098, 0, 1087, 1228, 0, 0, 0, 0, 0,
+ 0, 0, 987, 0, 0, 0, 0, 978, 1339, 1344,
+ 0, 0, 1413, 0, 1406, 1409, 1407, 1420, 0, 0,
+ 1427, 0, 1429, 0, 1451, 1452, 1444, 0, 1436, 1439,
+ 1435, 1438, 1355, 950, 0, 955, 0, 1346, 89, 0,
+ 193, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 203, 204, 0, 0, 360, 365, 0, 0, 0,
+ 602, 0, 935, 614, 605, 0, 692, 0, 696, 0,
+ 0, 0, 699, 700, 701, 678, 0, 682, 426, 666,
+ 663, 664, 537, 0, 137, 138, 0, 0, 0, 1292,
+ 0, 1295, 1129, 1131, 0, 1158, 1159, 1160, 1300, 1301,
+ 1174, 1187, 1192, 0, 1195, 0, 0, 1196, 0, 633,
+ 1076, 0, 0, 1094, 1095, 0, 1230, 0, 1235, 1236,
+ 0, 1241, 0, 1245, 1256, 0, 983, 940, 941, 988,
+ 989, 0, 0, 931, 1344, 82, 1345, 1342, 0, 1340,
+ 1338, 1402, 0, 1411, 1412, 1421, 1422, 1428, 0, 1434,
+ 0, 87, 0, 0, 0, 1355, 191, 0, 210, 0,
+ 610, 0, 613, 603, 690, 691, 0, 703, 695, 697,
+ 698, 680, -2, 1381, 0, 0, 0, 586, 1293, 0,
+ 0, 1197, 0, 629, 630, 1084, 1077, 0, 1062, 1063,
+ 1081, 1216, 1218, 0, 0, 0, 0, 982, 984, 985,
+ 81, 0, 1341, 1102, 0, 1414, 1415, 1442, 1440, 951,
+ 958, 0, 88, 439, 432, 1381, 0, 0, 0, 683,
+ 684, 685, 686, 687, 688, 689, 576, 1383, 139, 140,
+ 0, 506, 507, 508, 133, 0, 1135, 1193, 1078, 0,
+ 0, 0, 0, 1074, 1075, 0, 1231, 0, 1237, 0,
+ 1242, 0, 942, 943, 1343, 0, 0, 615, 0, 617,
+ 0, -2, 427, 440, 0, 185, 211, 212, 0, 0,
+ 215, 216, 217, 208, 209, 129, 0, 0, 704, 0,
+ 1384, 1385, 0, 136, 0, 0, 1069, 1070, 1071, 1072,
+ 1073, 0, 0, 0, 1103, 1082, 616, 0, 0, 382,
+ 0, 626, 428, 429, 0, 435, 436, 437, 438, 213,
+ 214, 638, 0, 0, 505, 583, 1294, 0, 1232, 0,
+ 1238, 0, 1243, 0, 618, 619, 627, 0, 430, 0,
+ 431, 0, 0, 0, 607, 0, 638, 1382, 1079, 0,
+ 0, 1101, 0, 628, 624, 441, 443, 444, 0, 0,
+ 442, 639, 608, 1233, 1239, 0, 445, 446, 447, 620,
+ 621, 622, 623,
}
var yyTok1 = [...]int{
@@ -8511,7 +8557,7 @@ var yyTok1 = [...]int{
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 144, 3, 3, 3, 171, 163, 3,
87, 89, 168, 166, 88, 167, 221, 169, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 657,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 665,
152, 151, 153, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
@@ -8627,7 +8673,9 @@ var yyTok3 = [...]int{
57965, 640, 57966, 641, 57967, 642, 57968, 643, 57969, 644,
57970, 645, 57971, 646, 57972, 647, 57973, 648, 57974, 649,
57975, 650, 57976, 651, 57977, 652, 57978, 653, 57979, 654,
- 57980, 655, 57981, 656, 0,
+ 57980, 655, 57981, 656, 57982, 657, 57983, 658, 57984, 659,
+ 57985, 660, 57986, 661, 57987, 662, 57988, 663, 57989, 664,
+ 0,
}
var yyErrorMessages = [...]struct {
@@ -8977,7 +9025,7 @@ yydefault:
case 1:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:598
+//line sql.y:590
{
stmt := yyDollar[2].statementUnion()
// If the statement is empty and we have comments
@@ -8991,199 +9039,199 @@ yydefault:
}
case 2:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:611
+//line sql.y:603
{
}
case 3:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:612
+//line sql.y:604
{
}
case 4:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:616
+//line sql.y:608
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 37:
+ case 38:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:652
+//line sql.y:645
{
setParseTree(yylex, nil)
}
- case 38:
+ case 39:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:658
+//line sql.y:651
{
yyLOCAL = NewVariableExpression(yyDollar[1].str, SingleAt)
}
yyVAL.union = yyLOCAL
- case 39:
+ case 40:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:664
+//line sql.y:657
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
- case 40:
+ case 41:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:669
+//line sql.y:662
{
yyVAL.identifierCI = NewIdentifierCI("")
}
- case 41:
+ case 42:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:673
+//line sql.y:666
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 42:
+ case 43:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:679
+//line sql.y:672
{
yyLOCAL = NewVariableExpression(string(yyDollar[1].str), SingleAt)
}
yyVAL.union = yyLOCAL
- case 43:
+ case 44:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:683
+//line sql.y:676
{
yyLOCAL = NewVariableExpression(string(yyDollar[1].str), DoubleAt)
}
yyVAL.union = yyLOCAL
- case 44:
+ case 45:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:689
+//line sql.y:682
{
yyLOCAL = &OtherAdmin{}
}
yyVAL.union = yyLOCAL
- case 45:
+ case 46:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:695
+//line sql.y:688
{
yyLOCAL = &Load{}
}
yyVAL.union = yyLOCAL
- case 46:
+ case 47:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *With
-//line sql.y:701
+//line sql.y:694
{
yyLOCAL = &With{ctes: yyDollar[2].ctesUnion(), Recursive: false}
}
yyVAL.union = yyLOCAL
- case 47:
+ case 48:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *With
-//line sql.y:705
+//line sql.y:698
{
yyLOCAL = &With{ctes: yyDollar[3].ctesUnion(), Recursive: true}
}
yyVAL.union = yyLOCAL
- case 48:
+ case 49:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *With
-//line sql.y:710
+//line sql.y:703
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 49:
+ case 50:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *With
-//line sql.y:714
+//line sql.y:707
{
yyLOCAL = yyDollar[1].withUnion()
}
yyVAL.union = yyLOCAL
- case 50:
+ case 51:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:720
+//line sql.y:713
{
yySLICE := (*[]*CommonTableExpr)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].cteUnion())
}
- case 51:
+ case 52:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*CommonTableExpr
-//line sql.y:724
+//line sql.y:717
{
yyLOCAL = []*CommonTableExpr{yyDollar[1].cteUnion()}
}
yyVAL.union = yyLOCAL
- case 52:
+ case 53:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *CommonTableExpr
-//line sql.y:730
+//line sql.y:723
{
yyLOCAL = &CommonTableExpr{ID: yyDollar[1].identifierCS, Columns: yyDollar[2].columnsUnion(), Subquery: yyDollar[4].subqueryUnion()}
}
yyVAL.union = yyLOCAL
- case 53:
+ case 54:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:736
+//line sql.y:729
{
yyLOCAL = yyDollar[2].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 54:
+ case 55:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:740
+//line sql.y:733
{
yyLOCAL = yyDollar[2].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 55:
+ case 56:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:744
+//line sql.y:737
{
setLockInSelect(yyDollar[2].selStmtUnion(), yyDollar[3].lockUnion())
yyLOCAL = yyDollar[2].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 56:
+ case 57:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:767
+//line sql.y:760
{
yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion())
yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 57:
+ case 58:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:773
+//line sql.y:766
{
yyDollar[1].selStmtUnion().SetLimit(yyDollar[2].limitUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 58:
+ case 59:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:778
+//line sql.y:771
{
yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion())
yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 59:
+ case 60:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:784
+//line sql.y:777
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion())
@@ -9191,20 +9239,20 @@ yydefault:
yyLOCAL = yyDollar[2].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 60:
+ case 61:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:791
+//line sql.y:784
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
yyDollar[2].selStmtUnion().SetLimit(yyDollar[3].limitUnion())
yyLOCAL = yyDollar[2].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 61:
+ case 62:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:797
+//line sql.y:790
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion())
@@ -9212,175 +9260,175 @@ yydefault:
yyLOCAL = yyDollar[2].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 62:
+ case 63:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:804
+//line sql.y:797
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
}
- case 63:
+ case 64:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:808
+//line sql.y:801
{
yyLOCAL = NewSelect(Comments(yyDollar[2].strs), SelectExprs{&Nextval{Expr: yyDollar[5].exprUnion()}}, []string{yyDollar[3].str} /*options*/, nil, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/, nil)
}
yyVAL.union = yyLOCAL
- case 64:
+ case 65:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:814
+//line sql.y:807
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 65:
+ case 66:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:818
+//line sql.y:811
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 66:
+ case 67:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:822
+//line sql.y:815
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 67:
+ case 68:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:826
+//line sql.y:819
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 68:
+ case 69:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:830
+//line sql.y:823
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 69:
+ case 70:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:836
+//line sql.y:829
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 70:
+ case 71:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:840
+//line sql.y:833
{
setLockInSelect(yyDollar[1].selStmtUnion(), yyDollar[2].lockUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 71:
+ case 72:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:845
+//line sql.y:838
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 72:
+ case 73:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:849
+//line sql.y:842
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 73:
+ case 74:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:855
+//line sql.y:848
{
yyLOCAL = yyDollar[2].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 74:
+ case 75:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:859
+//line sql.y:852
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 75:
+ case 76:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:864
+//line sql.y:857
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion())
yyDollar[1].selStmtUnion().SetLock(yyDollar[3].lockUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 76:
+ case 77:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:870
+//line sql.y:863
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[3].selectIntoUnion())
yyDollar[1].selStmtUnion().SetLock(yyDollar[2].lockUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 77:
+ case 78:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:876
+//line sql.y:869
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 78:
+ case 79:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:883
+//line sql.y:876
{
yyLOCAL = &Stream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName}
}
yyVAL.union = yyLOCAL
- case 79:
+ case 80:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:889
+//line sql.y:882
{
yyLOCAL = &VStream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName, Where: NewWhere(WhereClause, yyDollar[6].exprUnion()), Limit: yyDollar[7].limitUnion()}
}
yyVAL.union = yyLOCAL
- case 80:
+ case 81:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:897
+//line sql.y:890
{
yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].selectIntoUnion() /*into*/, yyDollar[6].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[7].exprUnion()), GroupBy(yyDollar[8].exprsUnion()), NewWhere(HavingClause, yyDollar[9].exprUnion()), yyDollar[10].namedWindowsUnion())
}
yyVAL.union = yyLOCAL
- case 81:
+ case 82:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:901
+//line sql.y:894
{
yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, nil, yyDollar[5].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[6].exprUnion()), GroupBy(yyDollar[7].exprsUnion()), NewWhere(HavingClause, yyDollar[8].exprUnion()), yyDollar[9].namedWindowsUnion())
}
yyVAL.union = yyLOCAL
- case 82:
+ case 83:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:907
+//line sql.y:900
{
// insert_data returns a *Insert pre-filled with Columns & Values
ins := yyDollar[6].insUnion()
@@ -9393,10 +9441,10 @@ yydefault:
yyLOCAL = ins
}
yyVAL.union = yyLOCAL
- case 83:
+ case 84:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:919
+//line sql.y:912
{
cols := make(Columns, 0, len(yyDollar[7].updateExprsUnion()))
vals := make(ValTuple, 0, len(yyDollar[8].updateExprsUnion()))
@@ -9407,266 +9455,329 @@ yydefault:
yyLOCAL = &Insert{Action: yyDollar[1].insertActionUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Ignore: yyDollar[3].ignoreUnion(), Table: yyDollar[4].tableName, Partitions: yyDollar[5].partitionsUnion(), Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprsUnion())}
}
yyVAL.union = yyLOCAL
- case 84:
+ case 85:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL InsertAction
-//line sql.y:931
+//line sql.y:924
{
yyLOCAL = InsertAct
}
yyVAL.union = yyLOCAL
- case 85:
+ case 86:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL InsertAction
-//line sql.y:935
+//line sql.y:928
{
yyLOCAL = ReplaceAct
}
yyVAL.union = yyLOCAL
- case 86:
+ case 87:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Statement
-//line sql.y:941
+//line sql.y:934
{
yyLOCAL = &Update{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: yyDollar[5].tableExprsUnion(), Exprs: yyDollar[7].updateExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion()), OrderBy: yyDollar[9].orderByUnion(), Limit: yyDollar[10].limitUnion()}
}
yyVAL.union = yyLOCAL
- case 87:
+ case 88:
yyDollar = yyS[yypt-11 : yypt+1]
var yyLOCAL Statement
-//line sql.y:947
+//line sql.y:940
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[6].tableName, As: yyDollar[7].identifierCS}}, Partitions: yyDollar[8].partitionsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion()), OrderBy: yyDollar[10].orderByUnion(), Limit: yyDollar[11].limitUnion()}
}
yyVAL.union = yyLOCAL
- case 88:
+ case 89:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Statement
-//line sql.y:951
+//line sql.y:944
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[6].tableNamesUnion(), TableExprs: yyDollar[8].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion())}
}
yyVAL.union = yyLOCAL
- case 89:
+ case 90:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:955
+//line sql.y:948
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())}
}
yyVAL.union = yyLOCAL
- case 90:
+ case 91:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:959
+//line sql.y:952
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())}
}
yyVAL.union = yyLOCAL
- case 91:
+ case 92:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:964
+//line sql.y:957
{
}
- case 92:
+ case 93:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:965
+//line sql.y:958
{
}
- case 93:
+ case 94:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableNames
-//line sql.y:969
+//line sql.y:962
{
yyLOCAL = TableNames{yyDollar[1].tableName.ToViewName()}
}
yyVAL.union = yyLOCAL
- case 94:
+ case 95:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:973
+//line sql.y:966
{
yySLICE := (*TableNames)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableName.ToViewName())
}
- case 95:
+ case 96:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableNames
-//line sql.y:979
+//line sql.y:972
{
yyLOCAL = TableNames{yyDollar[1].tableName}
}
yyVAL.union = yyLOCAL
- case 96:
+ case 97:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:983
+//line sql.y:976
{
yySLICE := (*TableNames)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableName)
}
- case 97:
+ case 98:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableNames
-//line sql.y:989
+//line sql.y:982
{
yyLOCAL = TableNames{yyDollar[1].tableName}
}
yyVAL.union = yyLOCAL
- case 98:
+ case 99:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:993
+//line sql.y:986
{
yySLICE := (*TableNames)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableName)
}
- case 99:
+ case 100:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Partitions
-//line sql.y:998
+//line sql.y:991
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 100:
+ case 101:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Partitions
-//line sql.y:1002
+//line sql.y:995
{
yyLOCAL = yyDollar[3].partitionsUnion()
}
yyVAL.union = yyLOCAL
- case 101:
+ case 102:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1008
+//line sql.y:1001
{
yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[3].setExprsUnion())
}
yyVAL.union = yyLOCAL
- case 102:
- yyDollar = yyS[yypt-5 : yypt+1]
- var yyLOCAL Statement
-//line sql.y:1014
- {
- yyLOCAL = &SetTransaction{Comments: Comments(yyDollar[2].strs).Parsed(), Scope: yyDollar[3].scopeUnion(), Characteristics: yyDollar[5].characteristicsUnion()}
- }
- yyVAL.union = yyLOCAL
case 103:
- yyDollar = yyS[yypt-4 : yypt+1]
- var yyLOCAL Statement
-//line sql.y:1018
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL SetExprs
+//line sql.y:1007
{
- yyLOCAL = &SetTransaction{Comments: Comments(yyDollar[2].strs).Parsed(), Characteristics: yyDollar[4].characteristicsUnion(), Scope: NoScope}
+ yyLOCAL = SetExprs{yyDollar[1].setExprUnion()}
}
yyVAL.union = yyLOCAL
case 104:
- yyDollar = yyS[yypt-1 : yypt+1]
- var yyLOCAL []Characteristic
-//line sql.y:1024
+ yyDollar = yyS[yypt-3 : yypt+1]
+//line sql.y:1011
{
- yyLOCAL = []Characteristic{yyDollar[1].characteristicUnion()}
+ yySLICE := (*SetExprs)(yyIaddr(yyVAL.union))
+ *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion())
}
- yyVAL.union = yyLOCAL
case 105:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1028
+ var yyLOCAL *SetExpr
+//line sql.y:1017
{
- yySLICE := (*[]Characteristic)(yyIaddr(yyVAL.union))
- *yySLICE = append(*yySLICE, yyDollar[3].characteristicUnion())
+ yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("on")}
}
+ yyVAL.union = yyLOCAL
case 106:
yyDollar = yyS[yypt-3 : yypt+1]
- var yyLOCAL Characteristic
-//line sql.y:1034
+ var yyLOCAL *SetExpr
+//line sql.y:1021
{
- yyLOCAL = yyDollar[3].isolationLevelUnion()
+ yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("off")}
}
yyVAL.union = yyLOCAL
case 107:
- yyDollar = yyS[yypt-2 : yypt+1]
- var yyLOCAL Characteristic
-//line sql.y:1038
+ yyDollar = yyS[yypt-3 : yypt+1]
+ var yyLOCAL *SetExpr
+//line sql.y:1025
{
- yyLOCAL = ReadWrite
+ yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
case 108:
- yyDollar = yyS[yypt-2 : yypt+1]
- var yyLOCAL Characteristic
-//line sql.y:1042
+ yyDollar = yyS[yypt-3 : yypt+1]
+ var yyLOCAL *SetExpr
+//line sql.y:1029
{
- yyLOCAL = ReadOnly
+ yyLOCAL = &SetExpr{Var: NewSetVariable(string(yyDollar[1].str), SessionScope), Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
case 109:
- yyDollar = yyS[yypt-2 : yypt+1]
- var yyLOCAL IsolationLevel
-//line sql.y:1048
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL *Variable
+//line sql.y:1035
{
- yyLOCAL = RepeatableRead
+ yyLOCAL = NewSetVariable(string(yyDollar[1].str), SessionScope)
}
yyVAL.union = yyLOCAL
case 110:
- yyDollar = yyS[yypt-2 : yypt+1]
- var yyLOCAL IsolationLevel
-//line sql.y:1052
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL *Variable
+//line sql.y:1039
{
- yyLOCAL = ReadCommitted
+ yyLOCAL = yyDollar[1].variableUnion()
}
yyVAL.union = yyLOCAL
case 111:
yyDollar = yyS[yypt-2 : yypt+1]
- var yyLOCAL IsolationLevel
-//line sql.y:1056
+ var yyLOCAL *Variable
+//line sql.y:1043
{
- yyLOCAL = ReadUncommitted
+ yyLOCAL = NewSetVariable(string(yyDollar[2].str), yyDollar[1].scopeUnion())
}
yyVAL.union = yyLOCAL
case 112:
- yyDollar = yyS[yypt-1 : yypt+1]
- var yyLOCAL IsolationLevel
-//line sql.y:1060
+ yyDollar = yyS[yypt-5 : yypt+1]
+ var yyLOCAL Statement
+//line sql.y:1049
{
- yyLOCAL = Serializable
+ yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), UpdateSetExprsScope(yyDollar[5].setExprsUnion(), yyDollar[3].scopeUnion()))
}
yyVAL.union = yyLOCAL
case 113:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ var yyLOCAL Statement
+//line sql.y:1053
+ {
+ yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[4].setExprsUnion())
+ }
+ yyVAL.union = yyLOCAL
+ case 114:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL SetExprs
+//line sql.y:1059
+ {
+ yyLOCAL = SetExprs{yyDollar[1].setExprUnion()}
+ }
+ yyVAL.union = yyLOCAL
+ case 115:
+ yyDollar = yyS[yypt-3 : yypt+1]
+//line sql.y:1063
+ {
+ yySLICE := (*SetExprs)(yyIaddr(yyVAL.union))
+ *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion())
+ }
+ case 116:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ var yyLOCAL *SetExpr
+//line sql.y:1069
+ {
+ yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionIsolationStr, NextTxScope), Expr: NewStrLiteral(yyDollar[3].str)}
+ }
+ yyVAL.union = yyLOCAL
+ case 117:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ var yyLOCAL *SetExpr
+//line sql.y:1073
+ {
+ yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("off")}
+ }
+ yyVAL.union = yyLOCAL
+ case 118:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ var yyLOCAL *SetExpr
+//line sql.y:1077
+ {
+ yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("on")}
+ }
+ yyVAL.union = yyLOCAL
+ case 119:
+ yyDollar = yyS[yypt-2 : yypt+1]
+//line sql.y:1083
+ {
+ yyVAL.str = RepeatableReadStr
+ }
+ case 120:
+ yyDollar = yyS[yypt-2 : yypt+1]
+//line sql.y:1087
+ {
+ yyVAL.str = ReadCommittedStr
+ }
+ case 121:
+ yyDollar = yyS[yypt-2 : yypt+1]
+//line sql.y:1091
+ {
+ yyVAL.str = ReadUncommittedStr
+ }
+ case 122:
+ yyDollar = yyS[yypt-1 : yypt+1]
+//line sql.y:1095
+ {
+ yyVAL.str = SerializableStr
+ }
+ case 123:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Scope
-//line sql.y:1066
+//line sql.y:1101
{
yyLOCAL = SessionScope
}
yyVAL.union = yyLOCAL
- case 114:
+ case 124:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Scope
-//line sql.y:1070
+//line sql.y:1105
{
yyLOCAL = SessionScope
}
yyVAL.union = yyLOCAL
- case 115:
+ case 125:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Scope
-//line sql.y:1074
+//line sql.y:1109
{
yyLOCAL = GlobalScope
}
yyVAL.union = yyLOCAL
- case 116:
+ case 126:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1080
+//line sql.y:1115
{
yyDollar[1].createTableUnion().TableSpec = yyDollar[2].tableSpecUnion()
yyDollar[1].createTableUnion().FullyParsed = true
yyLOCAL = yyDollar[1].createTableUnion()
}
yyVAL.union = yyLOCAL
- case 117:
+ case 127:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1086
+//line sql.y:1121
{
// Create table [name] like [name]
yyDollar[1].createTableUnion().OptLike = yyDollar[2].optLikeUnion()
@@ -9674,10 +9785,10 @@ yydefault:
yyLOCAL = yyDollar[1].createTableUnion()
}
yyVAL.union = yyLOCAL
- case 118:
+ case 128:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1093
+//line sql.y:1128
{
indexDef := yyDollar[1].alterTableUnion().AlterOptions[0].(*AddIndexDefinition).IndexDefinition
indexDef.Columns = yyDollar[3].indexColumnsUnion()
@@ -9687,413 +9798,413 @@ yydefault:
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 119:
+ case 129:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1102
+//line sql.y:1137
{
yyLOCAL = &CreateView{ViewName: yyDollar[8].tableName.ToViewName(), Comments: Comments(yyDollar[2].strs).Parsed(), IsReplace: yyDollar[3].booleanUnion(), Algorithm: yyDollar[4].str, Definer: yyDollar[5].definerUnion(), Security: yyDollar[6].str, Columns: yyDollar[9].columnsUnion(), Select: yyDollar[11].selStmtUnion(), CheckOption: yyDollar[12].str}
}
yyVAL.union = yyLOCAL
- case 120:
+ case 130:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1106
+//line sql.y:1141
{
yyDollar[1].createDatabaseUnion().FullyParsed = true
yyDollar[1].createDatabaseUnion().CreateOptions = yyDollar[2].databaseOptionsUnion()
yyLOCAL = yyDollar[1].createDatabaseUnion()
}
yyVAL.union = yyLOCAL
- case 121:
+ case 131:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:1113
+//line sql.y:1148
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 122:
+ case 132:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:1117
+//line sql.y:1152
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 123:
+ case 133:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:1122
+//line sql.y:1157
{
yyVAL.identifierCI = NewIdentifierCI("")
}
- case 124:
+ case 134:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1126
+//line sql.y:1161
{
yyVAL.identifierCI = yyDollar[2].identifierCI
}
- case 125:
+ case 135:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1132
+//line sql.y:1167
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 126:
+ case 136:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []VindexParam
-//line sql.y:1137
+//line sql.y:1172
{
var v []VindexParam
yyLOCAL = v
}
yyVAL.union = yyLOCAL
- case 127:
+ case 137:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []VindexParam
-//line sql.y:1142
+//line sql.y:1177
{
yyLOCAL = yyDollar[2].vindexParamsUnion()
}
yyVAL.union = yyLOCAL
- case 128:
+ case 138:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []VindexParam
-//line sql.y:1148
+//line sql.y:1183
{
yyLOCAL = make([]VindexParam, 0, 4)
yyLOCAL = append(yyLOCAL, yyDollar[1].vindexParam)
}
yyVAL.union = yyLOCAL
- case 129:
+ case 139:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1153
+//line sql.y:1188
{
yySLICE := (*[]VindexParam)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].vindexParam)
}
- case 130:
+ case 140:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1159
+//line sql.y:1194
{
yyVAL.vindexParam = VindexParam{Key: yyDollar[1].identifierCI, Val: yyDollar[3].str}
}
- case 131:
+ case 141:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*JSONObjectParam
-//line sql.y:1164
+//line sql.y:1199
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 132:
+ case 142:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*JSONObjectParam
-//line sql.y:1168
+//line sql.y:1203
{
yyLOCAL = yyDollar[1].jsonObjectParamsUnion()
}
yyVAL.union = yyLOCAL
- case 133:
+ case 143:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*JSONObjectParam
-//line sql.y:1174
+//line sql.y:1209
{
yyLOCAL = []*JSONObjectParam{yyDollar[1].jsonObjectParam}
}
yyVAL.union = yyLOCAL
- case 134:
+ case 144:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1178
+//line sql.y:1213
{
yySLICE := (*[]*JSONObjectParam)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].jsonObjectParam)
}
- case 135:
+ case 145:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1184
+//line sql.y:1219
{
yyVAL.jsonObjectParam = &JSONObjectParam{Key: yyDollar[1].exprUnion(), Value: yyDollar[3].exprUnion()}
}
- case 136:
+ case 146:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *CreateTable
-//line sql.y:1190
+//line sql.y:1225
{
yyLOCAL = &CreateTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[6].tableName, IfNotExists: yyDollar[5].booleanUnion(), Temp: yyDollar[3].booleanUnion()}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 137:
+ case 147:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1197
+//line sql.y:1232
{
yyLOCAL = &AlterTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[4].tableName}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 138:
+ case 148:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1204
+//line sql.y:1239
{
yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].identifierCI, Type: string(yyDollar[3].str)}, Options: yyDollar[5].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 139:
+ case 149:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1209
+//line sql.y:1244
{
yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Fulltext: true}, Options: yyDollar[6].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 140:
+ case 150:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1214
+//line sql.y:1249
{
yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Spatial: true}, Options: yyDollar[6].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 141:
+ case 151:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1219
+//line sql.y:1254
{
yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Unique: true}, Options: yyDollar[6].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 142:
+ case 152:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *CreateDatabase
-//line sql.y:1226
+//line sql.y:1261
{
yyLOCAL = &CreateDatabase{Comments: Comments(yyDollar[4].strs).Parsed(), DBName: yyDollar[6].identifierCS, IfNotExists: yyDollar[5].booleanUnion()}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 143:
+ case 153:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *AlterDatabase
-//line sql.y:1233
+//line sql.y:1268
{
yyLOCAL = &AlterDatabase{}
setDDL(yylex, yyLOCAL)
}
yyVAL.union = yyLOCAL
- case 146:
+ case 156:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *TableSpec
-//line sql.y:1244
+//line sql.y:1279
{
yyLOCAL = yyDollar[2].tableSpecUnion()
yyLOCAL.Options = yyDollar[4].tableOptionsUnion()
yyLOCAL.PartitionOption = yyDollar[5].partitionOptionUnion()
}
yyVAL.union = yyLOCAL
- case 147:
+ case 157:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1251
+//line sql.y:1286
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 148:
+ case 158:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1255
+//line sql.y:1290
{
yyLOCAL = yyDollar[1].databaseOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 149:
+ case 159:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1261
+//line sql.y:1296
{
yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption}
}
yyVAL.union = yyLOCAL
- case 150:
+ case 160:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1265
+//line sql.y:1300
{
yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption}
}
yyVAL.union = yyLOCAL
- case 151:
+ case 161:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1269
+//line sql.y:1304
{
yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption}
}
yyVAL.union = yyLOCAL
- case 152:
+ case 162:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1273
+//line sql.y:1308
{
yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].databaseOption)
}
- case 153:
+ case 163:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1277
+//line sql.y:1312
{
yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].databaseOption)
}
- case 154:
+ case 164:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1281
+//line sql.y:1316
{
yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].databaseOption)
}
- case 155:
+ case 165:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:1287
+//line sql.y:1322
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 156:
+ case 166:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:1291
+//line sql.y:1326
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 157:
+ case 167:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1297
+//line sql.y:1332
{
yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
- case 158:
+ case 168:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1301
+//line sql.y:1336
{
yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
- case 159:
+ case 169:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1307
+//line sql.y:1342
{
yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
- case 160:
+ case 170:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1311
+//line sql.y:1346
{
yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
- case 161:
+ case 171:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1317
+//line sql.y:1352
{
yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
- case 162:
+ case 172:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1321
+//line sql.y:1356
{
yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
- case 163:
+ case 173:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *OptLike
-//line sql.y:1327
+//line sql.y:1362
{
yyLOCAL = &OptLike{LikeTable: yyDollar[2].tableName}
}
yyVAL.union = yyLOCAL
- case 164:
+ case 174:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *OptLike
-//line sql.y:1331
+//line sql.y:1366
{
yyLOCAL = &OptLike{LikeTable: yyDollar[3].tableName}
}
yyVAL.union = yyLOCAL
- case 165:
+ case 175:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*ColumnDefinition
-//line sql.y:1337
+//line sql.y:1372
{
yyLOCAL = []*ColumnDefinition{yyDollar[1].columnDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 166:
+ case 176:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1341
+//line sql.y:1376
{
yySLICE := (*[]*ColumnDefinition)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].columnDefinitionUnion())
}
- case 167:
+ case 177:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *TableSpec
-//line sql.y:1347
+//line sql.y:1382
{
yyLOCAL = &TableSpec{}
yyLOCAL.AddColumn(yyDollar[1].columnDefinitionUnion())
}
yyVAL.union = yyLOCAL
- case 168:
+ case 178:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *TableSpec
-//line sql.y:1352
+//line sql.y:1387
{
yyLOCAL = &TableSpec{}
yyLOCAL.AddConstraint(yyDollar[1].constraintDefinitionUnion())
}
yyVAL.union = yyLOCAL
- case 169:
+ case 179:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1357
+//line sql.y:1392
{
yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion())
}
- case 170:
+ case 180:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1361
+//line sql.y:1396
{
yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion())
yyVAL.tableSpecUnion().AddConstraint(yyDollar[4].constraintDefinitionUnion())
}
- case 171:
+ case 181:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1366
+//line sql.y:1401
{
yyVAL.tableSpecUnion().AddIndex(yyDollar[3].indexDefinitionUnion())
}
- case 172:
+ case 182:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1370
+//line sql.y:1405
{
yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion())
}
- case 173:
+ case 183:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1374
+//line sql.y:1409
{
yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion())
}
- case 174:
+ case 184:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ColumnDefinition
-//line sql.y:1385
+//line sql.y:1420
{
yyDollar[2].columnType.Options = yyDollar[4].columnTypeOptionsUnion()
if yyDollar[2].columnType.Options.Collate == "" {
@@ -10103,10 +10214,10 @@ yydefault:
yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType}
}
yyVAL.union = yyLOCAL
- case 175:
+ case 185:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL *ColumnDefinition
-//line sql.y:1394
+//line sql.y:1429
{
yyDollar[2].columnType.Options = yyDollar[9].columnTypeOptionsUnion()
yyDollar[2].columnType.Options.As = yyDollar[7].exprUnion()
@@ -10115,2678 +10226,2714 @@ yydefault:
yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType}
}
yyVAL.union = yyLOCAL
- case 176:
+ case 186:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:1403
+//line sql.y:1438
{
yyVAL.str = ""
}
- case 177:
+ case 187:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1407
+//line sql.y:1442
{
yyVAL.str = ""
}
- case 178:
+ case 188:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1416
+//line sql.y:1451
{
- yyLOCAL = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: colKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil}
+ yyLOCAL = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: ColKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil}
}
yyVAL.union = yyLOCAL
- case 179:
+ case 189:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1420
+//line sql.y:1455
{
val := true
yyDollar[1].columnTypeOptionsUnion().Null = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 180:
+ case 190:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1426
+//line sql.y:1461
{
val := false
yyDollar[1].columnTypeOptionsUnion().Null = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 181:
+ case 191:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1432
+//line sql.y:1467
{
yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[4].exprUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 182:
+ case 192:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1437
+//line sql.y:1472
{
yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[3].exprUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 183:
+ case 193:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1442
+//line sql.y:1477
{
yyDollar[1].columnTypeOptionsUnion().OnUpdate = yyDollar[4].exprUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 184:
+ case 194:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1447
+//line sql.y:1482
{
yyDollar[1].columnTypeOptionsUnion().Autoincrement = true
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 185:
+ case 195:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1452
+//line sql.y:1487
{
yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str)
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 186:
+ case 196:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1457
+//line sql.y:1492
{
yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 187:
+ case 197:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1462
+//line sql.y:1497
{
yyDollar[1].columnTypeOptionsUnion().Collate = encodeSQLString(yyDollar[3].str)
}
- case 188:
+ case 198:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1466
+//line sql.y:1501
{
yyDollar[1].columnTypeOptionsUnion().Collate = string(yyDollar[3].identifierCI.String())
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 189:
+ case 199:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1471
+//line sql.y:1506
{
yyDollar[1].columnTypeOptionsUnion().Format = yyDollar[3].columnFormatUnion()
}
- case 190:
+ case 200:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1475
+//line sql.y:1510
{
yyDollar[1].columnTypeOptionsUnion().SRID = NewIntLiteral(yyDollar[3].str)
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 191:
+ case 201:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1480
+//line sql.y:1515
{
val := false
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 192:
+ case 202:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1486
+//line sql.y:1521
{
val := true
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 193:
+ case 203:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1492
+//line sql.y:1527
{
yyDollar[1].columnTypeOptionsUnion().EngineAttribute = NewStrLiteral(yyDollar[4].str)
}
- case 194:
+ case 204:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1496
+//line sql.y:1531
{
yyDollar[1].columnTypeOptionsUnion().SecondaryEngineAttribute = NewStrLiteral(yyDollar[4].str)
}
- case 195:
+ case 205:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnFormat
-//line sql.y:1502
+//line sql.y:1537
{
yyLOCAL = FixedFormat
}
yyVAL.union = yyLOCAL
- case 196:
+ case 206:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnFormat
-//line sql.y:1506
+//line sql.y:1541
{
yyLOCAL = DynamicFormat
}
yyVAL.union = yyLOCAL
- case 197:
+ case 207:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnFormat
-//line sql.y:1510
+//line sql.y:1545
{
yyLOCAL = DefaultFormat
}
yyVAL.union = yyLOCAL
- case 198:
+ case 208:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnStorage
-//line sql.y:1516
+//line sql.y:1551
{
yyLOCAL = VirtualStorage
}
yyVAL.union = yyLOCAL
- case 199:
+ case 209:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnStorage
-//line sql.y:1520
+//line sql.y:1555
{
yyLOCAL = StoredStorage
}
yyVAL.union = yyLOCAL
- case 200:
+ case 210:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1525
+//line sql.y:1560
{
yyLOCAL = &ColumnTypeOptions{}
}
yyVAL.union = yyLOCAL
- case 201:
+ case 211:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1529
+//line sql.y:1564
{
yyDollar[1].columnTypeOptionsUnion().Storage = yyDollar[2].columnStorageUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 202:
+ case 212:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1534
+//line sql.y:1569
{
val := true
yyDollar[1].columnTypeOptionsUnion().Null = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 203:
+ case 213:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1540
+//line sql.y:1575
{
val := false
yyDollar[1].columnTypeOptionsUnion().Null = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 204:
+ case 214:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1546
+//line sql.y:1581
{
yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str)
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 205:
+ case 215:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1551
+//line sql.y:1586
{
yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 206:
+ case 216:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1556
+//line sql.y:1591
{
val := false
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 207:
+ case 217:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1562
+//line sql.y:1597
{
val := true
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 208:
+ case 218:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1570
+//line sql.y:1605
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 210:
+ case 220:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1577
+//line sql.y:1612
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_timestamp"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 211:
+ case 221:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1581
+//line sql.y:1616
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtime"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 212:
+ case 222:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1585
+//line sql.y:1620
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtimestamp"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 213:
+ case 223:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1589
+//line sql.y:1624
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_timestamp"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 214:
+ case 224:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1593
+//line sql.y:1628
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("now"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 217:
+ case 227:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1603
+//line sql.y:1638
{
yyLOCAL = &NullVal{}
}
yyVAL.union = yyLOCAL
- case 219:
+ case 229:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1610
+//line sql.y:1645
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 220:
+ case 230:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1614
+//line sql.y:1649
{
yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 221:
+ case 231:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1620
+//line sql.y:1655
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 222:
+ case 232:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1624
+//line sql.y:1659
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 223:
+ case 233:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1628
+//line sql.y:1663
{
yyLOCAL = yyDollar[1].boolValUnion()
}
yyVAL.union = yyLOCAL
- case 224:
+ case 234:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1632
+//line sql.y:1667
{
yyLOCAL = NewHexLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 225:
+ case 235:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1636
+//line sql.y:1671
{
yyLOCAL = NewHexNumLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 226:
+ case 236:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1640
+//line sql.y:1675
{
yyLOCAL = NewBitLiteral(yyDollar[1].str[2:])
}
yyVAL.union = yyLOCAL
- case 227:
+ case 237:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1644
+//line sql.y:1679
{
yyLOCAL = NewBitLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 228:
+ case 238:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1648
+//line sql.y:1683
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
}
yyVAL.union = yyLOCAL
- case 229:
+ case 239:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1653
+//line sql.y:1688
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 230:
+ case 240:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1657
+//line sql.y:1692
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexNumLiteral(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 231:
+ case 241:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1661
+//line sql.y:1696
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str[2:])}
}
yyVAL.union = yyLOCAL
- case 232:
+ case 242:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1665
+//line sql.y:1700
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexLiteral(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 233:
+ case 243:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1669
+//line sql.y:1704
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 234:
+ case 244:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1673
+//line sql.y:1708
{
bindVariable(yylex, yyDollar[2].str[1:])
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewArgument(yyDollar[2].str[1:])}
}
yyVAL.union = yyLOCAL
- case 235:
+ case 245:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1678
+//line sql.y:1713
{
yyLOCAL = NewDateLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 236:
+ case 246:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1682
+//line sql.y:1717
{
yyLOCAL = NewTimeLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 237:
+ case 247:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1686
+//line sql.y:1721
{
yyLOCAL = NewTimestampLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 238:
+ case 248:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1692
+//line sql.y:1727
{
yyVAL.str = Armscii8Str
}
- case 239:
+ case 249:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1696
+//line sql.y:1731
{
yyVAL.str = ASCIIStr
}
- case 240:
+ case 250:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1700
+//line sql.y:1735
{
yyVAL.str = Big5Str
}
- case 241:
+ case 251:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1704
+//line sql.y:1739
{
yyVAL.str = UBinaryStr
}
- case 242:
+ case 252:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1708
+//line sql.y:1743
{
yyVAL.str = Cp1250Str
}
- case 243:
+ case 253:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1712
+//line sql.y:1747
{
yyVAL.str = Cp1251Str
}
- case 244:
+ case 254:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1716
+//line sql.y:1751
{
yyVAL.str = Cp1256Str
}
- case 245:
+ case 255:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1720
+//line sql.y:1755
{
yyVAL.str = Cp1257Str
}
- case 246:
+ case 256:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1724
+//line sql.y:1759
{
yyVAL.str = Cp850Str
}
- case 247:
+ case 257:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1728
+//line sql.y:1763
{
yyVAL.str = Cp852Str
}
- case 248:
+ case 258:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1732
+//line sql.y:1767
{
yyVAL.str = Cp866Str
}
- case 249:
+ case 259:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1736
+//line sql.y:1771
{
yyVAL.str = Cp932Str
}
- case 250:
+ case 260:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1740
+//line sql.y:1775
{
yyVAL.str = Dec8Str
}
- case 251:
+ case 261:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1744
+//line sql.y:1779
{
yyVAL.str = EucjpmsStr
}
- case 252:
+ case 262:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1748
+//line sql.y:1783
{
yyVAL.str = EuckrStr
}
- case 253:
+ case 263:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1752
+//line sql.y:1787
{
yyVAL.str = Gb18030Str
}
- case 254:
+ case 264:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1756
+//line sql.y:1791
{
yyVAL.str = Gb2312Str
}
- case 255:
+ case 265:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1760
+//line sql.y:1795
{
yyVAL.str = GbkStr
}
- case 256:
+ case 266:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1764
+//line sql.y:1799
{
yyVAL.str = Geostd8Str
}
- case 257:
+ case 267:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1768
+//line sql.y:1803
{
yyVAL.str = GreekStr
}
- case 258:
+ case 268:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1772
+//line sql.y:1807
{
yyVAL.str = HebrewStr
}
- case 259:
+ case 269:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1776
+//line sql.y:1811
{
yyVAL.str = Hp8Str
}
- case 260:
+ case 270:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1780
+//line sql.y:1815
{
yyVAL.str = Keybcs2Str
}
- case 261:
+ case 271:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1784
+//line sql.y:1819
{
yyVAL.str = Koi8rStr
}
- case 262:
+ case 272:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1788
+//line sql.y:1823
{
yyVAL.str = Koi8uStr
}
- case 263:
+ case 273:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1792
+//line sql.y:1827
{
yyVAL.str = Latin1Str
}
- case 264:
+ case 274:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1796
+//line sql.y:1831
{
yyVAL.str = Latin2Str
}
- case 265:
+ case 275:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1800
+//line sql.y:1835
{
yyVAL.str = Latin5Str
}
- case 266:
+ case 276:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1804
+//line sql.y:1839
{
yyVAL.str = Latin7Str
}
- case 267:
+ case 277:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1808
+//line sql.y:1843
{
yyVAL.str = MacceStr
}
- case 268:
+ case 278:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1812
+//line sql.y:1847
{
yyVAL.str = MacromanStr
}
- case 269:
+ case 279:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1816
+//line sql.y:1851
{
yyVAL.str = SjisStr
}
- case 270:
+ case 280:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1820
+//line sql.y:1855
{
yyVAL.str = Swe7Str
}
- case 271:
+ case 281:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1824
+//line sql.y:1859
{
yyVAL.str = Tis620Str
}
- case 272:
+ case 282:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1828
+//line sql.y:1863
{
yyVAL.str = Ucs2Str
}
- case 273:
+ case 283:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1832
+//line sql.y:1867
{
yyVAL.str = UjisStr
}
- case 274:
+ case 284:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1836
+//line sql.y:1871
{
yyVAL.str = Utf16Str
}
- case 275:
+ case 285:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1840
+//line sql.y:1875
{
yyVAL.str = Utf16leStr
}
- case 276:
+ case 286:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1844
+//line sql.y:1879
{
yyVAL.str = Utf32Str
}
- case 277:
+ case 287:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1848
+//line sql.y:1883
{
yyVAL.str = Utf8Str
}
- case 278:
+ case 288:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1852
+//line sql.y:1887
{
yyVAL.str = Utf8mb4Str
}
- case 279:
+ case 289:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1856
+//line sql.y:1891
{
yyVAL.str = Utf8Str
}
- case 282:
+ case 292:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1866
+//line sql.y:1901
{
yyLOCAL = NewIntLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 283:
+ case 293:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1870
+//line sql.y:1905
{
yyLOCAL = NewFloatLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 284:
+ case 294:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1874
+//line sql.y:1909
{
yyLOCAL = NewDecimalLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 285:
+ case 295:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1880
+//line sql.y:1915
{
yyLOCAL = NewStrLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 286:
+ case 296:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1884
+//line sql.y:1919
{
yyLOCAL = &UnaryExpr{Operator: NStringOp, Expr: NewStrLiteral(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 287:
+ case 297:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1888
+//line sql.y:1923
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewStrLiteral(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 288:
+ case 298:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1894
+//line sql.y:1929
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 289:
+ case 299:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1898
+//line sql.y:1933
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
}
yyVAL.union = yyLOCAL
- case 290:
+ case 300:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1905
+//line sql.y:1940
{
- yyLOCAL = colKeyPrimary
+ yyLOCAL = ColKeyPrimary
}
yyVAL.union = yyLOCAL
- case 291:
+ case 301:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1909
+//line sql.y:1944
{
- yyLOCAL = colKeyUnique
+ yyLOCAL = ColKeyUnique
}
yyVAL.union = yyLOCAL
- case 292:
+ case 302:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1913
+//line sql.y:1948
{
- yyLOCAL = colKeyUniqueKey
+ yyLOCAL = ColKeyUniqueKey
}
yyVAL.union = yyLOCAL
- case 293:
+ case 303:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1917
+//line sql.y:1952
{
- yyLOCAL = colKey
+ yyLOCAL = ColKey
}
yyVAL.union = yyLOCAL
- case 294:
+ case 304:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1923
+//line sql.y:1958
{
yyVAL.columnType = yyDollar[1].columnType
yyVAL.columnType.Unsigned = yyDollar[2].booleanUnion()
yyVAL.columnType.Zerofill = yyDollar[3].booleanUnion()
}
- case 298:
+ case 308:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1934
+//line sql.y:1969
{
yyVAL.columnType = yyDollar[1].columnType
yyVAL.columnType.Length = yyDollar[2].literalUnion()
}
- case 299:
+ case 309:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1939
+//line sql.y:1974
{
yyVAL.columnType = yyDollar[1].columnType
}
- case 300:
+ case 310:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1945
+//line sql.y:1980
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 301:
+ case 311:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1949
+//line sql.y:1984
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 302:
+ case 312:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1953
+//line sql.y:1988
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 303:
+ case 313:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1957
+//line sql.y:1992
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 304:
+ case 314:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1961
+//line sql.y:1996
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 305:
+ case 315:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1965
+//line sql.y:2000
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 306:
+ case 316:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1969
+//line sql.y:2004
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 307:
+ case 317:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1973
+//line sql.y:2008
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 308:
+ case 318:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1977
+//line sql.y:2012
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 309:
+ case 319:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1983
+//line sql.y:2018
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
}
- case 310:
+ case 320:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1989
+//line sql.y:2024
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
}
- case 311:
+ case 321:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1995
+//line sql.y:2030
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
}
- case 312:
+ case 322:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2001
+//line sql.y:2036
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
}
- case 313:
+ case 323:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2007
+//line sql.y:2042
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
}
- case 314:
+ case 324:
+ yyDollar = yyS[yypt-2 : yypt+1]
+//line sql.y:2048
+ {
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 325:
+ yyDollar = yyS[yypt-2 : yypt+1]
+//line sql.y:2054
+ {
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 326:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2015
+//line sql.y:2062
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 315:
+ case 327:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2019
+//line sql.y:2066
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
- case 316:
+ case 328:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2023
+//line sql.y:2070
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
- case 317:
+ case 329:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2027
+//line sql.y:2074
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
- case 318:
+ case 330:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2031
+//line sql.y:2078
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
- case 319:
+ case 331:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2037
+//line sql.y:2084
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
}
- case 320:
+ case 332:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2041
+//line sql.y:2088
{
// CHAR BYTE is an alias for binary. See also:
// https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html
- yyVAL.columnType = ColumnType{Type: "binary", Length: yyDollar[2].literalUnion()}
+ yyVAL.columnType = &ColumnType{Type: "binary", Length: yyDollar[2].literalUnion()}
}
- case 321:
+ case 333:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2047
+//line sql.y:2094
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
}
- case 322:
+ case 334:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2051
+//line sql.y:2098
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
- case 323:
+ case 335:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2055
+//line sql.y:2102
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
- case 324:
+ case 336:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2059
+//line sql.y:2106
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
- case 325:
+ case 337:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2063
+//line sql.y:2110
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
- case 326:
+ case 338:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2067
+//line sql.y:2114
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
- case 327:
+ case 339:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2071
+//line sql.y:2118
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
- case 328:
+ case 340:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2075
+//line sql.y:2122
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 329:
+ case 341:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2079
+//line sql.y:2126
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 330:
+ case 342:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2083
+//line sql.y:2130
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 331:
+ case 343:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2087
+//line sql.y:2134
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 332:
+ case 344:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2091
+//line sql.y:2138
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 333:
+ case 345:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2095
+//line sql.y:2142
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset}
}
- case 334:
+ case 346:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2100
+//line sql.y:2147
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset}
}
- case 335:
+ case 347:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2106
+//line sql.y:2153
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 336:
+ case 348:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2110
+//line sql.y:2157
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 337:
+ case 349:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2114
+//line sql.y:2161
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 338:
+ case 350:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2118
+//line sql.y:2165
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 339:
+ case 351:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2122
+//line sql.y:2169
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 340:
+ case 352:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2126
+//line sql.y:2173
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 341:
+ case 353:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2130
+//line sql.y:2177
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 342:
+ case 354:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2134
+//line sql.y:2181
{
- yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
+ yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)}
}
- case 343:
+ case 355:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2140
+//line sql.y:2187
{
yyVAL.strs = make([]string, 0, 4)
yyVAL.strs = append(yyVAL.strs, encodeSQLString(yyDollar[1].str))
}
- case 344:
+ case 356:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2145
+//line sql.y:2192
{
yyVAL.strs = append(yyDollar[1].strs, encodeSQLString(yyDollar[3].str))
}
- case 345:
+ case 357:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2150
+//line sql.y:2197
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 346:
+ case 358:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2154
+//line sql.y:2201
{
yyLOCAL = NewIntLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 347:
+ case 359:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2159
+//line sql.y:2206
{
yyVAL.LengthScaleOption = LengthScaleOption{}
}
- case 348:
+ case 360:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2163
+//line sql.y:2210
{
yyVAL.LengthScaleOption = LengthScaleOption{
Length: NewIntLiteral(yyDollar[2].str),
Scale: NewIntLiteral(yyDollar[4].str),
}
}
- case 349:
+ case 361:
+ yyDollar = yyS[yypt-1 : yypt+1]
+//line sql.y:2219
+ {
+ yyVAL.LengthScaleOption = yyDollar[1].LengthScaleOption
+ }
+ case 362:
+ yyDollar = yyS[yypt-3 : yypt+1]
+//line sql.y:2223
+ {
+ yyVAL.LengthScaleOption = LengthScaleOption{
+ Length: NewIntLiteral(yyDollar[2].str),
+ }
+ }
+ case 363:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2171
+//line sql.y:2230
{
yyVAL.LengthScaleOption = LengthScaleOption{}
}
- case 350:
+ case 364:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2175
+//line sql.y:2234
{
yyVAL.LengthScaleOption = LengthScaleOption{
Length: NewIntLiteral(yyDollar[2].str),
}
}
- case 351:
+ case 365:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2181
+//line sql.y:2240
{
yyVAL.LengthScaleOption = LengthScaleOption{
Length: NewIntLiteral(yyDollar[2].str),
Scale: NewIntLiteral(yyDollar[4].str),
}
}
- case 352:
+ case 366:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2189
+//line sql.y:2248
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 353:
+ case 367:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2193
+//line sql.y:2252
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 354:
+ case 368:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2197
+//line sql.y:2256
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 355:
+ case 369:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2202
+//line sql.y:2261
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 356:
+ case 370:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2206
+//line sql.y:2265
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 357:
+ case 371:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2211
+//line sql.y:2270
{
yyVAL.columnCharset = ColumnCharset{}
}
- case 358:
+ case 372:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2215
+//line sql.y:2274
{
yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].identifierCI.String()), Binary: yyDollar[3].booleanUnion()}
}
- case 359:
+ case 373:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2219
+//line sql.y:2278
{
yyVAL.columnCharset = ColumnCharset{Name: encodeSQLString(yyDollar[2].str), Binary: yyDollar[3].booleanUnion()}
}
- case 360:
+ case 374:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2223
+//line sql.y:2282
{
yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].str)}
}
- case 361:
+ case 375:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2227
+//line sql.y:2286
{
// ASCII: Shorthand for CHARACTER SET latin1.
yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: yyDollar[2].booleanUnion()}
}
- case 362:
+ case 376:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2232
+//line sql.y:2291
{
// UNICODE: Shorthand for CHARACTER SET ucs2.
yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: yyDollar[2].booleanUnion()}
}
- case 363:
+ case 377:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2237
+//line sql.y:2296
{
// BINARY: Shorthand for default CHARACTER SET but with binary collation
yyVAL.columnCharset = ColumnCharset{Name: "", Binary: true}
}
- case 364:
+ case 378:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2242
+//line sql.y:2301
{
// BINARY ASCII: Shorthand for CHARACTER SET latin1 with binary collation
yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: true}
}
- case 365:
+ case 379:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2247
+//line sql.y:2306
{
// BINARY UNICODE: Shorthand for CHARACTER SET ucs2 with binary collation
yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: true}
}
- case 366:
+ case 380:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2253
+//line sql.y:2312
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 367:
+ case 381:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2257
+//line sql.y:2316
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 368:
+ case 382:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2262
+//line sql.y:2321
{
yyVAL.str = ""
}
- case 369:
+ case 383:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2266
+//line sql.y:2325
{
yyVAL.str = string(yyDollar[2].identifierCI.String())
}
- case 370:
+ case 384:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2270
+//line sql.y:2329
{
yyVAL.str = encodeSQLString(yyDollar[2].str)
}
- case 371:
+ case 385:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *IndexDefinition
-//line sql.y:2276
+//line sql.y:2335
{
yyLOCAL = &IndexDefinition{Info: yyDollar[1].indexInfoUnion(), Columns: yyDollar[3].indexColumnsUnion(), Options: yyDollar[5].indexOptionsUnion()}
}
yyVAL.union = yyLOCAL
- case 372:
+ case 386:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:2281
+//line sql.y:2340
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 373:
+ case 387:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:2285
+//line sql.y:2344
{
yyLOCAL = yyDollar[1].indexOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 374:
+ case 388:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:2291
+//line sql.y:2350
{
yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 375:
+ case 389:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2295
+//line sql.y:2354
{
yySLICE := (*[]*IndexOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].indexOptionUnion())
}
- case 376:
+ case 390:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2301
+//line sql.y:2360
{
yyLOCAL = yyDollar[1].indexOptionUnion()
}
yyVAL.union = yyLOCAL
- case 377:
+ case 391:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2305
+//line sql.y:2364
{
// should not be string
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 378:
+ case 392:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2310
+//line sql.y:2369
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 379:
+ case 393:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2314
+//line sql.y:2373
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 380:
+ case 394:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2318
+//line sql.y:2377
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 381:
+ case 395:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2322
+//line sql.y:2381
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str) + " " + string(yyDollar[2].str), String: yyDollar[3].identifierCI.String()}
}
yyVAL.union = yyLOCAL
- case 382:
+ case 396:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2326
+//line sql.y:2385
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 383:
+ case 397:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2330
+//line sql.y:2389
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 384:
+ case 398:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2336
+//line sql.y:2395
{
yyVAL.str = ""
}
- case 385:
+ case 399:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2340
+//line sql.y:2399
{
yyVAL.str = string(yyDollar[1].str)
}
- case 386:
+ case 400:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2346
+//line sql.y:2405
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI("PRIMARY"), Primary: true, Unique: true}
}
yyVAL.union = yyLOCAL
- case 387:
+ case 401:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2350
+//line sql.y:2409
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Spatial: true, Unique: false}
}
yyVAL.union = yyLOCAL
- case 388:
+ case 402:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2354
+//line sql.y:2413
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Fulltext: true, Unique: false}
}
yyVAL.union = yyLOCAL
- case 389:
+ case 403:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2358
+//line sql.y:2417
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[4].str), Unique: true}
}
yyVAL.union = yyLOCAL
- case 390:
+ case 404:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2362
+//line sql.y:2421
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[2].str), Unique: false}
}
yyVAL.union = yyLOCAL
- case 391:
+ case 405:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2367
+//line sql.y:2426
{
yyVAL.str = ""
}
- case 392:
+ case 406:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2371
+//line sql.y:2430
{
yyVAL.str = yyDollar[2].str
}
- case 393:
+ case 407:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2377
+//line sql.y:2436
{
yyVAL.str = string(yyDollar[1].str)
}
- case 394:
+ case 408:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2381
+//line sql.y:2440
{
yyVAL.str = string(yyDollar[1].str)
}
- case 395:
+ case 409:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2385
+//line sql.y:2444
{
yyVAL.str = string(yyDollar[1].str)
}
- case 396:
+ case 410:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2391
+//line sql.y:2450
{
yyVAL.str = string(yyDollar[1].str)
}
- case 397:
+ case 411:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2395
+//line sql.y:2454
{
yyVAL.str = string(yyDollar[1].str)
}
- case 398:
+ case 412:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2400
+//line sql.y:2459
{
yyVAL.str = "key"
}
- case 399:
+ case 413:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2404
+//line sql.y:2463
{
yyVAL.str = yyDollar[1].str
}
- case 400:
+ case 414:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2410
+//line sql.y:2469
{
yyVAL.str = string(yyDollar[1].str)
}
- case 401:
+ case 415:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2414
+//line sql.y:2473
{
yyVAL.str = string(yyDollar[1].str)
}
- case 402:
+ case 416:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2419
+//line sql.y:2478
{
yyVAL.str = ""
}
- case 403:
+ case 417:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2423
+//line sql.y:2482
{
yyVAL.str = string(yyDollar[1].identifierCI.String())
}
- case 404:
+ case 418:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexColumn
-//line sql.y:2429
+//line sql.y:2488
{
yyLOCAL = []*IndexColumn{yyDollar[1].indexColumnUnion()}
}
yyVAL.union = yyLOCAL
- case 405:
+ case 419:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2433
+//line sql.y:2492
{
yySLICE := (*[]*IndexColumn)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].indexColumnUnion())
}
- case 406:
+ case 420:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexColumn
-//line sql.y:2439
+//line sql.y:2498
{
yyLOCAL = &IndexColumn{Column: yyDollar[1].identifierCI, Length: yyDollar[2].literalUnion(), Direction: yyDollar[3].orderDirectionUnion()}
}
yyVAL.union = yyLOCAL
- case 407:
+ case 421:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *IndexColumn
-//line sql.y:2443
+//line sql.y:2502
{
yyLOCAL = &IndexColumn{Expression: yyDollar[2].exprUnion(), Direction: yyDollar[4].orderDirectionUnion()}
}
yyVAL.union = yyLOCAL
- case 408:
+ case 422:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2449
+//line sql.y:2508
{
yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()}
}
yyVAL.union = yyLOCAL
- case 409:
+ case 423:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2453
+//line sql.y:2512
{
yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()}
}
yyVAL.union = yyLOCAL
- case 410:
+ case 424:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2459
+//line sql.y:2518
{
yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()}
}
yyVAL.union = yyLOCAL
- case 411:
+ case 425:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2463
+//line sql.y:2522
{
yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()}
}
yyVAL.union = yyLOCAL
- case 412:
+ case 426:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL ConstraintInfo
-//line sql.y:2469
+//line sql.y:2528
{
yyLOCAL = &ForeignKeyDefinition{IndexName: NewIdentifierCI(yyDollar[3].str), Source: yyDollar[5].columnsUnion(), ReferenceDefinition: yyDollar[7].referenceDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 413:
+ case 427:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2475
+//line sql.y:2534
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion()}
}
yyVAL.union = yyLOCAL
- case 414:
+ case 428:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2479
+//line sql.y:2538
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion()}
}
yyVAL.union = yyLOCAL
- case 415:
+ case 429:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2483
+//line sql.y:2542
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion()}
}
yyVAL.union = yyLOCAL
- case 416:
+ case 430:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2487
+//line sql.y:2546
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion(), OnUpdate: yyDollar[8].referenceActionUnion()}
}
yyVAL.union = yyLOCAL
- case 417:
+ case 431:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2491
+//line sql.y:2550
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion(), OnDelete: yyDollar[8].referenceActionUnion()}
}
yyVAL.union = yyLOCAL
- case 418:
+ case 432:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2496
+//line sql.y:2555
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 419:
+ case 433:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2500
+//line sql.y:2559
{
yyLOCAL = yyDollar[1].referenceDefinitionUnion()
}
yyVAL.union = yyLOCAL
- case 420:
+ case 434:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL ConstraintInfo
-//line sql.y:2506
+//line sql.y:2565
{
yyLOCAL = &CheckConstraintDefinition{Expr: yyDollar[3].exprUnion(), Enforced: yyDollar[5].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 421:
+ case 435:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2512
+//line sql.y:2571
{
yyLOCAL = yyDollar[2].matchActionUnion()
}
yyVAL.union = yyLOCAL
- case 422:
+ case 436:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2518
+//line sql.y:2577
{
yyLOCAL = Full
}
yyVAL.union = yyLOCAL
- case 423:
+ case 437:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2522
+//line sql.y:2581
{
yyLOCAL = Partial
}
yyVAL.union = yyLOCAL
- case 424:
+ case 438:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2526
+//line sql.y:2585
{
yyLOCAL = Simple
}
yyVAL.union = yyLOCAL
- case 425:
+ case 439:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2531
+//line sql.y:2590
{
yyLOCAL = DefaultMatch
}
yyVAL.union = yyLOCAL
- case 426:
+ case 440:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2535
+//line sql.y:2594
{
yyLOCAL = yyDollar[1].matchActionUnion()
}
yyVAL.union = yyLOCAL
- case 427:
+ case 441:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2541
+//line sql.y:2600
{
yyLOCAL = yyDollar[3].referenceActionUnion()
}
yyVAL.union = yyLOCAL
- case 428:
+ case 442:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2547
+//line sql.y:2606
{
yyLOCAL = yyDollar[3].referenceActionUnion()
}
yyVAL.union = yyLOCAL
- case 429:
+ case 443:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2553
+//line sql.y:2612
{
yyLOCAL = Restrict
}
yyVAL.union = yyLOCAL
- case 430:
+ case 444:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2557
+//line sql.y:2616
{
yyLOCAL = Cascade
}
yyVAL.union = yyLOCAL
- case 431:
+ case 445:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2561
+//line sql.y:2620
{
yyLOCAL = NoAction
}
yyVAL.union = yyLOCAL
- case 432:
+ case 446:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2565
+//line sql.y:2624
{
yyLOCAL = SetDefault
}
yyVAL.union = yyLOCAL
- case 433:
+ case 447:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2569
+//line sql.y:2628
{
yyLOCAL = SetNull
}
yyVAL.union = yyLOCAL
- case 434:
+ case 448:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2574
+//line sql.y:2633
{
yyVAL.str = ""
}
- case 435:
+ case 449:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2578
+//line sql.y:2637
{
yyVAL.str = string(yyDollar[1].str)
}
- case 436:
+ case 450:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2582
+//line sql.y:2641
{
yyVAL.str = string(yyDollar[1].str)
}
- case 437:
+ case 451:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2588
+//line sql.y:2647
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 438:
+ case 452:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:2592
+//line sql.y:2651
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 439:
+ case 453:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2597
+//line sql.y:2656
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 440:
+ case 454:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2601
+//line sql.y:2660
{
yyLOCAL = yyDollar[1].booleanUnion()
}
yyVAL.union = yyLOCAL
- case 441:
+ case 455:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2606
+//line sql.y:2665
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 442:
+ case 456:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2610
+//line sql.y:2669
{
yyLOCAL = yyDollar[1].tableOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 443:
+ case 457:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2616
+//line sql.y:2675
{
yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 444:
+ case 458:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2620
+//line sql.y:2679
{
yySLICE := (*TableOptions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableOptionUnion())
}
- case 445:
+ case 459:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2624
+//line sql.y:2683
{
yySLICE := (*TableOptions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion())
}
- case 446:
+ case 460:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2630
+//line sql.y:2689
{
yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 447:
+ case 461:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2634
+//line sql.y:2693
{
yySLICE := (*TableOptions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion())
}
- case 448:
+ case 462:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2640
+//line sql.y:2699
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 449:
+ case 463:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2644
+//line sql.y:2703
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 450:
+ case 464:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2648
+//line sql.y:2707
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 451:
+ case 465:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2652
+//line sql.y:2711
{
yyLOCAL = &TableOption{Name: (string(yyDollar[2].str)), String: yyDollar[4].str, CaseSensitive: true}
}
yyVAL.union = yyLOCAL
- case 452:
+ case 466:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2656
+//line sql.y:2715
{
yyLOCAL = &TableOption{Name: string(yyDollar[2].str), String: yyDollar[4].str, CaseSensitive: true}
}
yyVAL.union = yyLOCAL
- case 453:
+ case 467:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2660
+//line sql.y:2719
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 454:
+ case 468:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2664
+//line sql.y:2723
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 455:
+ case 469:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2668
+//line sql.y:2727
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 456:
+ case 470:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2672
+//line sql.y:2731
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 457:
+ case 471:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2676
+//line sql.y:2735
{
yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 458:
+ case 472:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2680
+//line sql.y:2739
{
yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 459:
+ case 473:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2684
+//line sql.y:2743
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 460:
+ case 474:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2688
+//line sql.y:2747
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 461:
+ case 475:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2692
+//line sql.y:2751
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: yyDollar[3].identifierCS.String(), CaseSensitive: true}
}
yyVAL.union = yyLOCAL
- case 462:
+ case 476:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2696
+//line sql.y:2755
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 463:
+ case 477:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2700
+//line sql.y:2759
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 464:
+ case 478:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2704
+//line sql.y:2763
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 465:
+ case 479:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2708
+//line sql.y:2767
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 466:
+ case 480:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2712
+//line sql.y:2771
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 467:
+ case 481:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2716
+//line sql.y:2775
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 468:
+ case 482:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2720
+//line sql.y:2779
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 469:
+ case 483:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2724
+//line sql.y:2783
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 470:
+ case 484:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2728
+//line sql.y:2787
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 471:
+ case 485:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2732
+//line sql.y:2791
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 472:
+ case 486:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2736
+//line sql.y:2795
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 473:
+ case 487:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2740
+//line sql.y:2799
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 474:
+ case 488:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2744
+//line sql.y:2803
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 475:
+ case 489:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2748
+//line sql.y:2807
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 476:
+ case 490:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2752
+//line sql.y:2811
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 477:
+ case 491:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2756
+//line sql.y:2815
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].identifierCI.String() + yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 478:
+ case 492:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2760
+//line sql.y:2819
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Tables: yyDollar[4].tableNamesUnion()}
}
yyVAL.union = yyLOCAL
- case 479:
+ case 493:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2765
+//line sql.y:2824
{
yyVAL.str = ""
}
- case 480:
+ case 494:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2769
+//line sql.y:2828
{
yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 481:
+ case 495:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2773
+//line sql.y:2832
{
yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 491:
+ case 505:
+ yyDollar = yyS[yypt-3 : yypt+1]
+//line sql.y:2851
+ {
+ yyVAL.str = String(TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS})
+ }
+ case 506:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2792
+//line sql.y:2855
{
yyVAL.str = yyDollar[1].identifierCI.String()
}
- case 492:
+ case 507:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2796
+//line sql.y:2859
{
yyVAL.str = encodeSQLString(yyDollar[1].str)
}
- case 493:
+ case 508:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2800
+//line sql.y:2863
{
yyVAL.str = string(yyDollar[1].str)
}
- case 494:
+ case 509:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2805
+//line sql.y:2868
{
yyVAL.str = ""
}
- case 496:
+ case 511:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2811
+//line sql.y:2874
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 497:
+ case 512:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2815
+//line sql.y:2878
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 498:
+ case 513:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:2820
+//line sql.y:2883
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 499:
+ case 514:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:2824
+//line sql.y:2887
{
yyLOCAL = yyDollar[2].colNameUnion()
}
yyVAL.union = yyLOCAL
- case 500:
+ case 515:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2829
+//line sql.y:2892
{
yyVAL.str = ""
}
- case 501:
+ case 516:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2833
+//line sql.y:2896
{
yyVAL.str = string(yyDollar[2].str)
}
- case 502:
+ case 517:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2838
+//line sql.y:2901
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 503:
+ case 518:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2842
+//line sql.y:2905
{
yyLOCAL = NewIntLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 504:
+ case 519:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2846
+//line sql.y:2909
{
yyLOCAL = NewDecimalLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 505:
+ case 520:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2851
+//line sql.y:2914
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 506:
+ case 521:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2855
+//line sql.y:2918
{
yyLOCAL = yyDollar[1].alterOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 507:
+ case 522:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2859
+//line sql.y:2922
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, &OrderByOption{Cols: yyDollar[5].columnsUnion()})
}
- case 508:
+ case 523:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2863
+//line sql.y:2926
{
yyLOCAL = yyDollar[1].alterOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 509:
+ case 524:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2867
+//line sql.y:2930
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionsUnion()...)
}
- case 510:
+ case 525:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2871
+//line sql.y:2934
{
yyLOCAL = append(append(yyDollar[1].alterOptionsUnion(), yyDollar[3].alterOptionsUnion()...), &OrderByOption{Cols: yyDollar[7].columnsUnion()})
}
yyVAL.union = yyLOCAL
- case 511:
+ case 526:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2877
+//line sql.y:2940
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 512:
+ case 527:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2881
+//line sql.y:2944
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion())
}
- case 513:
+ case 528:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2885
+//line sql.y:2948
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion())
}
- case 514:
+ case 529:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2891
+//line sql.y:2954
{
yyLOCAL = yyDollar[1].tableOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 515:
+ case 530:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2895
+//line sql.y:2958
{
yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 516:
+ case 531:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2899
+//line sql.y:2962
{
yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 517:
+ case 532:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2903
+//line sql.y:2966
{
yyLOCAL = &AddIndexDefinition{IndexDefinition: yyDollar[2].indexDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 518:
+ case 533:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2907
+//line sql.y:2970
{
yyLOCAL = &AddColumns{Columns: yyDollar[4].columnDefinitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 519:
+ case 534:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2911
+//line sql.y:2974
{
yyLOCAL = &AddColumns{Columns: []*ColumnDefinition{yyDollar[3].columnDefinitionUnion()}, First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 520:
+ case 535:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2915
+//line sql.y:2978
{
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: true}
}
yyVAL.union = yyLOCAL
- case 521:
+ case 536:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2919
+//line sql.y:2982
{
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 522:
+ case 537:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2923
+//line sql.y:2986
{
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 523:
+ case 538:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2927
+//line sql.y:2990
{
val := false
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val}
}
yyVAL.union = yyLOCAL
- case 524:
+ case 539:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2932
+//line sql.y:2995
{
val := true
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val}
}
yyVAL.union = yyLOCAL
- case 525:
+ case 540:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2937
+//line sql.y:3000
{
yyLOCAL = &AlterCheck{Name: yyDollar[3].identifierCI, Enforced: yyDollar[4].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 526:
+ case 541:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2941
+//line sql.y:3004
{
yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: false}
}
yyVAL.union = yyLOCAL
- case 527:
+ case 542:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2945
+//line sql.y:3008
{
yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: true}
}
yyVAL.union = yyLOCAL
- case 528:
+ case 543:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2949
+//line sql.y:3012
{
yyLOCAL = &ChangeColumn{OldColumn: yyDollar[3].colNameUnion(), NewColDefinition: yyDollar[4].columnDefinitionUnion(), First: yyDollar[5].booleanUnion(), After: yyDollar[6].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 529:
+ case 544:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2953
+//line sql.y:3016
{
yyLOCAL = &ModifyColumn{NewColDefinition: yyDollar[3].columnDefinitionUnion(), First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 530:
+ case 545:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2957
+//line sql.y:3020
{
yyLOCAL = &RenameColumn{OldName: yyDollar[3].colNameUnion(), NewName: yyDollar[5].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 531:
+ case 546:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2961
+//line sql.y:3024
{
yyLOCAL = &AlterCharset{CharacterSet: yyDollar[4].str, Collate: yyDollar[5].str}
}
yyVAL.union = yyLOCAL
- case 532:
+ case 547:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2965
+//line sql.y:3028
{
yyLOCAL = &KeyState{Enable: false}
}
yyVAL.union = yyLOCAL
- case 533:
+ case 548:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2969
+//line sql.y:3032
{
yyLOCAL = &KeyState{Enable: true}
}
yyVAL.union = yyLOCAL
- case 534:
+ case 549:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2973
+//line sql.y:3036
{
yyLOCAL = &TablespaceOperation{Import: false}
}
yyVAL.union = yyLOCAL
- case 535:
+ case 550:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2977
+//line sql.y:3040
{
yyLOCAL = &TablespaceOperation{Import: true}
}
yyVAL.union = yyLOCAL
- case 536:
+ case 551:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2981
+//line sql.y:3044
{
yyLOCAL = &DropColumn{Name: yyDollar[3].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 537:
+ case 552:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2985
+//line sql.y:3048
{
yyLOCAL = &DropKey{Type: NormalKeyType, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 538:
+ case 553:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2989
+//line sql.y:3052
{
yyLOCAL = &DropKey{Type: PrimaryKeyType}
}
yyVAL.union = yyLOCAL
- case 539:
+ case 554:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2993
+//line sql.y:3056
{
yyLOCAL = &DropKey{Type: ForeignKeyType, Name: yyDollar[4].identifierCI}
}
yyVAL.union = yyLOCAL
- case 540:
+ case 555:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2997
+//line sql.y:3060
{
yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 541:
+ case 556:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3001
+//line sql.y:3064
{
yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 542:
+ case 557:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3005
+//line sql.y:3068
{
yyLOCAL = &Force{}
}
yyVAL.union = yyLOCAL
- case 543:
+ case 558:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3009
+//line sql.y:3072
{
yyLOCAL = &RenameTableName{Table: yyDollar[3].tableName}
}
yyVAL.union = yyLOCAL
- case 544:
+ case 559:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3013
+//line sql.y:3076
{
yyLOCAL = &RenameIndex{OldName: yyDollar[3].identifierCI, NewName: yyDollar[5].identifierCI}
}
yyVAL.union = yyLOCAL
- case 545:
+ case 560:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:3019
+//line sql.y:3082
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 546:
+ case 561:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3023
+//line sql.y:3086
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion())
}
- case 547:
+ case 562:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3029
+//line sql.y:3092
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 548:
+ case 563:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3033
+//line sql.y:3096
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 549:
+ case 564:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3037
+//line sql.y:3100
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 550:
+ case 565:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3041
+//line sql.y:3104
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 551:
+ case 566:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3045
+//line sql.y:3108
{
yyLOCAL = &LockOption{Type: DefaultType}
}
yyVAL.union = yyLOCAL
- case 552:
+ case 567:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3049
+//line sql.y:3112
{
yyLOCAL = &LockOption{Type: NoneType}
}
yyVAL.union = yyLOCAL
- case 553:
+ case 568:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3053
+//line sql.y:3116
{
yyLOCAL = &LockOption{Type: SharedType}
}
yyVAL.union = yyLOCAL
- case 554:
+ case 569:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3057
+//line sql.y:3120
{
yyLOCAL = &LockOption{Type: ExclusiveType}
}
yyVAL.union = yyLOCAL
- case 555:
+ case 570:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3061
+//line sql.y:3124
{
yyLOCAL = &Validation{With: true}
}
yyVAL.union = yyLOCAL
- case 556:
+ case 571:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3065
+//line sql.y:3128
{
yyLOCAL = &Validation{With: false}
}
yyVAL.union = yyLOCAL
- case 557:
+ case 572:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3071
+//line sql.y:3134
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion()
@@ -12794,10 +12941,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 558:
+ case 573:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3078
+//line sql.y:3141
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion()
@@ -12805,10 +12952,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 559:
+ case 574:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3085
+//line sql.y:3148
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion()
@@ -12816,28 +12963,28 @@ yydefault:
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 560:
+ case 575:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3092
+//line sql.y:3155
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().PartitionSpec = yyDollar[2].partSpecUnion()
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 561:
+ case 576:
yyDollar = yyS[yypt-11 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3098
+//line sql.y:3161
{
yyLOCAL = &AlterView{ViewName: yyDollar[7].tableName.ToViewName(), Comments: Comments(yyDollar[2].strs).Parsed(), Algorithm: yyDollar[3].str, Definer: yyDollar[4].definerUnion(), Security: yyDollar[5].str, Columns: yyDollar[8].columnsUnion(), Select: yyDollar[10].selStmtUnion(), CheckOption: yyDollar[11].str}
}
yyVAL.union = yyLOCAL
- case 562:
+ case 577:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3108
+//line sql.y:3171
{
yyDollar[1].alterDatabaseUnion().FullyParsed = true
yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS
@@ -12845,10 +12992,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterDatabaseUnion()
}
yyVAL.union = yyLOCAL
- case 563:
+ case 578:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3115
+//line sql.y:3178
{
yyDollar[1].alterDatabaseUnion().FullyParsed = true
yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS
@@ -12856,10 +13003,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterDatabaseUnion()
}
yyVAL.union = yyLOCAL
- case 564:
+ case 579:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3122
+//line sql.y:3185
{
yyLOCAL = &AlterVschema{
Action: CreateVindexDDLAction,
@@ -12872,10 +13019,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 565:
+ case 580:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3134
+//line sql.y:3197
{
yyLOCAL = &AlterVschema{
Action: DropVindexDDLAction,
@@ -12886,26 +13033,26 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 566:
+ case 581:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3144
+//line sql.y:3207
{
yyLOCAL = &AlterVschema{Action: AddVschemaTableDDLAction, Table: yyDollar[6].tableName}
}
yyVAL.union = yyLOCAL
- case 567:
+ case 582:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3148
+//line sql.y:3211
{
yyLOCAL = &AlterVschema{Action: DropVschemaTableDDLAction, Table: yyDollar[6].tableName}
}
yyVAL.union = yyLOCAL
- case 568:
+ case 583:
yyDollar = yyS[yypt-13 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3152
+//line sql.y:3215
{
yyLOCAL = &AlterVschema{
Action: AddColVindexDDLAction,
@@ -12919,10 +13066,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 569:
+ case 584:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3165
+//line sql.y:3228
{
yyLOCAL = &AlterVschema{
Action: DropColVindexDDLAction,
@@ -12933,18 +13080,18 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 570:
+ case 585:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3175
+//line sql.y:3238
{
yyLOCAL = &AlterVschema{Action: AddSequenceDDLAction, Table: yyDollar[6].tableName}
}
yyVAL.union = yyLOCAL
- case 571:
+ case 586:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3179
+//line sql.y:3242
{
yyLOCAL = &AlterVschema{
Action: AddAutoIncDDLAction,
@@ -12956,10 +13103,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 572:
+ case 587:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3190
+//line sql.y:3253
{
yyLOCAL = &AlterMigration{
Type: RetryMigrationType,
@@ -12967,10 +13114,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 573:
+ case 588:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3197
+//line sql.y:3260
{
yyLOCAL = &AlterMigration{
Type: CleanupMigrationType,
@@ -12978,10 +13125,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 574:
+ case 589:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3204
+//line sql.y:3267
{
yyLOCAL = &AlterMigration{
Type: LaunchMigrationType,
@@ -12989,10 +13136,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 575:
+ case 590:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3211
+//line sql.y:3274
{
yyLOCAL = &AlterMigration{
Type: LaunchMigrationType,
@@ -13001,20 +13148,20 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 576:
+ case 591:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3219
+//line sql.y:3282
{
yyLOCAL = &AlterMigration{
Type: LaunchAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 577:
+ case 592:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3225
+//line sql.y:3288
{
yyLOCAL = &AlterMigration{
Type: CompleteMigrationType,
@@ -13022,20 +13169,20 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 578:
+ case 593:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3232
+//line sql.y:3295
{
yyLOCAL = &AlterMigration{
Type: CompleteAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 579:
+ case 594:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3238
+//line sql.y:3301
{
yyLOCAL = &AlterMigration{
Type: CancelMigrationType,
@@ -13043,20 +13190,20 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 580:
+ case 595:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3245
+//line sql.y:3308
{
yyLOCAL = &AlterMigration{
Type: CancelAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 581:
+ case 596:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3251
+//line sql.y:3314
{
yyLOCAL = &AlterMigration{
Type: ThrottleMigrationType,
@@ -13066,10 +13213,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 582:
+ case 597:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3260
+//line sql.y:3323
{
yyLOCAL = &AlterMigration{
Type: ThrottleAllMigrationType,
@@ -13078,10 +13225,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 583:
+ case 598:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3268
+//line sql.y:3331
{
yyLOCAL = &AlterMigration{
Type: UnthrottleMigrationType,
@@ -13089,28 +13236,28 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 584:
+ case 599:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3275
+//line sql.y:3338
{
yyLOCAL = &AlterMigration{
Type: UnthrottleAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 585:
+ case 600:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3282
+//line sql.y:3345
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 586:
+ case 601:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3286
+//line sql.y:3349
{
yyDollar[3].partitionOptionUnion().Partitions = yyDollar[4].integerUnion()
yyDollar[3].partitionOptionUnion().SubPartition = yyDollar[5].subPartitionUnion()
@@ -13118,10 +13265,10 @@ yydefault:
yyLOCAL = yyDollar[3].partitionOptionUnion()
}
yyVAL.union = yyLOCAL
- case 587:
+ case 602:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3295
+//line sql.y:3358
{
yyLOCAL = &PartitionOption{
IsLinear: yyDollar[1].booleanUnion(),
@@ -13130,10 +13277,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 588:
+ case 603:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3303
+//line sql.y:3366
{
yyLOCAL = &PartitionOption{
IsLinear: yyDollar[1].booleanUnion(),
@@ -13143,10 +13290,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 589:
+ case 604:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3312
+//line sql.y:3375
{
yyLOCAL = &PartitionOption{
Type: yyDollar[1].partitionByTypeUnion(),
@@ -13154,10 +13301,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 590:
+ case 605:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3319
+//line sql.y:3382
{
yyLOCAL = &PartitionOption{
Type: yyDollar[1].partitionByTypeUnion(),
@@ -13165,18 +13312,18 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 591:
+ case 606:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *SubPartition
-//line sql.y:3327
+//line sql.y:3390
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 592:
+ case 607:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *SubPartition
-//line sql.y:3331
+//line sql.y:3394
{
yyLOCAL = &SubPartition{
IsLinear: yyDollar[3].booleanUnion(),
@@ -13186,10 +13333,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 593:
+ case 608:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL *SubPartition
-//line sql.y:3340
+//line sql.y:3403
{
yyLOCAL = &SubPartition{
IsLinear: yyDollar[3].booleanUnion(),
@@ -13200,682 +13347,682 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 594:
+ case 609:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*PartitionDefinition
-//line sql.y:3351
+//line sql.y:3414
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 595:
+ case 610:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL []*PartitionDefinition
-//line sql.y:3355
+//line sql.y:3418
{
yyLOCAL = yyDollar[2].partDefsUnion()
}
yyVAL.union = yyLOCAL
- case 596:
+ case 611:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3360
+//line sql.y:3423
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 597:
+ case 612:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3364
+//line sql.y:3427
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 598:
+ case 613:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL int
-//line sql.y:3369
+//line sql.y:3432
{
yyLOCAL = 0
}
yyVAL.union = yyLOCAL
- case 599:
+ case 614:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL int
-//line sql.y:3373
+//line sql.y:3436
{
yyLOCAL = convertStringToInt(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 600:
+ case 615:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:3379
+//line sql.y:3442
{
yyLOCAL = &JSONTableExpr{Expr: yyDollar[3].exprUnion(), Filter: yyDollar[5].exprUnion(), Columns: yyDollar[6].jtColumnListUnion(), Alias: yyDollar[8].identifierCS}
}
yyVAL.union = yyLOCAL
- case 601:
+ case 616:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL []*JtColumnDefinition
-//line sql.y:3385
+//line sql.y:3448
{
yyLOCAL = yyDollar[3].jtColumnListUnion()
}
yyVAL.union = yyLOCAL
- case 602:
+ case 617:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*JtColumnDefinition
-//line sql.y:3391
+//line sql.y:3454
{
yyLOCAL = []*JtColumnDefinition{yyDollar[1].jtColumnDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 603:
+ case 618:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3395
+//line sql.y:3458
{
yySLICE := (*[]*JtColumnDefinition)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].jtColumnDefinitionUnion())
}
- case 604:
+ case 619:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3401
+//line sql.y:3464
{
yyLOCAL = &JtColumnDefinition{JtOrdinal: &JtOrdinalColDef{Name: yyDollar[1].identifierCI}}
}
yyVAL.union = yyLOCAL
- case 605:
+ case 620:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3405
+//line sql.y:3468
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 606:
+ case 621:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3411
+//line sql.y:3474
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 607:
+ case 622:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3417
+//line sql.y:3480
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 608:
+ case 623:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3423
+//line sql.y:3486
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 609:
+ case 624:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3429
+//line sql.y:3492
{
jtNestedPath := &JtNestedPathColDef{Path: yyDollar[3].exprUnion(), Columns: yyDollar[4].jtColumnListUnion()}
yyLOCAL = &JtColumnDefinition{JtNestedPath: jtNestedPath}
}
yyVAL.union = yyLOCAL
- case 610:
+ case 625:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3435
+//line sql.y:3498
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 611:
+ case 626:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3439
+//line sql.y:3502
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 612:
+ case 627:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3443
+//line sql.y:3506
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 613:
+ case 628:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3447
+//line sql.y:3510
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 614:
+ case 629:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3453
+//line sql.y:3516
{
yyLOCAL = yyDollar[1].jtOnResponseUnion()
}
yyVAL.union = yyLOCAL
- case 615:
+ case 630:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3459
+//line sql.y:3522
{
yyLOCAL = yyDollar[1].jtOnResponseUnion()
}
yyVAL.union = yyLOCAL
- case 616:
+ case 631:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3465
+//line sql.y:3528
{
yyLOCAL = &JtOnResponse{ResponseType: ErrorJSONType}
}
yyVAL.union = yyLOCAL
- case 617:
+ case 632:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3469
+//line sql.y:3532
{
yyLOCAL = &JtOnResponse{ResponseType: NullJSONType}
}
yyVAL.union = yyLOCAL
- case 618:
+ case 633:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3473
+//line sql.y:3536
{
yyLOCAL = &JtOnResponse{ResponseType: DefaultJSONType, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 619:
+ case 634:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL PartitionByType
-//line sql.y:3479
+//line sql.y:3542
{
yyLOCAL = RangeType
}
yyVAL.union = yyLOCAL
- case 620:
+ case 635:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL PartitionByType
-//line sql.y:3483
+//line sql.y:3546
{
yyLOCAL = ListType
}
yyVAL.union = yyLOCAL
- case 621:
+ case 636:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL int
-//line sql.y:3488
+//line sql.y:3551
{
yyLOCAL = -1
}
yyVAL.union = yyLOCAL
- case 622:
+ case 637:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL int
-//line sql.y:3492
+//line sql.y:3555
{
yyLOCAL = convertStringToInt(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 623:
+ case 638:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL int
-//line sql.y:3497
+//line sql.y:3560
{
yyLOCAL = -1
}
yyVAL.union = yyLOCAL
- case 624:
+ case 639:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL int
-//line sql.y:3501
+//line sql.y:3564
{
yyLOCAL = convertStringToInt(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 625:
+ case 640:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3507
+//line sql.y:3570
{
yyLOCAL = &PartitionSpec{Action: AddAction, Definitions: []*PartitionDefinition{yyDollar[4].partDefUnion()}}
}
yyVAL.union = yyLOCAL
- case 626:
+ case 641:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3511
+//line sql.y:3574
{
yyLOCAL = &PartitionSpec{Action: DropAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 627:
+ case 642:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3515
+//line sql.y:3578
{
yyLOCAL = &PartitionSpec{Action: ReorganizeAction, Names: yyDollar[3].partitionsUnion(), Definitions: yyDollar[6].partDefsUnion()}
}
yyVAL.union = yyLOCAL
- case 628:
+ case 643:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3519
+//line sql.y:3582
{
yyLOCAL = &PartitionSpec{Action: DiscardAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 629:
+ case 644:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3523
+//line sql.y:3586
{
yyLOCAL = &PartitionSpec{Action: DiscardAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 630:
+ case 645:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3527
+//line sql.y:3590
{
yyLOCAL = &PartitionSpec{Action: ImportAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 631:
+ case 646:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3531
+//line sql.y:3594
{
yyLOCAL = &PartitionSpec{Action: ImportAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 632:
+ case 647:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3535
+//line sql.y:3598
{
yyLOCAL = &PartitionSpec{Action: TruncateAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 633:
+ case 648:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3539
+//line sql.y:3602
{
yyLOCAL = &PartitionSpec{Action: TruncateAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 634:
+ case 649:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3543
+//line sql.y:3606
{
yyLOCAL = &PartitionSpec{Action: CoalesceAction, Number: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 635:
+ case 650:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3547
+//line sql.y:3610
{
yyLOCAL = &PartitionSpec{Action: ExchangeAction, Names: Partitions{yyDollar[3].identifierCI}, TableName: yyDollar[6].tableName, WithoutValidation: yyDollar[7].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 636:
+ case 651:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3551
+//line sql.y:3614
{
yyLOCAL = &PartitionSpec{Action: AnalyzeAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 637:
+ case 652:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3555
+//line sql.y:3618
{
yyLOCAL = &PartitionSpec{Action: AnalyzeAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 638:
+ case 653:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3559
+//line sql.y:3622
{
yyLOCAL = &PartitionSpec{Action: CheckAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 639:
+ case 654:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3563
+//line sql.y:3626
{
yyLOCAL = &PartitionSpec{Action: CheckAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 640:
+ case 655:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3567
+//line sql.y:3630
{
yyLOCAL = &PartitionSpec{Action: OptimizeAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 641:
+ case 656:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3571
+//line sql.y:3634
{
yyLOCAL = &PartitionSpec{Action: OptimizeAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 642:
+ case 657:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3575
+//line sql.y:3638
{
yyLOCAL = &PartitionSpec{Action: RebuildAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 643:
+ case 658:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3579
+//line sql.y:3642
{
yyLOCAL = &PartitionSpec{Action: RebuildAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 644:
+ case 659:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3583
+//line sql.y:3646
{
yyLOCAL = &PartitionSpec{Action: RepairAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 645:
+ case 660:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3587
+//line sql.y:3650
{
yyLOCAL = &PartitionSpec{Action: RepairAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 646:
+ case 661:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3591
+//line sql.y:3654
{
yyLOCAL = &PartitionSpec{Action: UpgradeAction}
}
yyVAL.union = yyLOCAL
- case 647:
+ case 662:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3596
+//line sql.y:3659
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 648:
+ case 663:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:3600
+//line sql.y:3663
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 649:
+ case 664:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:3604
+//line sql.y:3667
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 650:
+ case 665:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*PartitionDefinition
-//line sql.y:3610
+//line sql.y:3673
{
yyLOCAL = []*PartitionDefinition{yyDollar[1].partDefUnion()}
}
yyVAL.union = yyLOCAL
- case 651:
+ case 666:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3614
+//line sql.y:3677
{
yySLICE := (*[]*PartitionDefinition)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].partDefUnion())
}
- case 652:
+ case 667:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:3620
+//line sql.y:3683
{
yyVAL.partDefUnion().Options = yyDollar[2].partitionDefinitionOptionsUnion()
}
- case 653:
+ case 668:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3625
+//line sql.y:3688
{
yyLOCAL = &PartitionDefinitionOptions{}
}
yyVAL.union = yyLOCAL
- case 654:
+ case 669:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3629
+//line sql.y:3692
{
yyDollar[1].partitionDefinitionOptionsUnion().ValueRange = yyDollar[2].partitionValueRangeUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 655:
+ case 670:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3634
+//line sql.y:3697
{
yyDollar[1].partitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 656:
+ case 671:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3639
+//line sql.y:3702
{
yyDollar[1].partitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 657:
+ case 672:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3644
+//line sql.y:3707
{
yyDollar[1].partitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 658:
+ case 673:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3649
+//line sql.y:3712
{
yyDollar[1].partitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 659:
+ case 674:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3654
+//line sql.y:3717
{
val := yyDollar[2].integerUnion()
yyDollar[1].partitionDefinitionOptionsUnion().MaxRows = &val
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 660:
+ case 675:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3660
+//line sql.y:3723
{
val := yyDollar[2].integerUnion()
yyDollar[1].partitionDefinitionOptionsUnion().MinRows = &val
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 661:
+ case 676:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3666
+//line sql.y:3729
{
yyDollar[1].partitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 662:
+ case 677:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3671
+//line sql.y:3734
{
yyDollar[1].partitionDefinitionOptionsUnion().SubPartitionDefinitions = yyDollar[2].subPartitionDefinitionsUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 663:
+ case 678:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SubPartitionDefinitions
-//line sql.y:3677
+//line sql.y:3740
{
yyLOCAL = yyDollar[2].subPartitionDefinitionsUnion()
}
yyVAL.union = yyLOCAL
- case 664:
+ case 679:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SubPartitionDefinitions
-//line sql.y:3683
+//line sql.y:3746
{
yyLOCAL = SubPartitionDefinitions{yyDollar[1].subPartitionDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 665:
+ case 680:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3687
+//line sql.y:3750
{
yySLICE := (*SubPartitionDefinitions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].subPartitionDefinitionUnion())
}
- case 666:
+ case 681:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SubPartitionDefinition
-//line sql.y:3693
+//line sql.y:3756
{
yyLOCAL = &SubPartitionDefinition{Name: yyDollar[2].identifierCI, Options: yyDollar[3].subPartitionDefinitionOptionsUnion()}
}
yyVAL.union = yyLOCAL
- case 667:
+ case 682:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3698
+//line sql.y:3761
{
yyLOCAL = &SubPartitionDefinitionOptions{}
}
yyVAL.union = yyLOCAL
- case 668:
+ case 683:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3702
+//line sql.y:3765
{
yyDollar[1].subPartitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 669:
+ case 684:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3707
+//line sql.y:3770
{
yyDollar[1].subPartitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 670:
+ case 685:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3712
+//line sql.y:3775
{
yyDollar[1].subPartitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 671:
+ case 686:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3717
+//line sql.y:3780
{
yyDollar[1].subPartitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 672:
+ case 687:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3722
+//line sql.y:3785
{
val := yyDollar[2].integerUnion()
yyDollar[1].subPartitionDefinitionOptionsUnion().MaxRows = &val
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 673:
+ case 688:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3728
+//line sql.y:3791
{
val := yyDollar[2].integerUnion()
yyDollar[1].subPartitionDefinitionOptionsUnion().MinRows = &val
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 674:
+ case 689:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3734
+//line sql.y:3797
{
yyDollar[1].subPartitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 675:
+ case 690:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionValueRange
-//line sql.y:3741
+//line sql.y:3804
{
yyLOCAL = &PartitionValueRange{
Type: LessThanType,
@@ -13883,10 +14030,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 676:
+ case 691:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionValueRange
-//line sql.y:3748
+//line sql.y:3811
{
yyLOCAL = &PartitionValueRange{
Type: LessThanType,
@@ -13894,10 +14041,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 677:
+ case 692:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionValueRange
-//line sql.y:3755
+//line sql.y:3818
{
yyLOCAL = &PartitionValueRange{
Type: InType,
@@ -13905,131 +14052,131 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 678:
+ case 693:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3763
+//line sql.y:3826
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 679:
+ case 694:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3767
+//line sql.y:3830
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 680:
+ case 695:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionEngine
-//line sql.y:3773
+//line sql.y:3836
{
yyLOCAL = &PartitionEngine{Storage: yyDollar[1].booleanUnion(), Name: yyDollar[4].identifierCS.String()}
}
yyVAL.union = yyLOCAL
- case 681:
+ case 696:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:3779
+//line sql.y:3842
{
yyLOCAL = NewStrLiteral(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 682:
+ case 697:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:3785
+//line sql.y:3848
{
yyLOCAL = NewStrLiteral(yyDollar[4].str)
}
yyVAL.union = yyLOCAL
- case 683:
+ case 698:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:3791
+//line sql.y:3854
{
yyLOCAL = NewStrLiteral(yyDollar[4].str)
}
yyVAL.union = yyLOCAL
- case 684:
+ case 699:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL int
-//line sql.y:3797
+//line sql.y:3860
{
yyLOCAL = convertStringToInt(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 685:
+ case 700:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL int
-//line sql.y:3803
+//line sql.y:3866
{
yyLOCAL = convertStringToInt(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 686:
+ case 701:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3809
+//line sql.y:3872
{
yyVAL.str = yyDollar[3].identifierCS.String()
}
- case 687:
+ case 702:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinition
-//line sql.y:3815
+//line sql.y:3878
{
yyLOCAL = &PartitionDefinition{Name: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 688:
+ case 703:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:3821
+//line sql.y:3884
{
yyVAL.str = ""
}
- case 689:
+ case 704:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3825
+//line sql.y:3888
{
yyVAL.str = ""
}
- case 690:
+ case 705:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3831
+//line sql.y:3894
{
yyLOCAL = &RenameTable{TablePairs: yyDollar[3].renameTablePairsUnion()}
}
yyVAL.union = yyLOCAL
- case 691:
+ case 706:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL []*RenameTablePair
-//line sql.y:3837
+//line sql.y:3900
{
yyLOCAL = []*RenameTablePair{{FromTable: yyDollar[1].tableName, ToTable: yyDollar[3].tableName}}
}
yyVAL.union = yyLOCAL
- case 692:
+ case 707:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:3841
+//line sql.y:3904
{
yySLICE := (*[]*RenameTablePair)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, &RenameTablePair{FromTable: yyDollar[3].tableName, ToTable: yyDollar[5].tableName})
}
- case 693:
+ case 708:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3847
+//line sql.y:3910
{
yyLOCAL = &DropTable{FromTables: yyDollar[6].tableNamesUnion(), IfExists: yyDollar[5].booleanUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Temp: yyDollar[3].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 694:
+ case 709:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3851
+//line sql.y:3914
{
// Change this to an alter statement
if yyDollar[4].identifierCI.Lowered() == "primary" {
@@ -14039,1216 +14186,1319 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 695:
+ case 710:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3860
+//line sql.y:3923
{
yyLOCAL = &DropView{FromTables: yyDollar[5].tableNamesUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), IfExists: yyDollar[4].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 696:
+ case 711:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3864
+//line sql.y:3927
{
yyLOCAL = &DropDatabase{Comments: Comments(yyDollar[2].strs).Parsed(), DBName: yyDollar[5].identifierCS, IfExists: yyDollar[4].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 697:
+ case 712:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3870
+//line sql.y:3933
{
yyLOCAL = &TruncateTable{Table: yyDollar[3].tableName}
}
yyVAL.union = yyLOCAL
- case 698:
+ case 713:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3874
+//line sql.y:3937
{
yyLOCAL = &TruncateTable{Table: yyDollar[2].tableName}
}
yyVAL.union = yyLOCAL
- case 699:
+ case 714:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3880
+//line sql.y:3943
{
yyLOCAL = &OtherRead{}
}
yyVAL.union = yyLOCAL
- case 700:
+ case 715:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3886
+//line sql.y:3949
{
yyLOCAL = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 701:
+ case 716:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3890
+//line sql.y:3953
{
yyLOCAL = &Show{&ShowBasic{Command: Collation, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 702:
+ case 717:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3894
+//line sql.y:3957
{
yyLOCAL = &Show{&ShowBasic{Full: yyDollar[2].booleanUnion(), Command: Column, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 703:
+ case 718:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3898
+//line sql.y:3961
{
yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 704:
+ case 719:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3902
+//line sql.y:3965
{
yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 705:
+ case 720:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3906
+//line sql.y:3969
{
yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 706:
+ case 721:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3910
+//line sql.y:3973
{
yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 707:
+ case 722:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3914
+//line sql.y:3977
{
yyLOCAL = &Show{&ShowBasic{Command: Function, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 708:
+ case 723:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3918
+//line sql.y:3981
{
yyLOCAL = &Show{&ShowBasic{Command: Index, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 709:
+ case 724:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3922
+//line sql.y:3985
{
yyLOCAL = &Show{&ShowBasic{Command: OpenTable, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 710:
+ case 725:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3926
+//line sql.y:3989
{
yyLOCAL = &Show{&ShowBasic{Command: Privilege}}
}
yyVAL.union = yyLOCAL
- case 711:
+ case 726:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3930
+//line sql.y:3993
{
yyLOCAL = &Show{&ShowBasic{Command: Procedure, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 712:
+ case 727:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3934
+//line sql.y:3997
{
yyLOCAL = &Show{&ShowBasic{Command: StatusSession, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 713:
+ case 728:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3938
+//line sql.y:4001
{
yyLOCAL = &Show{&ShowBasic{Command: StatusGlobal, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 714:
+ case 729:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3942
+//line sql.y:4005
{
yyLOCAL = &Show{&ShowBasic{Command: VariableSession, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 715:
+ case 730:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3946
+//line sql.y:4009
{
yyLOCAL = &Show{&ShowBasic{Command: VariableGlobal, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 716:
+ case 731:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3950
+//line sql.y:4013
{
yyLOCAL = &Show{&ShowBasic{Command: TableStatus, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 717:
+ case 732:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3954
+//line sql.y:4017
{
yyLOCAL = &Show{&ShowBasic{Command: Table, Full: yyDollar[2].booleanUnion(), DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 718:
+ case 733:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3958
+//line sql.y:4021
{
yyLOCAL = &Show{&ShowBasic{Command: Trigger, DbName: yyDollar[3].identifierCS, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 719:
+ case 734:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3962
+//line sql.y:4025
{
yyLOCAL = &Show{&ShowCreate{Command: CreateDb, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 720:
+ case 735:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3966
+//line sql.y:4029
{
yyLOCAL = &Show{&ShowCreate{Command: CreateE, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 721:
+ case 736:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3970
+//line sql.y:4033
{
yyLOCAL = &Show{&ShowCreate{Command: CreateF, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 722:
+ case 737:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3974
+//line sql.y:4037
{
yyLOCAL = &Show{&ShowCreate{Command: CreateProc, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 723:
+ case 738:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3978
+//line sql.y:4041
{
yyLOCAL = &Show{&ShowCreate{Command: CreateTbl, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 724:
+ case 739:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3982
+//line sql.y:4045
{
yyLOCAL = &Show{&ShowCreate{Command: CreateTr, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 725:
+ case 740:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3986
+//line sql.y:4049
{
yyLOCAL = &Show{&ShowCreate{Command: CreateV, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 726:
+ case 741:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3990
+//line sql.y:4053
{
yyLOCAL = &Show{&ShowBasic{Command: Engines}}
}
yyVAL.union = yyLOCAL
- case 727:
+ case 742:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3994
+//line sql.y:4057
{
yyLOCAL = &Show{&ShowBasic{Command: Plugins}}
}
yyVAL.union = yyLOCAL
- case 728:
+ case 743:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3998
+//line sql.y:4061
{
yyLOCAL = &Show{&ShowBasic{Command: GtidExecGlobal, DbName: yyDollar[4].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 729:
+ case 744:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4002
+//line sql.y:4065
{
yyLOCAL = &Show{&ShowBasic{Command: VGtidExecGlobal, DbName: yyDollar[4].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 730:
+ case 745:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4006
+//line sql.y:4069
{
yyLOCAL = &Show{&ShowBasic{Command: VitessVariables, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 731:
+ case 746:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4010
+//line sql.y:4073
{
yyLOCAL = &Show{&ShowBasic{Command: VitessMigrations, Filter: yyDollar[4].showFilterUnion(), DbName: yyDollar[3].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 732:
+ case 747:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4014
+//line sql.y:4077
{
yyLOCAL = &ShowMigrationLogs{UUID: string(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 733:
+ case 748:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4018
+//line sql.y:4081
{
yyLOCAL = &ShowThrottledApps{}
}
yyVAL.union = yyLOCAL
- case 734:
+ case 749:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4022
+//line sql.y:4085
{
yyLOCAL = &Show{&ShowBasic{Command: VitessReplicationStatus, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 735:
+ case 750:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ var yyLOCAL Statement
+//line sql.y:4089
+ {
+ yyLOCAL = &ShowThrottlerStatus{}
+ }
+ yyVAL.union = yyLOCAL
+ case 751:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4026
+//line sql.y:4093
{
yyLOCAL = &Show{&ShowBasic{Command: VschemaTables}}
}
yyVAL.union = yyLOCAL
- case 736:
+ case 752:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4030
+//line sql.y:4097
{
yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes}}
}
yyVAL.union = yyLOCAL
- case 737:
+ case 753:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4034
+//line sql.y:4101
{
yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes, Tbl: yyDollar[5].tableName}}
}
yyVAL.union = yyLOCAL
- case 738:
+ case 754:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4038
+//line sql.y:4105
{
yyLOCAL = &Show{&ShowBasic{Command: Warnings}}
}
yyVAL.union = yyLOCAL
- case 739:
+ case 755:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4042
+//line sql.y:4109
{
yyLOCAL = &Show{&ShowBasic{Command: VitessShards, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 740:
+ case 756:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4046
+//line sql.y:4113
{
yyLOCAL = &Show{&ShowBasic{Command: VitessTablets, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 741:
+ case 757:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4050
+//line sql.y:4117
{
yyLOCAL = &Show{&ShowBasic{Command: VitessTarget}}
}
yyVAL.union = yyLOCAL
- case 742:
+ case 758:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4057
+//line sql.y:4124
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].identifierCI.String())}}
}
yyVAL.union = yyLOCAL
- case 743:
+ case 759:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4061
+//line sql.y:4128
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}}
}
yyVAL.union = yyLOCAL
- case 744:
+ case 760:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4065
+//line sql.y:4132
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String()}}
}
yyVAL.union = yyLOCAL
- case 745:
+ case 761:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4069
+//line sql.y:4136
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}}
}
yyVAL.union = yyLOCAL
- case 746:
+ case 762:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4073
+//line sql.y:4140
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}}
}
yyVAL.union = yyLOCAL
- case 747:
+ case 763:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4077
+//line sql.y:4144
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}}
}
yyVAL.union = yyLOCAL
- case 748:
+ case 764:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4081
+//line sql.y:4148
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}}
}
yyVAL.union = yyLOCAL
- case 749:
+ case 765:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4085
+//line sql.y:4152
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[3].str)}}
}
yyVAL.union = yyLOCAL
- case 750:
+ case 766:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4089
+//line sql.y:4156
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}}
}
yyVAL.union = yyLOCAL
- case 751:
+ case 767:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4095
+//line sql.y:4162
{
yyVAL.str = ""
}
- case 752:
+ case 768:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4099
+//line sql.y:4166
{
yyVAL.str = "extended "
}
- case 753:
+ case 769:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:4105
+//line sql.y:4172
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 754:
+ case 770:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4109
+//line sql.y:4176
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 755:
+ case 771:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4115
+//line sql.y:4182
{
yyVAL.str = string(yyDollar[1].str)
}
- case 756:
+ case 772:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4119
+//line sql.y:4186
{
yyVAL.str = string(yyDollar[1].str)
}
- case 757:
+ case 773:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4125
+//line sql.y:4192
{
yyVAL.identifierCS = NewIdentifierCS("")
}
- case 758:
+ case 774:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4129
+//line sql.y:4196
{
yyVAL.identifierCS = yyDollar[2].identifierCS
}
- case 759:
+ case 775:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4133
+//line sql.y:4200
{
yyVAL.identifierCS = yyDollar[2].identifierCS
}
- case 760:
+ case 776:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4139
+//line sql.y:4206
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 761:
+ case 777:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4143
+//line sql.y:4210
{
yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 762:
+ case 778:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4147
+//line sql.y:4214
{
yyLOCAL = &ShowFilter{Filter: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 763:
+ case 779:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4153
+//line sql.y:4220
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 764:
+ case 780:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4157
+//line sql.y:4224
{
yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 765:
+ case 781:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4163
+//line sql.y:4230
{
yyVAL.empty = struct{}{}
}
- case 766:
+ case 782:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4167
+//line sql.y:4234
{
yyVAL.empty = struct{}{}
}
- case 767:
+ case 783:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4171
+//line sql.y:4238
{
yyVAL.empty = struct{}{}
}
- case 768:
+ case 784:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4177
+//line sql.y:4244
{
yyVAL.str = string(yyDollar[1].str)
}
- case 769:
+ case 785:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4181
+//line sql.y:4248
{
yyVAL.str = string(yyDollar[1].str)
}
- case 770:
+ case 786:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4187
+//line sql.y:4254
{
yyLOCAL = &Use{DBName: yyDollar[2].identifierCS}
}
yyVAL.union = yyLOCAL
- case 771:
+ case 787:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4191
+//line sql.y:4258
{
yyLOCAL = &Use{DBName: IdentifierCS{v: ""}}
}
yyVAL.union = yyLOCAL
- case 772:
+ case 788:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4195
+//line sql.y:4262
{
yyLOCAL = &Use{DBName: NewIdentifierCS(yyDollar[2].identifierCS.String() + "@" + string(yyDollar[3].str))}
}
yyVAL.union = yyLOCAL
- case 773:
+ case 789:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4202
+//line sql.y:4269
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 774:
+ case 790:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4206
+//line sql.y:4273
{
yyVAL.identifierCS = NewIdentifierCS("@" + string(yyDollar[1].str))
}
- case 775:
+ case 791:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4210
+//line sql.y:4277
{
yyVAL.identifierCS = NewIdentifierCS("@@" + string(yyDollar[1].str))
}
- case 776:
+ case 792:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4214
+//line sql.y:4281
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 777:
+ case 793:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4221
+//line sql.y:4288
{
yyLOCAL = &Begin{}
}
yyVAL.union = yyLOCAL
- case 778:
- yyDollar = yyS[yypt-2 : yypt+1]
+ case 794:
+ yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4225
+//line sql.y:4292
{
- yyLOCAL = &Begin{}
+ yyLOCAL = &Begin{TxAccessModes: yyDollar[3].txAccessModesUnion()}
}
yyVAL.union = yyLOCAL
- case 779:
+ case 795:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ var yyLOCAL []TxAccessMode
+//line sql.y:4297
+ {
+ yyLOCAL = nil
+ }
+ yyVAL.union = yyLOCAL
+ case 796:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL []TxAccessMode
+//line sql.y:4301
+ {
+ yyLOCAL = yyDollar[1].txAccessModesUnion()
+ }
+ yyVAL.union = yyLOCAL
+ case 797:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL []TxAccessMode
+//line sql.y:4307
+ {
+ yyLOCAL = []TxAccessMode{yyDollar[1].txAccessModeUnion()}
+ }
+ yyVAL.union = yyLOCAL
+ case 798:
+ yyDollar = yyS[yypt-3 : yypt+1]
+//line sql.y:4311
+ {
+ yySLICE := (*[]TxAccessMode)(yyIaddr(yyVAL.union))
+ *yySLICE = append(*yySLICE, yyDollar[3].txAccessModeUnion())
+ }
+ case 799:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ var yyLOCAL TxAccessMode
+//line sql.y:4317
+ {
+ yyLOCAL = WithConsistentSnapshot
+ }
+ yyVAL.union = yyLOCAL
+ case 800:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ var yyLOCAL TxAccessMode
+//line sql.y:4321
+ {
+ yyLOCAL = ReadWrite
+ }
+ yyVAL.union = yyLOCAL
+ case 801:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ var yyLOCAL TxAccessMode
+//line sql.y:4325
+ {
+ yyLOCAL = ReadOnly
+ }
+ yyVAL.union = yyLOCAL
+ case 802:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4231
+//line sql.y:4332
{
yyLOCAL = &Commit{}
}
yyVAL.union = yyLOCAL
- case 780:
+ case 803:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4237
+//line sql.y:4338
{
yyLOCAL = &Rollback{}
}
yyVAL.union = yyLOCAL
- case 781:
+ case 804:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4241
+//line sql.y:4342
{
yyLOCAL = &SRollback{Name: yyDollar[5].identifierCI}
}
yyVAL.union = yyLOCAL
- case 782:
+ case 805:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4246
+//line sql.y:4347
{
yyVAL.empty = struct{}{}
}
- case 783:
+ case 806:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4248
+//line sql.y:4349
{
yyVAL.empty = struct{}{}
}
- case 784:
+ case 807:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4251
+//line sql.y:4352
{
yyVAL.empty = struct{}{}
}
- case 785:
+ case 808:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4253
+//line sql.y:4354
{
yyVAL.empty = struct{}{}
}
- case 786:
+ case 809:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4257
+//line sql.y:4358
{
yyLOCAL = &Savepoint{Name: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 787:
+ case 810:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4263
+//line sql.y:4364
{
yyLOCAL = &Release{Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 788:
+ case 811:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4268
+//line sql.y:4369
{
yyLOCAL = EmptyType
}
yyVAL.union = yyLOCAL
- case 789:
+ case 812:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4272
+//line sql.y:4373
{
yyLOCAL = JSONType
}
yyVAL.union = yyLOCAL
- case 790:
+ case 813:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4276
+//line sql.y:4377
{
yyLOCAL = TreeType
}
yyVAL.union = yyLOCAL
- case 791:
+ case 814:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4280
+//line sql.y:4381
{
yyLOCAL = VitessType
}
yyVAL.union = yyLOCAL
- case 792:
+ case 815:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4284
+//line sql.y:4385
{
yyLOCAL = VTExplainType
}
yyVAL.union = yyLOCAL
- case 793:
+ case 816:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4288
+//line sql.y:4389
{
yyLOCAL = TraditionalType
}
yyVAL.union = yyLOCAL
- case 794:
+ case 817:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4292
+//line sql.y:4393
{
yyLOCAL = AnalyzeType
}
yyVAL.union = yyLOCAL
- case 795:
+ case 818:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ var yyLOCAL VExplainType
+//line sql.y:4398
+ {
+ yyLOCAL = PlanVExplainType
+ }
+ yyVAL.union = yyLOCAL
+ case 819:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4298
+ var yyLOCAL VExplainType
+//line sql.y:4402
+ {
+ yyLOCAL = PlanVExplainType
+ }
+ yyVAL.union = yyLOCAL
+ case 820:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL VExplainType
+//line sql.y:4406
+ {
+ yyLOCAL = AllVExplainType
+ }
+ yyVAL.union = yyLOCAL
+ case 821:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ var yyLOCAL VExplainType
+//line sql.y:4410
+ {
+ yyLOCAL = QueriesVExplainType
+ }
+ yyVAL.union = yyLOCAL
+ case 822:
+ yyDollar = yyS[yypt-1 : yypt+1]
+//line sql.y:4416
{
yyVAL.str = yyDollar[1].str
}
- case 796:
+ case 823:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4302
+//line sql.y:4420
{
yyVAL.str = yyDollar[1].str
}
- case 797:
+ case 824:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4306
+//line sql.y:4424
{
yyVAL.str = yyDollar[1].str
}
- case 798:
+ case 825:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4312
+//line sql.y:4430
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 799:
+ case 826:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4316
+//line sql.y:4434
{
yyLOCAL = yyDollar[1].statementUnion()
}
yyVAL.union = yyLOCAL
- case 800:
+ case 827:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4320
+//line sql.y:4438
{
yyLOCAL = yyDollar[1].statementUnion()
}
yyVAL.union = yyLOCAL
- case 801:
+ case 828:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4324
+//line sql.y:4442
{
yyLOCAL = yyDollar[1].statementUnion()
}
yyVAL.union = yyLOCAL
- case 802:
+ case 829:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4329
+//line sql.y:4447
{
yyVAL.str = ""
}
- case 803:
+ case 830:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4333
+//line sql.y:4451
{
yyVAL.str = yyDollar[1].identifierCI.val
}
- case 804:
+ case 831:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4337
+//line sql.y:4455
{
yyVAL.str = encodeSQLString(yyDollar[1].str)
}
- case 805:
+ case 832:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4343
+//line sql.y:4461
{
yyLOCAL = &ExplainTab{Table: yyDollar[3].tableName, Wild: yyDollar[4].str}
}
yyVAL.union = yyLOCAL
- case 806:
+ case 833:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4347
+//line sql.y:4465
{
yyLOCAL = &ExplainStmt{Type: yyDollar[3].explainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()}
}
yyVAL.union = yyLOCAL
- case 807:
+ case 834:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ var yyLOCAL Statement
+//line sql.y:4471
+ {
+ yyLOCAL = &VExplainStmt{Type: yyDollar[3].vexplainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()}
+ }
+ yyVAL.union = yyLOCAL
+ case 835:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4353
+//line sql.y:4477
{
yyLOCAL = &OtherAdmin{}
}
yyVAL.union = yyLOCAL
- case 808:
+ case 836:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4357
+//line sql.y:4481
{
yyLOCAL = &OtherAdmin{}
}
yyVAL.union = yyLOCAL
- case 809:
+ case 837:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4363
+//line sql.y:4487
{
yyLOCAL = &LockTables{Tables: yyDollar[3].tableAndLockTypesUnion()}
}
yyVAL.union = yyLOCAL
- case 810:
+ case 838:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableAndLockTypes
-//line sql.y:4369
+//line sql.y:4493
{
yyLOCAL = TableAndLockTypes{yyDollar[1].tableAndLockTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 811:
+ case 839:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4373
+//line sql.y:4497
{
yySLICE := (*TableAndLockTypes)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableAndLockTypeUnion())
}
- case 812:
+ case 840:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *TableAndLockType
-//line sql.y:4379
+//line sql.y:4503
{
yyLOCAL = &TableAndLockType{Table: yyDollar[1].aliasedTableNameUnion(), Lock: yyDollar[2].lockTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 813:
+ case 841:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4385
+//line sql.y:4509
{
yyLOCAL = Read
}
yyVAL.union = yyLOCAL
- case 814:
+ case 842:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4389
+//line sql.y:4513
{
yyLOCAL = ReadLocal
}
yyVAL.union = yyLOCAL
- case 815:
+ case 843:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4393
+//line sql.y:4517
{
yyLOCAL = Write
}
yyVAL.union = yyLOCAL
- case 816:
+ case 844:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4397
+//line sql.y:4521
{
yyLOCAL = LowPriorityWrite
}
yyVAL.union = yyLOCAL
- case 817:
+ case 845:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4403
+//line sql.y:4527
{
yyLOCAL = &UnlockTables{}
}
yyVAL.union = yyLOCAL
- case 818:
+ case 846:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4409
+//line sql.y:4533
{
yyLOCAL = &RevertMigration{Comments: Comments(yyDollar[2].strs).Parsed(), UUID: string(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 819:
+ case 847:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4415
+//line sql.y:4539
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), FlushOptions: yyDollar[3].strs}
}
yyVAL.union = yyLOCAL
- case 820:
+ case 848:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4419
+//line sql.y:4543
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 821:
+ case 849:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4423
+//line sql.y:4547
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), WithLock: true}
}
yyVAL.union = yyLOCAL
- case 822:
+ case 850:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4427
+//line sql.y:4551
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion()}
}
yyVAL.union = yyLOCAL
- case 823:
+ case 851:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4431
+//line sql.y:4555
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), WithLock: true}
}
yyVAL.union = yyLOCAL
- case 824:
+ case 852:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4435
+//line sql.y:4559
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), ForExport: true}
}
yyVAL.union = yyLOCAL
- case 825:
+ case 853:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4441
+//line sql.y:4565
{
yyVAL.strs = []string{yyDollar[1].str}
}
- case 826:
+ case 854:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4445
+//line sql.y:4569
{
yyVAL.strs = append(yyDollar[1].strs, yyDollar[3].str)
}
- case 827:
+ case 855:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4451
+//line sql.y:4575
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 828:
+ case 856:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4455
+//line sql.y:4579
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 829:
+ case 857:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4459
+//line sql.y:4583
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 830:
+ case 858:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4463
+//line sql.y:4587
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 831:
+ case 859:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4467
+//line sql.y:4591
{
yyVAL.str = string(yyDollar[1].str)
}
- case 832:
+ case 860:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4471
+//line sql.y:4595
{
yyVAL.str = string(yyDollar[1].str)
}
- case 833:
+ case 861:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4475
+//line sql.y:4599
{
yyVAL.str = string(yyDollar[1].str)
}
- case 834:
+ case 862:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4479
+//line sql.y:4603
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + yyDollar[3].str
}
- case 835:
+ case 863:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4483
+//line sql.y:4607
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 836:
+ case 864:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4487
+//line sql.y:4611
{
yyVAL.str = string(yyDollar[1].str)
}
- case 837:
+ case 865:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4491
+//line sql.y:4615
{
yyVAL.str = string(yyDollar[1].str)
}
- case 838:
+ case 866:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4495
+//line sql.y:4619
{
yyVAL.str = string(yyDollar[1].str)
}
- case 839:
+ case 867:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:4500
+//line sql.y:4624
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 840:
+ case 868:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4504
+//line sql.y:4628
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 841:
+ case 869:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4508
+//line sql.y:4632
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 842:
+ case 870:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4513
+//line sql.y:4637
{
yyVAL.str = ""
}
- case 843:
+ case 871:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4517
+//line sql.y:4641
{
yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String()
}
- case 844:
+ case 872:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4522
+//line sql.y:4646
{
setAllowComments(yylex, true)
}
- case 845:
+ case 873:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4526
+//line sql.y:4650
{
yyVAL.strs = yyDollar[2].strs
setAllowComments(yylex, false)
}
- case 846:
+ case 874:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4532
+//line sql.y:4656
{
yyVAL.strs = nil
}
- case 847:
+ case 875:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4536
+//line sql.y:4660
{
yyVAL.strs = append(yyDollar[1].strs, yyDollar[2].str)
}
- case 848:
+ case 876:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4542
+//line sql.y:4666
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 849:
+ case 877:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:4546
+//line sql.y:4670
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 850:
+ case 878:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:4550
+//line sql.y:4674
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 851:
+ case 879:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4555
+//line sql.y:4679
{
yyVAL.str = ""
}
- case 852:
+ case 880:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4559
+//line sql.y:4683
{
yyVAL.str = SQLNoCacheStr
}
- case 853:
+ case 881:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4563
+//line sql.y:4687
{
yyVAL.str = SQLCacheStr
}
- case 854:
+ case 882:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:4568
+//line sql.y:4692
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 855:
+ case 883:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4572
+//line sql.y:4696
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 856:
+ case 884:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4576
+//line sql.y:4700
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 857:
+ case 885:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4582
+//line sql.y:4706
{
yyLOCAL = &PrepareStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Statement: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 858:
+ case 886:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4586
+//line sql.y:4710
{
yyLOCAL = &PrepareStmt{
Name: yyDollar[3].identifierCI,
@@ -15257,595 +15507,595 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 859:
+ case 887:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4596
+//line sql.y:4720
{
yyLOCAL = &ExecuteStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Arguments: yyDollar[4].variablesUnion()}
}
yyVAL.union = yyLOCAL
- case 860:
+ case 888:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*Variable
-//line sql.y:4601
+//line sql.y:4725
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 861:
+ case 889:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []*Variable
-//line sql.y:4605
+//line sql.y:4729
{
yyLOCAL = yyDollar[2].variablesUnion()
}
yyVAL.union = yyLOCAL
- case 862:
+ case 890:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4611
+//line sql.y:4735
{
yyLOCAL = &DeallocateStmt{Type: DeallocateType, Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI}
}
yyVAL.union = yyLOCAL
- case 863:
+ case 891:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4615
+//line sql.y:4739
{
yyLOCAL = &DeallocateStmt{Type: DropType, Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI}
}
yyVAL.union = yyLOCAL
- case 864:
+ case 892:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL SelectExprs
-//line sql.y:4620
+//line sql.y:4744
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 865:
+ case 893:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectExprs
-//line sql.y:4624
+//line sql.y:4748
{
yyLOCAL = yyDollar[1].selectExprsUnion()
}
yyVAL.union = yyLOCAL
- case 866:
+ case 894:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4629
+//line sql.y:4753
{
yyVAL.strs = nil
}
- case 867:
+ case 895:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4633
+//line sql.y:4757
{
yyVAL.strs = []string{yyDollar[1].str}
}
- case 868:
+ case 896:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4637
+//line sql.y:4761
{ // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce'
yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str}
}
- case 869:
+ case 897:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4641
+//line sql.y:4765
{
yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str}
}
- case 870:
+ case 898:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:4645
+//line sql.y:4769
{
yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str, yyDollar[4].str}
}
- case 871:
+ case 899:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4651
+//line sql.y:4775
{
yyVAL.str = SQLNoCacheStr
}
- case 872:
+ case 900:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4655
+//line sql.y:4779
{
yyVAL.str = SQLCacheStr
}
- case 873:
+ case 901:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4659
+//line sql.y:4783
{
yyVAL.str = DistinctStr
}
- case 874:
+ case 902:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4663
+//line sql.y:4787
{
yyVAL.str = DistinctStr
}
- case 875:
+ case 903:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4667
+//line sql.y:4791
{
yyVAL.str = StraightJoinHint
}
- case 876:
+ case 904:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4671
+//line sql.y:4795
{
yyVAL.str = SQLCalcFoundRowsStr
}
- case 877:
+ case 905:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4675
+//line sql.y:4799
{
yyVAL.str = AllStr // These are not picked up by NewSelect, and so ALL will be dropped. But this is OK, since it's redundant anyway
}
- case 878:
+ case 906:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectExprs
-//line sql.y:4681
+//line sql.y:4805
{
yyLOCAL = SelectExprs{yyDollar[1].selectExprUnion()}
}
yyVAL.union = yyLOCAL
- case 879:
+ case 907:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4685
+//line sql.y:4809
{
yySLICE := (*SelectExprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].selectExprUnion())
}
- case 880:
+ case 908:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4691
+//line sql.y:4815
{
yyLOCAL = &StarExpr{}
}
yyVAL.union = yyLOCAL
- case 881:
+ case 909:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4695
+//line sql.y:4819
{
yyLOCAL = &AliasedExpr{Expr: yyDollar[1].exprUnion(), As: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 882:
+ case 910:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4699
+//line sql.y:4823
{
yyLOCAL = &StarExpr{TableName: TableName{Name: yyDollar[1].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 883:
+ case 911:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4703
+//line sql.y:4827
{
yyLOCAL = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 884:
+ case 912:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4708
+//line sql.y:4832
{
yyVAL.identifierCI = IdentifierCI{}
}
- case 885:
+ case 913:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4712
+//line sql.y:4836
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 886:
+ case 914:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4716
+//line sql.y:4840
{
yyVAL.identifierCI = yyDollar[2].identifierCI
}
- case 888:
+ case 916:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4723
+//line sql.y:4847
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
- case 889:
+ case 917:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4728
+//line sql.y:4852
{
yyLOCAL = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewIdentifierCS("dual")}}}
}
yyVAL.union = yyLOCAL
- case 890:
+ case 918:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4732
+//line sql.y:4856
{
yyLOCAL = yyDollar[1].tableExprsUnion()
}
yyVAL.union = yyLOCAL
- case 891:
+ case 919:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4738
+//line sql.y:4862
{
yyLOCAL = yyDollar[2].tableExprsUnion()
}
yyVAL.union = yyLOCAL
- case 892:
+ case 920:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4744
+//line sql.y:4868
{
yyLOCAL = TableExprs{yyDollar[1].tableExprUnion()}
}
yyVAL.union = yyLOCAL
- case 893:
+ case 921:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4748
+//line sql.y:4872
{
yySLICE := (*TableExprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableExprUnion())
}
- case 896:
+ case 924:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4758
+//line sql.y:4882
{
yyLOCAL = yyDollar[1].aliasedTableNameUnion()
}
yyVAL.union = yyLOCAL
- case 897:
+ case 925:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4762
+//line sql.y:4886
{
yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].derivedTableUnion(), As: yyDollar[3].identifierCS, Columns: yyDollar[4].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 898:
+ case 926:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4766
+//line sql.y:4890
{
yyLOCAL = &ParenTableExpr{Exprs: yyDollar[2].tableExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 899:
+ case 927:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4770
+//line sql.y:4894
{
yyLOCAL = yyDollar[1].tableExprUnion()
}
yyVAL.union = yyLOCAL
- case 900:
+ case 928:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *DerivedTable
-//line sql.y:4776
+//line sql.y:4900
{
yyLOCAL = &DerivedTable{Lateral: false, Select: yyDollar[1].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 901:
+ case 929:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *DerivedTable
-//line sql.y:4780
+//line sql.y:4904
{
yyLOCAL = &DerivedTable{Lateral: true, Select: yyDollar[2].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 902:
+ case 930:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *AliasedTableExpr
-//line sql.y:4786
+//line sql.y:4910
{
yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].identifierCS, Hints: yyDollar[3].indexHintsUnion()}
}
yyVAL.union = yyLOCAL
- case 903:
+ case 931:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *AliasedTableExpr
-//line sql.y:4790
+//line sql.y:4914
{
yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitionsUnion(), As: yyDollar[6].identifierCS, Hints: yyDollar[7].indexHintsUnion()}
}
yyVAL.union = yyLOCAL
- case 904:
+ case 932:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4795
+//line sql.y:4919
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 905:
+ case 933:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4799
+//line sql.y:4923
{
yyLOCAL = yyDollar[2].columnsUnion()
}
yyVAL.union = yyLOCAL
- case 906:
+ case 934:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4804
+//line sql.y:4928
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 907:
+ case 935:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4808
+//line sql.y:4932
{
yyLOCAL = yyDollar[1].columnsUnion()
}
yyVAL.union = yyLOCAL
- case 908:
+ case 936:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4814
+//line sql.y:4938
{
yyLOCAL = Columns{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 909:
+ case 937:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4818
+//line sql.y:4942
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 910:
+ case 938:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*Variable
-//line sql.y:4824
+//line sql.y:4948
{
yyLOCAL = []*Variable{yyDollar[1].variableUnion()}
}
yyVAL.union = yyLOCAL
- case 911:
+ case 939:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4828
+//line sql.y:4952
{
yySLICE := (*[]*Variable)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].variableUnion())
}
- case 912:
+ case 940:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4834
+//line sql.y:4958
{
yyLOCAL = Columns{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 913:
+ case 941:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4838
+//line sql.y:4962
{
yyLOCAL = Columns{NewIdentifierCI(string(yyDollar[1].str))}
}
yyVAL.union = yyLOCAL
- case 914:
+ case 942:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4842
+//line sql.y:4966
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 915:
+ case 943:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4846
+//line sql.y:4970
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, NewIdentifierCI(string(yyDollar[3].str)))
}
- case 916:
+ case 944:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Partitions
-//line sql.y:4852
+//line sql.y:4976
{
yyLOCAL = Partitions{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 917:
+ case 945:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4856
+//line sql.y:4980
{
yySLICE := (*Partitions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 918:
+ case 946:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4869
+//line sql.y:4993
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition}
}
yyVAL.union = yyLOCAL
- case 919:
+ case 947:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4873
+//line sql.y:4997
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition}
}
yyVAL.union = yyLOCAL
- case 920:
+ case 948:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4877
+//line sql.y:5001
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition}
}
yyVAL.union = yyLOCAL
- case 921:
+ case 949:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4881
+//line sql.y:5005
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion()}
}
yyVAL.union = yyLOCAL
- case 922:
+ case 950:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4887
+//line sql.y:5011
{
yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()}
}
- case 923:
+ case 951:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:4889
+//line sql.y:5013
{
yyVAL.joinCondition = &JoinCondition{Using: yyDollar[3].columnsUnion()}
}
- case 924:
+ case 952:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4893
+//line sql.y:5017
{
yyVAL.joinCondition = &JoinCondition{}
}
- case 925:
+ case 953:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4895
+//line sql.y:5019
{
yyVAL.joinCondition = yyDollar[1].joinCondition
}
- case 926:
+ case 954:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4899
+//line sql.y:5023
{
yyVAL.joinCondition = &JoinCondition{}
}
- case 927:
+ case 955:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4901
+//line sql.y:5025
{
yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()}
}
- case 928:
+ case 956:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4904
+//line sql.y:5028
{
yyVAL.empty = struct{}{}
}
- case 929:
+ case 957:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4906
+//line sql.y:5030
{
yyVAL.empty = struct{}{}
}
- case 930:
+ case 958:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4909
+//line sql.y:5033
{
yyVAL.identifierCS = NewIdentifierCS("")
}
- case 931:
+ case 959:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4913
+//line sql.y:5037
{
yyVAL.identifierCS = yyDollar[1].identifierCS
}
- case 932:
+ case 960:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4917
+//line sql.y:5041
{
yyVAL.identifierCS = yyDollar[2].identifierCS
}
- case 934:
+ case 962:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4924
+//line sql.y:5048
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 935:
+ case 963:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4930
+//line sql.y:5054
{
yyLOCAL = NormalJoinType
}
yyVAL.union = yyLOCAL
- case 936:
+ case 964:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4934
+//line sql.y:5058
{
yyLOCAL = NormalJoinType
}
yyVAL.union = yyLOCAL
- case 937:
+ case 965:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4938
+//line sql.y:5062
{
yyLOCAL = NormalJoinType
}
yyVAL.union = yyLOCAL
- case 938:
+ case 966:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4944
+//line sql.y:5068
{
yyLOCAL = StraightJoinType
}
yyVAL.union = yyLOCAL
- case 939:
+ case 967:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4950
+//line sql.y:5074
{
yyLOCAL = LeftJoinType
}
yyVAL.union = yyLOCAL
- case 940:
+ case 968:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4954
+//line sql.y:5078
{
yyLOCAL = LeftJoinType
}
yyVAL.union = yyLOCAL
- case 941:
+ case 969:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4958
+//line sql.y:5082
{
yyLOCAL = RightJoinType
}
yyVAL.union = yyLOCAL
- case 942:
+ case 970:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4962
+//line sql.y:5086
{
yyLOCAL = RightJoinType
}
yyVAL.union = yyLOCAL
- case 943:
+ case 971:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4968
+//line sql.y:5092
{
yyLOCAL = NaturalJoinType
}
yyVAL.union = yyLOCAL
- case 944:
+ case 972:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4972
+//line sql.y:5096
{
if yyDollar[2].joinTypeUnion() == LeftJoinType {
yyLOCAL = NaturalLeftJoinType
@@ -15854,593 +16104,593 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 945:
+ case 973:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4982
+//line sql.y:5106
{
yyVAL.tableName = yyDollar[2].tableName
}
- case 946:
+ case 974:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4986
+//line sql.y:5110
{
yyVAL.tableName = yyDollar[1].tableName
}
- case 947:
+ case 975:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4992
+//line sql.y:5116
{
yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS}
}
- case 948:
+ case 976:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4996
+//line sql.y:5120
{
yyVAL.tableName = TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}
}
- case 949:
+ case 977:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5002
+//line sql.y:5126
{
yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS}
}
- case 950:
+ case 978:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL IndexHints
-//line sql.y:5007
+//line sql.y:5131
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 951:
+ case 979:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IndexHints
-//line sql.y:5011
+//line sql.y:5135
{
yyLOCAL = yyDollar[1].indexHintsUnion()
}
yyVAL.union = yyLOCAL
- case 952:
+ case 980:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IndexHints
-//line sql.y:5017
+//line sql.y:5141
{
yyLOCAL = IndexHints{yyDollar[1].indexHintUnion()}
}
yyVAL.union = yyLOCAL
- case 953:
+ case 981:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:5021
+//line sql.y:5145
{
yySLICE := (*IndexHints)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].indexHintUnion())
}
- case 954:
+ case 982:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5027
+//line sql.y:5151
{
yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 955:
+ case 983:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5031
+//line sql.y:5155
{
yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 956:
+ case 984:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5035
+//line sql.y:5159
{
yyLOCAL = &IndexHint{Type: IgnoreOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 957:
+ case 985:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5039
+//line sql.y:5163
{
yyLOCAL = &IndexHint{Type: ForceOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 958:
+ case 986:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5044
+//line sql.y:5168
{
yyLOCAL = NoForType
}
yyVAL.union = yyLOCAL
- case 959:
+ case 987:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5048
+//line sql.y:5172
{
yyLOCAL = JoinForType
}
yyVAL.union = yyLOCAL
- case 960:
+ case 988:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5052
+//line sql.y:5176
{
yyLOCAL = OrderByForType
}
yyVAL.union = yyLOCAL
- case 961:
+ case 989:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5056
+//line sql.y:5180
{
yyLOCAL = GroupByForType
}
yyVAL.union = yyLOCAL
- case 962:
+ case 990:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5062
+//line sql.y:5186
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 963:
+ case 991:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5066
+//line sql.y:5190
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 964:
+ case 992:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5073
+//line sql.y:5197
{
yyLOCAL = &OrExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 965:
+ case 993:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5077
+//line sql.y:5201
{
yyLOCAL = &XorExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 966:
+ case 994:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5081
+//line sql.y:5205
{
yyLOCAL = &AndExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 967:
+ case 995:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5085
+//line sql.y:5209
{
yyLOCAL = &NotExpr{Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 968:
+ case 996:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5089
+//line sql.y:5213
{
yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].isExprOperatorUnion()}
}
yyVAL.union = yyLOCAL
- case 969:
+ case 997:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5093
+//line sql.y:5217
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 970:
+ case 998:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5097
+//line sql.y:5221
{
yyLOCAL = &MemberOfExpr{Value: yyDollar[1].exprUnion(), JSONArr: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 971:
+ case 999:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5103
+//line sql.y:5227
{
yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNullOp}
}
yyVAL.union = yyLOCAL
- case 972:
+ case 1000:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5107
+//line sql.y:5231
{
yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNotNullOp}
}
yyVAL.union = yyLOCAL
- case 973:
+ case 1001:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5111
+//line sql.y:5235
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: yyDollar[2].comparisonExprOperatorUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 974:
+ case 1002:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5115
+//line sql.y:5239
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 975:
+ case 1003:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5121
+//line sql.y:5245
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: InOp, Right: yyDollar[3].colTupleUnion()}
}
yyVAL.union = yyLOCAL
- case 976:
+ case 1004:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5125
+//line sql.y:5249
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotInOp, Right: yyDollar[4].colTupleUnion()}
}
yyVAL.union = yyLOCAL
- case 977:
+ case 1005:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5129
+//line sql.y:5253
{
yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: true, From: yyDollar[3].exprUnion(), To: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 978:
+ case 1006:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5133
+//line sql.y:5257
{
yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: false, From: yyDollar[4].exprUnion(), To: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 979:
+ case 1007:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5137
+//line sql.y:5261
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 980:
+ case 1008:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5141
+//line sql.y:5265
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 981:
+ case 1009:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5145
+//line sql.y:5269
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion(), Escape: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 982:
+ case 1010:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5149
+//line sql.y:5273
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion(), Escape: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 983:
+ case 1011:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5153
+//line sql.y:5277
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: RegexpOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 984:
+ case 1012:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5157
+//line sql.y:5281
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotRegexpOp, Right: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 985:
+ case 1013:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5161
+//line sql.y:5285
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 986:
+ case 1014:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:5167
+//line sql.y:5291
{
}
- case 987:
+ case 1015:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:5170
+//line sql.y:5294
{
}
- case 988:
+ case 1016:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5176
+//line sql.y:5300
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitOrOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 989:
+ case 1017:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5180
+//line sql.y:5304
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitAndOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 990:
+ case 1018:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5184
+//line sql.y:5308
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftLeftOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 991:
+ case 1019:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5188
+//line sql.y:5312
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftRightOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 992:
+ case 1020:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5192
+//line sql.y:5316
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: PlusOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 993:
+ case 1021:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5196
+//line sql.y:5320
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MinusOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 994:
+ case 1022:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5200
+//line sql.y:5324
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MultOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 995:
+ case 1023:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5204
+//line sql.y:5328
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: DivOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 996:
+ case 1024:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5208
+//line sql.y:5332
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 997:
+ case 1025:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5212
+//line sql.y:5336
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: IntDivOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 998:
+ case 1026:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5216
+//line sql.y:5340
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 999:
+ case 1027:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5220
+//line sql.y:5344
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitXorOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1000:
+ case 1028:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5224
+//line sql.y:5348
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1001:
+ case 1029:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5230
+//line sql.y:5354
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1002:
+ case 1030:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5234
+//line sql.y:5358
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1003:
+ case 1031:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5238
+//line sql.y:5362
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1004:
+ case 1032:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5242
+//line sql.y:5366
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1005:
+ case 1033:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5246
+//line sql.y:5370
{
yyLOCAL = &CollateExpr{Expr: yyDollar[1].exprUnion(), Collation: yyDollar[3].str}
}
yyVAL.union = yyLOCAL
- case 1006:
+ case 1034:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5250
+//line sql.y:5374
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1007:
+ case 1035:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5254
+//line sql.y:5378
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1008:
+ case 1036:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5258
+//line sql.y:5382
{
yyLOCAL = yyDollar[1].variableUnion()
}
yyVAL.union = yyLOCAL
- case 1009:
+ case 1037:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5262
+//line sql.y:5386
{
yyLOCAL = yyDollar[2].exprUnion() // TODO: do we really want to ignore unary '+' before any kind of literals?
}
yyVAL.union = yyLOCAL
- case 1010:
+ case 1038:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5266
+//line sql.y:5390
{
yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1011:
+ case 1039:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5270
+//line sql.y:5394
{
yyLOCAL = &UnaryExpr{Operator: TildaOp, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1012:
+ case 1040:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5274
+//line sql.y:5398
{
yyLOCAL = &UnaryExpr{Operator: BangOp, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1013:
+ case 1041:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5278
+//line sql.y:5402
{
yyLOCAL = yyDollar[1].subqueryUnion()
}
yyVAL.union = yyLOCAL
- case 1014:
+ case 1042:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5282
+//line sql.y:5406
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1015:
+ case 1043:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5286
+//line sql.y:5410
{
yyLOCAL = &ExistsExpr{Subquery: yyDollar[2].subqueryUnion()}
}
yyVAL.union = yyLOCAL
- case 1016:
+ case 1044:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5290
+//line sql.y:5414
{
yyLOCAL = &MatchExpr{Columns: yyDollar[2].colNamesUnion(), Expr: yyDollar[5].exprUnion(), Option: yyDollar[6].matchExprOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1017:
+ case 1045:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5294
+//line sql.y:5418
{
yyLOCAL = &CastExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion(), Array: yyDollar[6].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 1018:
+ case 1046:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5298
+//line sql.y:5422
{
yyLOCAL = &ConvertExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1019:
+ case 1047:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5302
+//line sql.y:5426
{
yyLOCAL = &ConvertUsingExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].str}
}
yyVAL.union = yyLOCAL
- case 1020:
+ case 1048:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5306
+//line sql.y:5430
{
// From: https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#operator_binary
// To convert a string expression to a binary string, these constructs are equivalent:
@@ -16449,18 +16699,18 @@ yydefault:
yyLOCAL = &ConvertExpr{Expr: yyDollar[2].exprUnion(), Type: &ConvertType{Type: yyDollar[1].str}}
}
yyVAL.union = yyLOCAL
- case 1021:
+ case 1049:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5314
+//line sql.y:5438
{
yyLOCAL = &Default{ColName: yyDollar[2].str}
}
yyVAL.union = yyLOCAL
- case 1022:
+ case 1050:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5318
+//line sql.y:5442
{
// INTERVAL can trigger a shift / reduce conflict. We want
// to shift here for the interval rule. In case we do have
@@ -16469,2192 +16719,2192 @@ yydefault:
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1023:
+ case 1051:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5326
+//line sql.y:5450
{
yyLOCAL = &IntervalFuncExpr{Expr: yyDollar[3].exprUnion(), Exprs: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1024:
+ case 1052:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5330
+//line sql.y:5454
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONExtractOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1025:
+ case 1053:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5334
+//line sql.y:5458
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONUnquoteExtractOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1026:
+ case 1054:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5340
+//line sql.y:5464
{
yyLOCAL = &IntervalExpr{Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].identifierCI.String()}
}
yyVAL.union = yyLOCAL
- case 1027:
+ case 1055:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*ColName
-//line sql.y:5346
+//line sql.y:5470
{
yyLOCAL = yyDollar[1].colNamesUnion()
}
yyVAL.union = yyLOCAL
- case 1028:
+ case 1056:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL []*ColName
-//line sql.y:5350
+//line sql.y:5474
{
yyLOCAL = yyDollar[2].colNamesUnion()
}
yyVAL.union = yyLOCAL
- case 1029:
+ case 1057:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*ColName
-//line sql.y:5356
+//line sql.y:5480
{
yyLOCAL = []*ColName{yyDollar[1].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 1030:
+ case 1058:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5360
+//line sql.y:5484
{
yySLICE := (*[]*ColName)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].colNameUnion())
}
- case 1031:
+ case 1059:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TrimType
-//line sql.y:5366
+//line sql.y:5490
{
yyLOCAL = BothTrimType
}
yyVAL.union = yyLOCAL
- case 1032:
+ case 1060:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TrimType
-//line sql.y:5370
+//line sql.y:5494
{
yyLOCAL = LeadingTrimType
}
yyVAL.union = yyLOCAL
- case 1033:
+ case 1061:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TrimType
-//line sql.y:5374
+//line sql.y:5498
{
yyLOCAL = TrailingTrimType
}
yyVAL.union = yyLOCAL
- case 1034:
+ case 1062:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FrameUnitType
-//line sql.y:5380
+//line sql.y:5504
{
yyLOCAL = FrameRowsType
}
yyVAL.union = yyLOCAL
- case 1035:
+ case 1063:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FrameUnitType
-//line sql.y:5384
+//line sql.y:5508
{
yyLOCAL = FrameRangeType
}
yyVAL.union = yyLOCAL
- case 1036:
+ case 1064:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5391
+//line sql.y:5515
{
yyLOCAL = CumeDistExprType
}
yyVAL.union = yyLOCAL
- case 1037:
+ case 1065:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5395
+//line sql.y:5519
{
yyLOCAL = DenseRankExprType
}
yyVAL.union = yyLOCAL
- case 1038:
+ case 1066:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5399
+//line sql.y:5523
{
yyLOCAL = PercentRankExprType
}
yyVAL.union = yyLOCAL
- case 1039:
+ case 1067:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5403
+//line sql.y:5527
{
yyLOCAL = RankExprType
}
yyVAL.union = yyLOCAL
- case 1040:
+ case 1068:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5407
+//line sql.y:5531
{
yyLOCAL = RowNumberExprType
}
yyVAL.union = yyLOCAL
- case 1041:
+ case 1069:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5413
+//line sql.y:5537
{
yyLOCAL = &FramePoint{Type: CurrentRowType}
}
yyVAL.union = yyLOCAL
- case 1042:
+ case 1070:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5417
+//line sql.y:5541
{
yyLOCAL = &FramePoint{Type: UnboundedPrecedingType}
}
yyVAL.union = yyLOCAL
- case 1043:
+ case 1071:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5421
+//line sql.y:5545
{
yyLOCAL = &FramePoint{Type: UnboundedFollowingType}
}
yyVAL.union = yyLOCAL
- case 1044:
+ case 1072:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5425
+//line sql.y:5549
{
yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[1].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1045:
+ case 1073:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5429
+//line sql.y:5553
{
yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[1].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1046:
+ case 1074:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5435
+//line sql.y:5559
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1047:
+ case 1075:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5439
+//line sql.y:5563
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1048:
+ case 1076:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5444
+//line sql.y:5568
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1049:
+ case 1077:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5448
+//line sql.y:5572
{
yyLOCAL = yyDollar[1].frameClauseUnion()
}
yyVAL.union = yyLOCAL
- case 1050:
+ case 1078:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5454
+//line sql.y:5578
{
yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[2].framePointUnion()}
}
yyVAL.union = yyLOCAL
- case 1051:
+ case 1079:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5458
+//line sql.y:5582
{
yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[3].framePointUnion(), End: yyDollar[5].framePointUnion()}
}
yyVAL.union = yyLOCAL
- case 1052:
+ case 1080:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:5463
+//line sql.y:5587
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1053:
+ case 1081:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:5467
+//line sql.y:5591
{
yyLOCAL = yyDollar[3].exprsUnion()
}
yyVAL.union = yyLOCAL
- case 1054:
+ case 1082:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:5472
+//line sql.y:5596
{
}
- case 1055:
+ case 1083:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:5475
+//line sql.y:5599
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 1056:
+ case 1084:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *WindowSpecification
-//line sql.y:5481
+//line sql.y:5605
{
yyLOCAL = &WindowSpecification{Name: yyDollar[1].identifierCI, PartitionClause: yyDollar[2].exprsUnion(), OrderClause: yyDollar[3].orderByUnion(), FrameClause: yyDollar[4].frameClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1057:
+ case 1085:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *OverClause
-//line sql.y:5487
+//line sql.y:5611
{
yyLOCAL = &OverClause{WindowSpec: yyDollar[3].windowSpecificationUnion()}
}
yyVAL.union = yyLOCAL
- case 1058:
+ case 1086:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *OverClause
-//line sql.y:5491
+//line sql.y:5615
{
yyLOCAL = &OverClause{WindowName: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1059:
+ case 1087:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *NullTreatmentClause
-//line sql.y:5496
+//line sql.y:5620
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1061:
+ case 1089:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *NullTreatmentClause
-//line sql.y:5503
+//line sql.y:5627
{
yyLOCAL = &NullTreatmentClause{yyDollar[1].nullTreatmentTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1062:
+ case 1090:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL NullTreatmentType
-//line sql.y:5509
+//line sql.y:5633
{
yyLOCAL = RespectNullsType
}
yyVAL.union = yyLOCAL
- case 1063:
+ case 1091:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL NullTreatmentType
-//line sql.y:5513
+//line sql.y:5637
{
yyLOCAL = IgnoreNullsType
}
yyVAL.union = yyLOCAL
- case 1064:
+ case 1092:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FirstOrLastValueExprType
-//line sql.y:5519
+//line sql.y:5643
{
yyLOCAL = FirstValueExprType
}
yyVAL.union = yyLOCAL
- case 1065:
+ case 1093:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FirstOrLastValueExprType
-//line sql.y:5523
+//line sql.y:5647
{
yyLOCAL = LastValueExprType
}
yyVAL.union = yyLOCAL
- case 1066:
+ case 1094:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL FromFirstLastType
-//line sql.y:5529
+//line sql.y:5653
{
yyLOCAL = FromFirstType
}
yyVAL.union = yyLOCAL
- case 1067:
+ case 1095:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL FromFirstLastType
-//line sql.y:5533
+//line sql.y:5657
{
yyLOCAL = FromLastType
}
yyVAL.union = yyLOCAL
- case 1068:
+ case 1096:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *FromFirstLastClause
-//line sql.y:5538
+//line sql.y:5662
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1070:
+ case 1098:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *FromFirstLastClause
-//line sql.y:5545
+//line sql.y:5669
{
yyLOCAL = &FromFirstLastClause{yyDollar[1].fromFirstLastTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1071:
+ case 1099:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LagLeadExprType
-//line sql.y:5551
+//line sql.y:5675
{
yyLOCAL = LagExprType
}
yyVAL.union = yyLOCAL
- case 1072:
+ case 1100:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LagLeadExprType
-//line sql.y:5555
+//line sql.y:5679
{
yyLOCAL = LeadExprType
}
yyVAL.union = yyLOCAL
- case 1073:
+ case 1101:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *WindowDefinition
-//line sql.y:5561
+//line sql.y:5685
{
yyLOCAL = &WindowDefinition{Name: yyDollar[1].identifierCI, WindowSpec: yyDollar[4].windowSpecificationUnion()}
}
yyVAL.union = yyLOCAL
- case 1074:
+ case 1102:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL WindowDefinitions
-//line sql.y:5567
+//line sql.y:5691
{
yyLOCAL = WindowDefinitions{yyDollar[1].windowDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 1075:
+ case 1103:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5571
+//line sql.y:5695
{
yySLICE := (*WindowDefinitions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].windowDefinitionUnion())
}
- case 1076:
+ case 1104:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:5577
+//line sql.y:5701
{
yyVAL.str = ""
}
- case 1077:
+ case 1105:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5581
+//line sql.y:5705
{
yyVAL.str = string(yyDollar[2].identifierCI.String())
}
- case 1078:
+ case 1106:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL BoolVal
-//line sql.y:5587
+//line sql.y:5711
{
yyLOCAL = BoolVal(true)
}
yyVAL.union = yyLOCAL
- case 1079:
+ case 1107:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL BoolVal
-//line sql.y:5591
+//line sql.y:5715
{
yyLOCAL = BoolVal(false)
}
yyVAL.union = yyLOCAL
- case 1080:
+ case 1108:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5598
+//line sql.y:5722
{
yyLOCAL = IsTrueOp
}
yyVAL.union = yyLOCAL
- case 1081:
+ case 1109:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5602
+//line sql.y:5726
{
yyLOCAL = IsNotTrueOp
}
yyVAL.union = yyLOCAL
- case 1082:
+ case 1110:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5606
+//line sql.y:5730
{
yyLOCAL = IsFalseOp
}
yyVAL.union = yyLOCAL
- case 1083:
+ case 1111:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5610
+//line sql.y:5734
{
yyLOCAL = IsNotFalseOp
}
yyVAL.union = yyLOCAL
- case 1084:
+ case 1112:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5616
+//line sql.y:5740
{
yyLOCAL = EqualOp
}
yyVAL.union = yyLOCAL
- case 1085:
+ case 1113:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5620
+//line sql.y:5744
{
yyLOCAL = LessThanOp
}
yyVAL.union = yyLOCAL
- case 1086:
+ case 1114:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5624
+//line sql.y:5748
{
yyLOCAL = GreaterThanOp
}
yyVAL.union = yyLOCAL
- case 1087:
+ case 1115:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5628
+//line sql.y:5752
{
yyLOCAL = LessEqualOp
}
yyVAL.union = yyLOCAL
- case 1088:
+ case 1116:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5632
+//line sql.y:5756
{
yyLOCAL = GreaterEqualOp
}
yyVAL.union = yyLOCAL
- case 1089:
+ case 1117:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5636
+//line sql.y:5760
{
yyLOCAL = NotEqualOp
}
yyVAL.union = yyLOCAL
- case 1090:
+ case 1118:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5640
+//line sql.y:5764
{
yyLOCAL = NullSafeEqualOp
}
yyVAL.union = yyLOCAL
- case 1091:
+ case 1119:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColTuple
-//line sql.y:5646
+//line sql.y:5770
{
yyLOCAL = yyDollar[1].valTupleUnion()
}
yyVAL.union = yyLOCAL
- case 1092:
+ case 1120:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColTuple
-//line sql.y:5650
+//line sql.y:5774
{
yyLOCAL = yyDollar[1].subqueryUnion()
}
yyVAL.union = yyLOCAL
- case 1093:
+ case 1121:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColTuple
-//line sql.y:5654
+//line sql.y:5778
{
yyLOCAL = ListArg(yyDollar[1].str[2:])
bindVariable(yylex, yyDollar[1].str[2:])
}
yyVAL.union = yyLOCAL
- case 1094:
+ case 1122:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Subquery
-//line sql.y:5661
+//line sql.y:5785
{
yyLOCAL = &Subquery{yyDollar[1].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 1095:
+ case 1123:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:5667
+//line sql.y:5791
{
yyLOCAL = Exprs{yyDollar[1].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1096:
+ case 1124:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5671
+//line sql.y:5795
{
yySLICE := (*Exprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].exprUnion())
}
- case 1097:
+ case 1125:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5681
+//line sql.y:5805
{
yyLOCAL = &FuncExpr{Name: yyDollar[1].identifierCI, Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1098:
+ case 1126:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5685
+//line sql.y:5809
{
yyLOCAL = &FuncExpr{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCI, Exprs: yyDollar[5].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1099:
+ case 1127:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5695
+//line sql.y:5819
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("left"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1100:
+ case 1128:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5699
+//line sql.y:5823
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("right"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1101:
+ case 1129:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5703
+//line sql.y:5827
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1102:
+ case 1130:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5707
+//line sql.y:5831
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1103:
+ case 1131:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5711
+//line sql.y:5835
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1104:
+ case 1132:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5715
+//line sql.y:5839
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1105:
+ case 1133:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5719
+//line sql.y:5843
{
yyLOCAL = &CaseExpr{Expr: yyDollar[2].exprUnion(), Whens: yyDollar[3].whensUnion(), Else: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1106:
+ case 1134:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5723
+//line sql.y:5847
{
yyLOCAL = &ValuesFuncExpr{Name: yyDollar[3].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 1107:
+ case 1135:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5727
+//line sql.y:5851
{
yyLOCAL = &InsertExpr{Str: yyDollar[3].exprUnion(), Pos: yyDollar[5].exprUnion(), Len: yyDollar[7].exprUnion(), NewStr: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1108:
+ case 1136:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5731
+//line sql.y:5855
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1109:
+ case 1137:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5742
+//line sql.y:5866
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("utc_date")}
}
yyVAL.union = yyLOCAL
- case 1110:
+ case 1138:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5746
+//line sql.y:5870
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1111:
+ case 1139:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5752
+//line sql.y:5876
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("current_date")}
}
yyVAL.union = yyLOCAL
- case 1112:
+ case 1140:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5756
+//line sql.y:5880
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_time"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1113:
+ case 1141:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5761
+//line sql.y:5885
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_time"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1114:
+ case 1142:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5765
+//line sql.y:5889
{
yyLOCAL = &CountStar{}
}
yyVAL.union = yyLOCAL
- case 1115:
+ case 1143:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5769
+//line sql.y:5893
{
yyLOCAL = &Count{Distinct: yyDollar[3].booleanUnion(), Args: yyDollar[4].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1116:
+ case 1144:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5773
+//line sql.y:5897
{
yyLOCAL = &Max{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1117:
+ case 1145:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5777
+//line sql.y:5901
{
yyLOCAL = &Min{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1118:
+ case 1146:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5781
+//line sql.y:5905
{
yyLOCAL = &Sum{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1119:
+ case 1147:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5785
+//line sql.y:5909
{
yyLOCAL = &Avg{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1120:
+ case 1148:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5789
+//line sql.y:5913
{
yyLOCAL = &BitAnd{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1121:
+ case 1149:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5793
+//line sql.y:5917
{
yyLOCAL = &BitOr{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1122:
+ case 1150:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5797
+//line sql.y:5921
{
yyLOCAL = &BitXor{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1123:
+ case 1151:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5801
+//line sql.y:5925
{
yyLOCAL = &Std{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1124:
+ case 1152:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5805
+//line sql.y:5929
{
yyLOCAL = &StdDev{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1125:
+ case 1153:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5809
+//line sql.y:5933
{
yyLOCAL = &StdPop{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1126:
+ case 1154:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5813
+//line sql.y:5937
{
yyLOCAL = &StdSamp{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1127:
+ case 1155:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5817
+//line sql.y:5941
{
yyLOCAL = &VarPop{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1128:
+ case 1156:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5821
+//line sql.y:5945
{
yyLOCAL = &VarSamp{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1129:
+ case 1157:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5825
+//line sql.y:5949
{
yyLOCAL = &Variance{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1130:
+ case 1158:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5829
+//line sql.y:5953
{
yyLOCAL = &GroupConcatExpr{Distinct: yyDollar[3].booleanUnion(), Exprs: yyDollar[4].exprsUnion(), OrderBy: yyDollar[5].orderByUnion(), Separator: yyDollar[6].str, Limit: yyDollar[7].limitUnion()}
}
yyVAL.union = yyLOCAL
- case 1131:
+ case 1159:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5833
+//line sql.y:5957
{
yyLOCAL = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1132:
+ case 1160:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5837
+//line sql.y:5961
{
yyLOCAL = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1133:
+ case 1161:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5841
+//line sql.y:5965
{
yyLOCAL = &ExtractFuncExpr{IntervalTypes: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1134:
+ case 1162:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5845
+//line sql.y:5969
{
yyLOCAL = &WeightStringFuncExpr{Expr: yyDollar[3].exprUnion(), As: yyDollar[4].convertTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1135:
+ case 1163:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5849
+//line sql.y:5973
{
yyLOCAL = &JSONPrettyExpr{JSONVal: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1136:
+ case 1164:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5853
+//line sql.y:5977
{
yyLOCAL = &JSONStorageFreeExpr{JSONVal: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1137:
+ case 1165:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5857
+//line sql.y:5981
{
yyLOCAL = &JSONStorageSizeExpr{JSONVal: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1138:
+ case 1166:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5861
+//line sql.y:5985
{
yyLOCAL = &TrimFuncExpr{TrimFuncType: LTrimType, StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1139:
+ case 1167:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5865
+//line sql.y:5989
{
yyLOCAL = &TrimFuncExpr{TrimFuncType: RTrimType, StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1140:
+ case 1168:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5869
+//line sql.y:5993
{
yyLOCAL = &TrimFuncExpr{Type: yyDollar[3].trimTypeUnion(), TrimArg: yyDollar[4].exprUnion(), StringArg: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1141:
+ case 1169:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5873
+//line sql.y:5997
{
yyLOCAL = &TrimFuncExpr{StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1142:
+ case 1170:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5877
+//line sql.y:6001
{
yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1143:
+ case 1171:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5881
+//line sql.y:6005
{
yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion(), Charset: yyDollar[5].str}
}
yyVAL.union = yyLOCAL
- case 1144:
+ case 1172:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5885
+//line sql.y:6009
{
yyLOCAL = &TrimFuncExpr{TrimArg: yyDollar[3].exprUnion(), StringArg: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1145:
+ case 1173:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5889
+//line sql.y:6013
{
yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1146:
+ case 1174:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5893
+//line sql.y:6017
{
yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion(), Pos: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1147:
+ case 1175:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5897
+//line sql.y:6021
{
yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1148:
+ case 1176:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5901
+//line sql.y:6025
{
yyLOCAL = &LockingFunc{Type: GetLock, Name: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1149:
+ case 1177:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5905
+//line sql.y:6029
{
yyLOCAL = &LockingFunc{Type: IsFreeLock, Name: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1150:
+ case 1178:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5909
+//line sql.y:6033
{
yyLOCAL = &LockingFunc{Type: IsUsedLock, Name: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1151:
+ case 1179:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5913
+//line sql.y:6037
{
yyLOCAL = &LockingFunc{Type: ReleaseAllLocks}
}
yyVAL.union = yyLOCAL
- case 1152:
+ case 1180:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5917
+//line sql.y:6041
{
yyLOCAL = &LockingFunc{Type: ReleaseLock, Name: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1153:
+ case 1181:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5921
+//line sql.y:6045
{
yyLOCAL = &JSONSchemaValidFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1154:
+ case 1182:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5925
+//line sql.y:6049
{
yyLOCAL = &JSONSchemaValidationReportFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1155:
+ case 1183:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5929
+//line sql.y:6053
{
yyLOCAL = &JSONArrayExpr{Params: yyDollar[3].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1156:
+ case 1184:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5933
+//line sql.y:6057
{
yyLOCAL = &JSONObjectExpr{Params: yyDollar[3].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1157:
+ case 1185:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5937
+//line sql.y:6061
{
yyLOCAL = &JSONQuoteExpr{StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1158:
+ case 1186:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5941
+//line sql.y:6065
{
yyLOCAL = &JSONContainsExpr{Target: yyDollar[3].exprUnion(), Candidate: yyDollar[5].exprsUnion()[0], PathList: yyDollar[5].exprsUnion()[1:]}
}
yyVAL.union = yyLOCAL
- case 1159:
+ case 1187:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5945
+//line sql.y:6069
{
yyLOCAL = &JSONContainsPathExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), PathList: yyDollar[7].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1160:
+ case 1188:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5949
+//line sql.y:6073
{
yyLOCAL = &JSONExtractExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1161:
+ case 1189:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5953
+//line sql.y:6077
{
yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1162:
+ case 1190:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5957
+//line sql.y:6081
{
yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1163:
+ case 1191:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5961
+//line sql.y:6085
{
yyLOCAL = &JSONOverlapsExpr{JSONDoc1: yyDollar[3].exprUnion(), JSONDoc2: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1164:
+ case 1192:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5965
+//line sql.y:6089
{
yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1165:
+ case 1193:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5969
+//line sql.y:6093
{
yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion(), EscapeChar: yyDollar[9].exprsUnion()[0], PathList: yyDollar[9].exprsUnion()[1:]}
}
yyVAL.union = yyLOCAL
- case 1166:
+ case 1194:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5973
+//line sql.y:6097
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1167:
+ case 1195:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5977
+//line sql.y:6101
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()}
}
yyVAL.union = yyLOCAL
- case 1168:
+ case 1196:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5981
+//line sql.y:6105
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()}
}
yyVAL.union = yyLOCAL
- case 1169:
+ case 1197:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5985
+//line sql.y:6109
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()}
}
yyVAL.union = yyLOCAL
- case 1170:
+ case 1198:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5989
+//line sql.y:6113
{
yyLOCAL = &JSONAttributesExpr{Type: DepthAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1171:
+ case 1199:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5993
+//line sql.y:6117
{
yyLOCAL = &JSONAttributesExpr{Type: ValidAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1172:
+ case 1200:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5997
+//line sql.y:6121
{
yyLOCAL = &JSONAttributesExpr{Type: TypeAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1173:
+ case 1201:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6001
+//line sql.y:6125
{
yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1174:
+ case 1202:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6005
+//line sql.y:6129
{
yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1175:
+ case 1203:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6009
+//line sql.y:6133
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayAppendType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1176:
+ case 1204:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6013
+//line sql.y:6137
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1177:
+ case 1205:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6017
+//line sql.y:6141
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1178:
+ case 1206:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6021
+//line sql.y:6145
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONReplaceType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1179:
+ case 1207:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6025
+//line sql.y:6149
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONSetType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1180:
+ case 1208:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6029
+//line sql.y:6153
{
yyLOCAL = &JSONValueMergeExpr{Type: JSONMergeType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1181:
+ case 1209:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6033
+//line sql.y:6157
{
yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePatchType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1182:
+ case 1210:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6037
+//line sql.y:6161
{
yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePreserveType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1183:
+ case 1211:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6041
+//line sql.y:6165
{
yyLOCAL = &JSONRemoveExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1184:
+ case 1212:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6045
+//line sql.y:6169
{
yyLOCAL = &JSONUnquoteExpr{JSONValue: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1185:
+ case 1213:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6049
+//line sql.y:6173
{
yyLOCAL = &ArgumentLessWindowExpr{Type: yyDollar[1].argumentLessWindowExprTypeUnion(), OverClause: yyDollar[4].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1186:
+ case 1214:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6053
+//line sql.y:6177
{
yyLOCAL = &FirstOrLastValueExpr{Type: yyDollar[1].firstOrLastValueExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1187:
+ case 1215:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6057
+//line sql.y:6181
{
yyLOCAL = &NtileExpr{N: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1188:
+ case 1216:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6061
+//line sql.y:6185
{
yyLOCAL = &NTHValueExpr{Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), FromFirstLastClause: yyDollar[7].fromFirstLastClauseUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1189:
+ case 1217:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6065
+//line sql.y:6189
{
yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1190:
+ case 1218:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6069
+//line sql.y:6193
{
yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), Default: yyDollar[6].exprUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1195:
+ case 1223:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6079
+//line sql.y:6203
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1196:
+ case 1224:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6083
+//line sql.y:6207
{
yyLOCAL = NewIntLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 1197:
+ case 1225:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6087
+//line sql.y:6211
{
yyLOCAL = yyDollar[1].variableUnion()
}
yyVAL.union = yyLOCAL
- case 1198:
+ case 1226:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6091
+//line sql.y:6215
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
}
yyVAL.union = yyLOCAL
- case 1199:
+ case 1227:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6097
+//line sql.y:6221
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1200:
+ case 1228:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6101
+//line sql.y:6225
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1201:
+ case 1229:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6107
+//line sql.y:6231
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1202:
+ case 1230:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6111
+//line sql.y:6235
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1203:
+ case 1231:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6115
+//line sql.y:6239
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1204:
+ case 1232:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6119
+//line sql.y:6243
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1205:
+ case 1233:
yyDollar = yyS[yypt-14 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6123
+//line sql.y:6247
{
// Match type is kept expression as TRIM( ' m ') is accepted
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1206:
+ case 1234:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6128
+//line sql.y:6252
{
yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1207:
+ case 1235:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6132
+//line sql.y:6256
{
yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), MatchType: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1208:
+ case 1236:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6136
+//line sql.y:6260
{
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1209:
+ case 1237:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6140
+//line sql.y:6264
{
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1210:
+ case 1238:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6144
+//line sql.y:6268
{
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1211:
+ case 1239:
yyDollar = yyS[yypt-14 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6148
+//line sql.y:6272
{
// Match type is kept expression as TRIM( ' m ') is accepted
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1212:
+ case 1240:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6153
+//line sql.y:6277
{
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1213:
+ case 1241:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6157
+//line sql.y:6281
{
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1214:
+ case 1242:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6161
+//line sql.y:6285
{
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1215:
+ case 1243:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6165
+//line sql.y:6289
{
// Match type is kept expression as TRIM( ' m ') is accepted
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), MatchType: yyDollar[11].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1216:
+ case 1244:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6172
+//line sql.y:6296
{
yyLOCAL = &ExtractValueExpr{Fragment: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1217:
+ case 1245:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6176
+//line sql.y:6300
{
yyLOCAL = &UpdateXMLExpr{Target: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion(), NewXML: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1218:
+ case 1246:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6182
+//line sql.y:6306
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatBytesType, Argument: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1219:
+ case 1247:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6186
+//line sql.y:6310
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatPicoTimeType, Argument: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1220:
+ case 1248:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6190
+//line sql.y:6314
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsCurrentThreadIDType}
}
yyVAL.union = yyLOCAL
- case 1221:
+ case 1249:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6194
+//line sql.y:6318
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsThreadIDType, Argument: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1222:
+ case 1250:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6200
+//line sql.y:6324
{
yyLOCAL = >IDFuncExpr{Type: GTIDSubsetType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1223:
+ case 1251:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6204
+//line sql.y:6328
{
yyLOCAL = >IDFuncExpr{Type: GTIDSubtractType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1224:
+ case 1252:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6208
+//line sql.y:6332
{
yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1225:
+ case 1253:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6212
+//line sql.y:6336
{
yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1226:
+ case 1254:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6216
+//line sql.y:6340
{
yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1227:
+ case 1255:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6220
+//line sql.y:6344
{
yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1228:
+ case 1256:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6224
+//line sql.y:6348
{
yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion(), Channel: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1229:
+ case 1257:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6229
+//line sql.y:6353
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1230:
+ case 1258:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6233
+//line sql.y:6357
{
yyLOCAL = yyDollar[2].convertTypeUnion()
}
yyVAL.union = yyLOCAL
- case 1231:
+ case 1259:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6239
+//line sql.y:6363
{
}
- case 1232:
+ case 1260:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6241
+//line sql.y:6365
{
yyLOCAL = IntervalDayHour
}
yyVAL.union = yyLOCAL
- case 1233:
+ case 1261:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6245
+//line sql.y:6369
{
yyLOCAL = IntervalDayMicrosecond
}
yyVAL.union = yyLOCAL
- case 1234:
+ case 1262:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6249
+//line sql.y:6373
{
yyLOCAL = IntervalDayMinute
}
yyVAL.union = yyLOCAL
- case 1235:
+ case 1263:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6253
+//line sql.y:6377
{
yyLOCAL = IntervalDaySecond
}
yyVAL.union = yyLOCAL
- case 1236:
+ case 1264:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6257
+//line sql.y:6381
{
yyLOCAL = IntervalHourMicrosecond
}
yyVAL.union = yyLOCAL
- case 1237:
+ case 1265:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6261
+//line sql.y:6385
{
yyLOCAL = IntervalHourMinute
}
yyVAL.union = yyLOCAL
- case 1238:
+ case 1266:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6265
+//line sql.y:6389
{
yyLOCAL = IntervalHourSecond
}
yyVAL.union = yyLOCAL
- case 1239:
+ case 1267:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6269
+//line sql.y:6393
{
yyLOCAL = IntervalMinuteMicrosecond
}
yyVAL.union = yyLOCAL
- case 1240:
+ case 1268:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6273
+//line sql.y:6397
{
yyLOCAL = IntervalMinuteSecond
}
yyVAL.union = yyLOCAL
- case 1241:
+ case 1269:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6277
+//line sql.y:6401
{
yyLOCAL = IntervalSecondMicrosecond
}
yyVAL.union = yyLOCAL
- case 1242:
+ case 1270:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6281
+//line sql.y:6405
{
yyLOCAL = IntervalYearMonth
}
yyVAL.union = yyLOCAL
- case 1243:
+ case 1271:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6287
+//line sql.y:6411
{
yyLOCAL = IntervalDay
}
yyVAL.union = yyLOCAL
- case 1244:
+ case 1272:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6291
+//line sql.y:6415
{
yyLOCAL = IntervalWeek
}
yyVAL.union = yyLOCAL
- case 1245:
+ case 1273:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6295
+//line sql.y:6419
{
yyLOCAL = IntervalHour
}
yyVAL.union = yyLOCAL
- case 1246:
+ case 1274:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6299
+//line sql.y:6423
{
yyLOCAL = IntervalMinute
}
yyVAL.union = yyLOCAL
- case 1247:
+ case 1275:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6303
+//line sql.y:6427
{
yyLOCAL = IntervalMonth
}
yyVAL.union = yyLOCAL
- case 1248:
+ case 1276:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6307
+//line sql.y:6431
{
yyLOCAL = IntervalQuarter
}
yyVAL.union = yyLOCAL
- case 1249:
+ case 1277:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6311
+//line sql.y:6435
{
yyLOCAL = IntervalSecond
}
yyVAL.union = yyLOCAL
- case 1250:
+ case 1278:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6315
+//line sql.y:6439
{
yyLOCAL = IntervalMicrosecond
}
yyVAL.union = yyLOCAL
- case 1251:
+ case 1279:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6319
+//line sql.y:6443
{
yyLOCAL = IntervalYear
}
yyVAL.union = yyLOCAL
- case 1254:
+ case 1282:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6329
+//line sql.y:6453
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1255:
+ case 1283:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6333
+//line sql.y:6457
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1256:
+ case 1284:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6337
+//line sql.y:6461
{
yyLOCAL = NewIntLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 1257:
+ case 1285:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6341
+//line sql.y:6465
{
yyLOCAL = NewArgument(yyDollar[2].str[1:])
bindVariable(yylex, yyDollar[2].str[1:])
}
yyVAL.union = yyLOCAL
- case 1258:
+ case 1286:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6352
+//line sql.y:6476
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("if"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1259:
+ case 1287:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6356
+//line sql.y:6480
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("database"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1260:
+ case 1288:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6360
+//line sql.y:6484
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("schema"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1261:
+ case 1289:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6364
+//line sql.y:6488
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("mod"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1262:
+ case 1290:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6368
+//line sql.y:6492
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("replace"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1263:
+ case 1291:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6374
+//line sql.y:6498
{
yyLOCAL = NoOption
}
yyVAL.union = yyLOCAL
- case 1264:
+ case 1292:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6378
+//line sql.y:6502
{
yyLOCAL = BooleanModeOpt
}
yyVAL.union = yyLOCAL
- case 1265:
+ case 1293:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6382
+//line sql.y:6506
{
yyLOCAL = NaturalLanguageModeOpt
}
yyVAL.union = yyLOCAL
- case 1266:
+ case 1294:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6386
+//line sql.y:6510
{
yyLOCAL = NaturalLanguageModeWithQueryExpansionOpt
}
yyVAL.union = yyLOCAL
- case 1267:
+ case 1295:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6390
+//line sql.y:6514
{
yyLOCAL = QueryExpansionOpt
}
yyVAL.union = yyLOCAL
- case 1268:
+ case 1296:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6396
+//line sql.y:6520
{
yyVAL.str = string(yyDollar[1].identifierCI.String())
}
- case 1269:
+ case 1297:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6400
+//line sql.y:6524
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1270:
+ case 1298:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6404
+//line sql.y:6528
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1271:
+ case 1299:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6410
+//line sql.y:6534
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1272:
+ case 1300:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6414
+//line sql.y:6538
{
yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 1273:
+ case 1301:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6418
+//line sql.y:6542
{
yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 1274:
+ case 1302:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6424
+//line sql.y:6548
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1275:
+ case 1303:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6428
+//line sql.y:6552
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
}
yyVAL.union = yyLOCAL
- case 1276:
+ case 1304:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6432
+//line sql.y:6556
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1277:
+ case 1305:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6436
+//line sql.y:6560
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1278:
+ case 1306:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6440
+//line sql.y:6564
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
yyLOCAL.Length = yyDollar[2].LengthScaleOption.Length
yyLOCAL.Scale = yyDollar[2].LengthScaleOption.Scale
}
yyVAL.union = yyLOCAL
- case 1279:
+ case 1307:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6446
+//line sql.y:6570
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1280:
+ case 1308:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6450
+//line sql.y:6574
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1281:
+ case 1309:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6454
+//line sql.y:6578
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1282:
+ case 1310:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6458
+//line sql.y:6582
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1283:
+ case 1311:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6462
+//line sql.y:6586
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1284:
+ case 1312:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6466
+//line sql.y:6590
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1285:
+ case 1313:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6470
+//line sql.y:6594
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1286:
+ case 1314:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6474
+//line sql.y:6598
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1287:
+ case 1315:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6478
+//line sql.y:6602
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1288:
+ case 1316:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6482
+//line sql.y:6606
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1289:
+ case 1317:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:6488
+//line sql.y:6612
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1290:
+ case 1318:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:6492
+//line sql.y:6616
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1291:
+ case 1319:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6497
+//line sql.y:6621
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1292:
+ case 1320:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6501
+//line sql.y:6625
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1293:
+ case 1321:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6506
+//line sql.y:6630
{
yyVAL.str = string("")
}
- case 1294:
+ case 1322:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6510
+//line sql.y:6634
{
yyVAL.str = " separator " + encodeSQLString(yyDollar[2].str)
}
- case 1295:
+ case 1323:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*When
-//line sql.y:6516
+//line sql.y:6640
{
yyLOCAL = []*When{yyDollar[1].whenUnion()}
}
yyVAL.union = yyLOCAL
- case 1296:
+ case 1324:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6520
+//line sql.y:6644
{
yySLICE := (*[]*When)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].whenUnion())
}
- case 1297:
+ case 1325:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *When
-//line sql.y:6526
+//line sql.y:6650
{
yyLOCAL = &When{Cond: yyDollar[2].exprUnion(), Val: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1298:
+ case 1326:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6531
+//line sql.y:6655
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1299:
+ case 1327:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6535
+//line sql.y:6659
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1300:
+ case 1328:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6541
+//line sql.y:6665
{
yyLOCAL = &ColName{Name: yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1301:
+ case 1329:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6545
+//line sql.y:6669
{
yyLOCAL = &ColName{Name: NewIdentifierCI(string(yyDollar[1].str))}
}
yyVAL.union = yyLOCAL
- case 1302:
+ case 1330:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6549
+//line sql.y:6673
{
yyLOCAL = &ColName{Qualifier: TableName{Name: yyDollar[1].identifierCS}, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1303:
+ case 1331:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6553
+//line sql.y:6677
{
yyLOCAL = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}, Name: yyDollar[5].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1304:
+ case 1332:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6559
+//line sql.y:6683
{
yyLOCAL = yyDollar[1].colNameUnion()
}
yyVAL.union = yyLOCAL
- case 1305:
+ case 1333:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6563
+//line sql.y:6687
{
yyLOCAL = &Offset{V: convertStringToInt(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1306:
+ case 1334:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6569
+//line sql.y:6693
{
// TODO(sougou): Deprecate this construct.
if yyDollar[1].identifierCI.Lowered() != "value" {
@@ -18664,427 +18914,427 @@ yydefault:
yyLOCAL = NewIntLiteral("1")
}
yyVAL.union = yyLOCAL
- case 1307:
+ case 1335:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6578
+//line sql.y:6702
{
yyLOCAL = NewIntLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 1308:
+ case 1336:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6582
+//line sql.y:6706
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
}
yyVAL.union = yyLOCAL
- case 1309:
+ case 1337:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:6588
+//line sql.y:6712
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1310:
+ case 1338:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:6592
+//line sql.y:6716
{
yyLOCAL = yyDollar[3].exprsUnion()
}
yyVAL.union = yyLOCAL
- case 1311:
+ case 1339:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6597
+//line sql.y:6721
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1312:
+ case 1340:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6601
+//line sql.y:6725
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1313:
+ case 1341:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *NamedWindow
-//line sql.y:6607
+//line sql.y:6731
{
yyLOCAL = &NamedWindow{yyDollar[2].windowDefinitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 1314:
+ case 1342:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL NamedWindows
-//line sql.y:6613
+//line sql.y:6737
{
yyLOCAL = NamedWindows{yyDollar[1].namedWindowUnion()}
}
yyVAL.union = yyLOCAL
- case 1315:
+ case 1343:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6617
+//line sql.y:6741
{
yySLICE := (*NamedWindows)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].namedWindowUnion())
}
- case 1316:
+ case 1344:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL NamedWindows
-//line sql.y:6622
+//line sql.y:6746
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1317:
+ case 1345:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL NamedWindows
-//line sql.y:6626
+//line sql.y:6750
{
yyLOCAL = yyDollar[1].namedWindowsUnion()
}
yyVAL.union = yyLOCAL
- case 1318:
+ case 1346:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6631
+//line sql.y:6755
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1319:
+ case 1347:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6635
+//line sql.y:6759
{
yyLOCAL = yyDollar[1].orderByUnion()
}
yyVAL.union = yyLOCAL
- case 1320:
+ case 1348:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6641
+//line sql.y:6765
{
yyLOCAL = yyDollar[3].orderByUnion()
}
yyVAL.union = yyLOCAL
- case 1321:
+ case 1349:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6647
+//line sql.y:6771
{
yyLOCAL = OrderBy{yyDollar[1].orderUnion()}
}
yyVAL.union = yyLOCAL
- case 1322:
+ case 1350:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6651
+//line sql.y:6775
{
yySLICE := (*OrderBy)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].orderUnion())
}
- case 1323:
+ case 1351:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Order
-//line sql.y:6657
+//line sql.y:6781
{
yyLOCAL = &Order{Expr: yyDollar[1].exprUnion(), Direction: yyDollar[2].orderDirectionUnion()}
}
yyVAL.union = yyLOCAL
- case 1324:
+ case 1352:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL OrderDirection
-//line sql.y:6662
+//line sql.y:6786
{
yyLOCAL = AscOrder
}
yyVAL.union = yyLOCAL
- case 1325:
+ case 1353:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderDirection
-//line sql.y:6666
+//line sql.y:6790
{
yyLOCAL = AscOrder
}
yyVAL.union = yyLOCAL
- case 1326:
+ case 1354:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderDirection
-//line sql.y:6670
+//line sql.y:6794
{
yyLOCAL = DescOrder
}
yyVAL.union = yyLOCAL
- case 1327:
+ case 1355:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6675
+//line sql.y:6799
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1328:
+ case 1356:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6679
+//line sql.y:6803
{
yyLOCAL = yyDollar[1].limitUnion()
}
yyVAL.union = yyLOCAL
- case 1329:
+ case 1357:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6685
+//line sql.y:6809
{
yyLOCAL = &Limit{Rowcount: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1330:
+ case 1358:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6689
+//line sql.y:6813
{
yyLOCAL = &Limit{Offset: yyDollar[2].exprUnion(), Rowcount: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1331:
+ case 1359:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6693
+//line sql.y:6817
{
yyLOCAL = &Limit{Offset: yyDollar[4].exprUnion(), Rowcount: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1332:
+ case 1360:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6698
+//line sql.y:6822
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1333:
+ case 1361:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6702
+//line sql.y:6826
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1334:
+ case 1362:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6706
+//line sql.y:6830
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1335:
+ case 1363:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6710
+//line sql.y:6834
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1336:
+ case 1364:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6714
+//line sql.y:6838
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1337:
+ case 1365:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6721
+//line sql.y:6845
{
yyLOCAL = &LockOption{Type: DefaultType}
}
yyVAL.union = yyLOCAL
- case 1338:
+ case 1366:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6725
+//line sql.y:6849
{
yyLOCAL = &LockOption{Type: NoneType}
}
yyVAL.union = yyLOCAL
- case 1339:
+ case 1367:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6729
+//line sql.y:6853
{
yyLOCAL = &LockOption{Type: SharedType}
}
yyVAL.union = yyLOCAL
- case 1340:
+ case 1368:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6733
+//line sql.y:6857
{
yyLOCAL = &LockOption{Type: ExclusiveType}
}
yyVAL.union = yyLOCAL
- case 1341:
+ case 1369:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6739
+//line sql.y:6863
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1342:
+ case 1370:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6743
+//line sql.y:6867
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1343:
+ case 1371:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6747
+//line sql.y:6871
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1344:
+ case 1372:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6751
+//line sql.y:6875
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1345:
+ case 1373:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6756
+//line sql.y:6880
{
yyVAL.str = ""
}
- case 1346:
+ case 1374:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6760
+//line sql.y:6884
{
yyVAL.str = string(yyDollar[3].str)
}
- case 1347:
+ case 1375:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6764
+//line sql.y:6888
{
yyVAL.str = string(yyDollar[3].str)
}
- case 1348:
+ case 1376:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6768
+//line sql.y:6892
{
yyVAL.str = string(yyDollar[3].str)
}
- case 1349:
+ case 1377:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6773
+//line sql.y:6897
{
yyVAL.str = ""
}
- case 1350:
+ case 1378:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6777
+//line sql.y:6901
{
yyVAL.str = yyDollar[3].str
}
- case 1351:
+ case 1379:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6783
+//line sql.y:6907
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1352:
+ case 1380:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6787
+//line sql.y:6911
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1353:
+ case 1381:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6792
+//line sql.y:6916
{
yyVAL.str = ""
}
- case 1354:
+ case 1382:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:6796
+//line sql.y:6920
{
yyVAL.str = yyDollar[2].str
}
- case 1355:
+ case 1383:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6801
+//line sql.y:6925
{
yyVAL.str = "cascaded"
}
- case 1356:
+ case 1384:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6805
+//line sql.y:6929
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1357:
+ case 1385:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6809
+//line sql.y:6933
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1358:
+ case 1386:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6814
+//line sql.y:6938
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1359:
+ case 1387:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6818
+//line sql.y:6942
{
yyLOCAL = yyDollar[3].definerUnion()
}
yyVAL.union = yyLOCAL
- case 1360:
+ case 1388:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6824
+//line sql.y:6948
{
yyLOCAL = &Definer{
Name: string(yyDollar[1].str),
}
}
yyVAL.union = yyLOCAL
- case 1361:
+ case 1389:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6830
+//line sql.y:6954
{
yyLOCAL = &Definer{
Name: string(yyDollar[1].str),
}
}
yyVAL.union = yyLOCAL
- case 1362:
+ case 1390:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6836
+//line sql.y:6960
{
yyLOCAL = &Definer{
Name: yyDollar[1].str,
@@ -19092,369 +19342,409 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 1363:
+ case 1391:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6845
+//line sql.y:6969
{
yyVAL.str = encodeSQLString(yyDollar[1].str)
}
- case 1364:
+ case 1392:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6849
+//line sql.y:6973
{
yyVAL.str = formatIdentifier(yyDollar[1].str)
}
- case 1365:
+ case 1393:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6854
+//line sql.y:6978
{
yyVAL.str = ""
}
- case 1366:
+ case 1394:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6858
+//line sql.y:6982
{
yyVAL.str = formatAddress(yyDollar[1].str)
}
- case 1367:
+ case 1395:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Lock
-//line sql.y:6864
+//line sql.y:6988
{
yyLOCAL = ForUpdateLock
}
yyVAL.union = yyLOCAL
- case 1368:
+ case 1396:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ var yyLOCAL Lock
+//line sql.y:6992
+ {
+ yyLOCAL = ForUpdateLockNoWait
+ }
+ yyVAL.union = yyLOCAL
+ case 1397:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ var yyLOCAL Lock
+//line sql.y:6996
+ {
+ yyLOCAL = ForUpdateLockSkipLocked
+ }
+ yyVAL.union = yyLOCAL
+ case 1398:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ var yyLOCAL Lock
+//line sql.y:7000
+ {
+ yyLOCAL = ForShareLock
+ }
+ yyVAL.union = yyLOCAL
+ case 1399:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ var yyLOCAL Lock
+//line sql.y:7004
+ {
+ yyLOCAL = ForShareLockNoWait
+ }
+ yyVAL.union = yyLOCAL
+ case 1400:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ var yyLOCAL Lock
+//line sql.y:7008
+ {
+ yyLOCAL = ForShareLockSkipLocked
+ }
+ yyVAL.union = yyLOCAL
+ case 1401:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Lock
-//line sql.y:6868
+//line sql.y:7012
{
yyLOCAL = ShareModeLock
}
yyVAL.union = yyLOCAL
- case 1369:
+ case 1402:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL *SelectInto
-//line sql.y:6874
+//line sql.y:7018
{
yyLOCAL = &SelectInto{Type: IntoOutfileS3, FileName: encodeSQLString(yyDollar[4].str), Charset: yyDollar[5].columnCharset, FormatOption: yyDollar[6].str, ExportOption: yyDollar[7].str, Manifest: yyDollar[8].str, Overwrite: yyDollar[9].str}
}
yyVAL.union = yyLOCAL
- case 1370:
+ case 1403:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SelectInto
-//line sql.y:6878
+//line sql.y:7022
{
yyLOCAL = &SelectInto{Type: IntoDumpfile, FileName: encodeSQLString(yyDollar[3].str), Charset: ColumnCharset{}, FormatOption: "", ExportOption: "", Manifest: "", Overwrite: ""}
}
yyVAL.union = yyLOCAL
- case 1371:
+ case 1404:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *SelectInto
-//line sql.y:6882
+//line sql.y:7026
{
yyLOCAL = &SelectInto{Type: IntoOutfile, FileName: encodeSQLString(yyDollar[3].str), Charset: yyDollar[4].columnCharset, FormatOption: "", ExportOption: yyDollar[5].str, Manifest: "", Overwrite: ""}
}
yyVAL.union = yyLOCAL
- case 1372:
+ case 1405:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6887
+//line sql.y:7031
{
yyVAL.str = ""
}
- case 1373:
+ case 1406:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6891
+//line sql.y:7035
{
yyVAL.str = " format csv" + yyDollar[3].str
}
- case 1374:
+ case 1407:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6895
+//line sql.y:7039
{
yyVAL.str = " format text" + yyDollar[3].str
}
- case 1375:
+ case 1408:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6900
+//line sql.y:7044
{
yyVAL.str = ""
}
- case 1376:
+ case 1409:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6904
+//line sql.y:7048
{
yyVAL.str = " header"
}
- case 1377:
+ case 1410:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6909
+//line sql.y:7053
{
yyVAL.str = ""
}
- case 1378:
+ case 1411:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6913
+//line sql.y:7057
{
yyVAL.str = " manifest on"
}
- case 1379:
+ case 1412:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6917
+//line sql.y:7061
{
yyVAL.str = " manifest off"
}
- case 1380:
+ case 1413:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6922
+//line sql.y:7066
{
yyVAL.str = ""
}
- case 1381:
+ case 1414:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6926
+//line sql.y:7070
{
yyVAL.str = " overwrite on"
}
- case 1382:
+ case 1415:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6930
+//line sql.y:7074
{
yyVAL.str = " overwrite off"
}
- case 1383:
+ case 1416:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6936
+//line sql.y:7080
{
yyVAL.str = yyDollar[1].str + yyDollar[2].str
}
- case 1384:
+ case 1417:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6941
+//line sql.y:7085
{
yyVAL.str = ""
}
- case 1385:
+ case 1418:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6945
+//line sql.y:7089
{
yyVAL.str = " lines" + yyDollar[2].str
}
- case 1386:
+ case 1419:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6951
+//line sql.y:7095
{
yyVAL.str = yyDollar[1].str
}
- case 1387:
+ case 1420:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6955
+//line sql.y:7099
{
yyVAL.str = yyDollar[1].str + yyDollar[2].str
}
- case 1388:
+ case 1421:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6961
+//line sql.y:7105
{
yyVAL.str = " starting by " + encodeSQLString(yyDollar[3].str)
}
- case 1389:
+ case 1422:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6965
+//line sql.y:7109
{
yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str)
}
- case 1390:
+ case 1423:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6970
+//line sql.y:7114
{
yyVAL.str = ""
}
- case 1391:
+ case 1424:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6974
+//line sql.y:7118
{
yyVAL.str = " " + yyDollar[1].str + yyDollar[2].str
}
- case 1392:
+ case 1425:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6980
+//line sql.y:7124
{
yyVAL.str = yyDollar[1].str
}
- case 1393:
+ case 1426:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6984
+//line sql.y:7128
{
yyVAL.str = yyDollar[1].str + yyDollar[2].str
}
- case 1394:
+ case 1427:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6990
+//line sql.y:7134
{
yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str)
}
- case 1395:
+ case 1428:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:6994
+//line sql.y:7138
{
yyVAL.str = yyDollar[1].str + " enclosed by " + encodeSQLString(yyDollar[4].str)
}
- case 1396:
+ case 1429:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6998
+//line sql.y:7142
{
yyVAL.str = " escaped by " + encodeSQLString(yyDollar[3].str)
}
- case 1397:
+ case 1430:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7003
+//line sql.y:7147
{
yyVAL.str = ""
}
- case 1398:
+ case 1431:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7007
+//line sql.y:7151
{
yyVAL.str = " optionally"
}
- case 1399:
+ case 1432:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7020
+//line sql.y:7164
{
yyLOCAL = &Insert{Rows: yyDollar[2].valuesUnion()}
}
yyVAL.union = yyLOCAL
- case 1400:
+ case 1433:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7024
+//line sql.y:7168
{
yyLOCAL = &Insert{Rows: yyDollar[1].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 1401:
+ case 1434:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7028
+//line sql.y:7172
{
yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[5].valuesUnion()}
}
yyVAL.union = yyLOCAL
- case 1402:
+ case 1435:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7032
+//line sql.y:7176
{
yyLOCAL = &Insert{Columns: []IdentifierCI{}, Rows: yyDollar[4].valuesUnion()}
}
yyVAL.union = yyLOCAL
- case 1403:
+ case 1436:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7036
+//line sql.y:7180
{
yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[4].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 1404:
+ case 1437:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:7042
+//line sql.y:7186
{
yyLOCAL = Columns{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1405:
+ case 1438:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Columns
-//line sql.y:7046
+//line sql.y:7190
{
yyLOCAL = Columns{yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1406:
+ case 1439:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7050
+//line sql.y:7194
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 1407:
+ case 1440:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:7054
+//line sql.y:7198
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[5].identifierCI)
}
- case 1408:
+ case 1441:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL UpdateExprs
-//line sql.y:7059
+//line sql.y:7203
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1409:
+ case 1442:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL UpdateExprs
-//line sql.y:7063
+//line sql.y:7207
{
yyLOCAL = yyDollar[5].updateExprsUnion()
}
yyVAL.union = yyLOCAL
- case 1410:
+ case 1443:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Values
-//line sql.y:7069
+//line sql.y:7213
{
yyLOCAL = Values{yyDollar[1].valTupleUnion()}
}
yyVAL.union = yyLOCAL
- case 1411:
+ case 1444:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7073
+//line sql.y:7217
{
yySLICE := (*Values)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].valTupleUnion())
}
- case 1412:
+ case 1445:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7079
+//line sql.y:7223
{
yyLOCAL = yyDollar[1].valTupleUnion()
}
yyVAL.union = yyLOCAL
- case 1413:
+ case 1446:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7083
+//line sql.y:7227
{
yyLOCAL = ValTuple{}
}
yyVAL.union = yyLOCAL
- case 1414:
+ case 1447:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7089
+//line sql.y:7233
{
yyLOCAL = ValTuple(yyDollar[2].exprsUnion())
}
yyVAL.union = yyLOCAL
- case 1415:
+ case 1448:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7093
+//line sql.y:7237
{
yyLOCAL = ValTuple(yyDollar[3].exprsUnion())
}
yyVAL.union = yyLOCAL
- case 1416:
+ case 1449:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7098
+//line sql.y:7242
{
if len(yyDollar[1].valTupleUnion()) == 1 {
yyLOCAL = yyDollar[1].valTupleUnion()[0]
@@ -19463,344 +19753,268 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 1417:
+ case 1450:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL UpdateExprs
-//line sql.y:7108
+//line sql.y:7252
{
yyLOCAL = UpdateExprs{yyDollar[1].updateExprUnion()}
}
yyVAL.union = yyLOCAL
- case 1418:
+ case 1451:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7112
+//line sql.y:7256
{
yySLICE := (*UpdateExprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].updateExprUnion())
}
- case 1419:
+ case 1452:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *UpdateExpr
-//line sql.y:7118
+//line sql.y:7262
{
yyLOCAL = &UpdateExpr{Name: yyDollar[1].colNameUnion(), Expr: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1420:
- yyDollar = yyS[yypt-1 : yypt+1]
- var yyLOCAL SetExprs
-//line sql.y:7124
- {
- yyLOCAL = SetExprs{yyDollar[1].setExprUnion()}
- }
- yyVAL.union = yyLOCAL
- case 1421:
- yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7128
- {
- yySLICE := (*SetExprs)(yyIaddr(yyVAL.union))
- *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion())
- }
- case 1422:
- yyDollar = yyS[yypt-3 : yypt+1]
- var yyLOCAL *SetExpr
-//line sql.y:7134
- {
- yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("on")}
- }
- yyVAL.union = yyLOCAL
- case 1423:
- yyDollar = yyS[yypt-3 : yypt+1]
- var yyLOCAL *SetExpr
-//line sql.y:7138
- {
- yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("off")}
- }
- yyVAL.union = yyLOCAL
- case 1424:
- yyDollar = yyS[yypt-3 : yypt+1]
- var yyLOCAL *SetExpr
-//line sql.y:7142
- {
- yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: yyDollar[3].exprUnion()}
- }
- yyVAL.union = yyLOCAL
- case 1425:
- yyDollar = yyS[yypt-3 : yypt+1]
- var yyLOCAL *SetExpr
-//line sql.y:7146
- {
- yyLOCAL = &SetExpr{Var: NewSetVariable(string(yyDollar[1].str), SessionScope), Expr: yyDollar[2].exprUnion()}
- }
- yyVAL.union = yyLOCAL
- case 1426:
- yyDollar = yyS[yypt-1 : yypt+1]
- var yyLOCAL *Variable
-//line sql.y:7152
- {
- yyLOCAL = NewSetVariable(string(yyDollar[1].str), SessionScope)
- }
- yyVAL.union = yyLOCAL
- case 1427:
- yyDollar = yyS[yypt-1 : yypt+1]
- var yyLOCAL *Variable
-//line sql.y:7156
- {
- yyLOCAL = yyDollar[1].variableUnion()
- }
- yyVAL.union = yyLOCAL
- case 1428:
- yyDollar = yyS[yypt-2 : yypt+1]
- var yyLOCAL *Variable
-//line sql.y:7160
- {
- yyLOCAL = NewSetVariable(string(yyDollar[2].str), yyDollar[1].scopeUnion())
- }
- yyVAL.union = yyLOCAL
- case 1430:
+ case 1454:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:7167
+//line sql.y:7269
{
yyVAL.str = "charset"
}
- case 1433:
+ case 1457:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7177
+//line sql.y:7279
{
yyLOCAL = NewStrLiteral(yyDollar[1].identifierCI.String())
}
yyVAL.union = yyLOCAL
- case 1434:
+ case 1458:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7181
+//line sql.y:7283
{
yyLOCAL = NewStrLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 1435:
+ case 1459:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7185
+//line sql.y:7287
{
yyLOCAL = &Default{}
}
yyVAL.union = yyLOCAL
- case 1438:
+ case 1462:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:7194
+//line sql.y:7296
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1439:
+ case 1463:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:7196
+//line sql.y:7298
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1440:
+ case 1464:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:7199
+//line sql.y:7301
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1441:
+ case 1465:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:7201
+//line sql.y:7303
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1442:
+ case 1466:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:7204
+//line sql.y:7306
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1443:
+ case 1467:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL bool
-//line sql.y:7206
+//line sql.y:7308
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1444:
+ case 1468:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Ignore
-//line sql.y:7209
+//line sql.y:7311
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1445:
+ case 1469:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Ignore
-//line sql.y:7211
+//line sql.y:7313
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1446:
+ case 1470:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7214
+//line sql.y:7316
{
yyVAL.empty = struct{}{}
}
- case 1447:
+ case 1471:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7216
+//line sql.y:7318
{
yyVAL.empty = struct{}{}
}
- case 1448:
+ case 1472:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7218
+//line sql.y:7320
{
yyVAL.empty = struct{}{}
}
- case 1449:
+ case 1473:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:7222
+//line sql.y:7324
{
yyLOCAL = &CallProc{Name: yyDollar[2].tableName, Params: yyDollar[4].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1450:
+ case 1474:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:7227
+//line sql.y:7329
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1451:
+ case 1475:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:7231
+//line sql.y:7333
{
yyLOCAL = yyDollar[1].exprsUnion()
}
yyVAL.union = yyLOCAL
- case 1452:
+ case 1476:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:7236
+//line sql.y:7338
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1453:
+ case 1477:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:7238
+//line sql.y:7340
{
yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1454:
+ case 1478:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:7242
+//line sql.y:7344
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), String: string(yyDollar[2].identifierCI.String())}
}
yyVAL.union = yyLOCAL
- case 1455:
+ case 1479:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7248
+//line sql.y:7350
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 1456:
+ case 1480:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7252
+//line sql.y:7354
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
- case 1458:
+ case 1482:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7259
+//line sql.y:7361
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
- case 1459:
+ case 1483:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7265
+//line sql.y:7367
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 1460:
+ case 1484:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7269
+//line sql.y:7371
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 1461:
+ case 1485:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7275
+//line sql.y:7377
{
yyVAL.identifierCS = NewIdentifierCS("")
}
- case 1462:
+ case 1486:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7279
+//line sql.y:7381
{
yyVAL.identifierCS = yyDollar[1].identifierCS
}
- case 1464:
+ case 1488:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7286
+//line sql.y:7388
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 2006:
+ case 2038:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7854
+//line sql.y:7964
{
- if incNesting(yylex) {
- yylex.Error("max nesting level reached")
- return 1
- }
}
- case 2007:
+ case 2039:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7863
+//line sql.y:7969
{
- decNesting(yylex)
}
- case 2008:
+ case 2040:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7868
+//line sql.y:7973
{
skipToEnd(yylex)
}
- case 2009:
+ case 2041:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7873
+//line sql.y:7978
{
skipToEnd(yylex)
}
- case 2010:
+ case 2042:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7877
+//line sql.y:7982
{
skipToEnd(yylex)
}
- case 2011:
+ case 2043:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7881
+//line sql.y:7986
{
skipToEnd(yylex)
}
diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y
index 5c2e7c397e1..6d8f84c6c59 100644
--- a/go/vt/sqlparser/sql.y
+++ b/go/vt/sqlparser/sql.y
@@ -28,18 +28,6 @@ func setDDL(yylex yyLexer, node Statement) {
yylex.(*Tokenizer).partialDDL = node
}
-func incNesting(yylex yyLexer) bool {
- yylex.(*Tokenizer).nesting++
- if yylex.(*Tokenizer).nesting == 200 {
- return true
- }
- return false
-}
-
-func decNesting(yylex yyLexer) {
- yylex.(*Tokenizer).nesting--
-}
-
// skipToEnd forces the lexer to end prematurely. Not all SQL statements
// are supported by the Parser, thus calling skipToEnd will make the lexer
// return EOF early.
@@ -65,7 +53,7 @@ func bindVariable(yylex yyLexer, bvar string) {
identifierCI IdentifierCI
joinCondition *JoinCondition
databaseOption DatabaseOption
- columnType ColumnType
+ columnType *ColumnType
columnCharset ColumnCharset
}
@@ -78,7 +66,6 @@ func bindVariable(yylex yyLexer, bvar string) {
optVal Expr
constraintInfo ConstraintInfo
alterOption AlterOption
- characteristic Characteristic
ins *Insert
colName *ColName
@@ -158,7 +145,6 @@ func bindVariable(yylex yyLexer, bvar string) {
partitionValueRange *PartitionValueRange
partitionEngine *PartitionEngine
partSpecs []*PartitionSpec
- characteristics []Characteristic
selectExpr SelectExpr
columns Columns
partitions Partitions
@@ -177,7 +163,6 @@ func bindVariable(yylex yyLexer, bvar string) {
colKeyOpt ColumnKeyOption
referenceAction ReferenceAction
matchAction MatchAction
- isolationLevel IsolationLevel
insertAction InsertAction
scope Scope
lock Lock
@@ -187,9 +172,12 @@ func bindVariable(yylex yyLexer, bvar string) {
matchExprOption MatchExprOption
orderDirection OrderDirection
explainType ExplainType
+ vexplainType VExplainType
intervalType IntervalTypes
lockType LockType
referenceDefinition *ReferenceDefinition
+ txAccessModes []TxAccessMode
+ txAccessMode TxAccessMode
columnStorage ColumnStorage
columnFormat ColumnFormat
@@ -303,7 +291,7 @@ func bindVariable(yylex yyLexer, bvar string) {
// DDL Tokens
%token CREATE ALTER DROP RENAME ANALYZE ADD FLUSH CHANGE MODIFY DEALLOCATE
-%token REVERT
+%token REVERT QUERIES
%token SCHEMA TABLE INDEX VIEW TO IGNORE IF PRIMARY COLUMN SPATIAL FULLTEXT KEY_BLOCK_SIZE CHECK INDEXES
%token ACTION CASCADE CONSTRAINT FOREIGN NO REFERENCES RESTRICT
%token SHOW DESCRIBE EXPLAIN DATE ESCAPE REPAIR OPTIMIZE TRUNCATE COALESCE EXCHANGE REBUILD PARTITIONING REMOVE PREPARE EXECUTE
@@ -314,13 +302,16 @@ func bindVariable(yylex yyLexer, bvar string) {
// Migration tokens
%token VITESS_MIGRATION CANCEL RETRY LAUNCH COMPLETE CLEANUP THROTTLE UNTHROTTLE EXPIRE RATIO
+// Throttler tokens
+%token VITESS_THROTTLER
// Transaction Tokens
%token BEGIN START TRANSACTION COMMIT ROLLBACK SAVEPOINT RELEASE WORK
+%token CONSISTENT SNAPSHOT
// Type Tokens
%token BIT TINYINT SMALLINT MEDIUMINT INT INTEGER BIGINT INTNUM
-%token REAL DOUBLE FLOAT_TYPE DECIMAL_TYPE NUMERIC
+%token REAL DOUBLE FLOAT_TYPE FLOAT4_TYPE FLOAT8_TYPE DECIMAL_TYPE NUMERIC
%token TIME TIMESTAMP DATETIME YEAR
%token CHAR VARCHAR BOOL CHARACTER VARBINARY NCHAR
%token TEXT TINYTEXT MEDIUMTEXT LONGTEXT
@@ -379,7 +370,7 @@ func bindVariable(yylex yyLexer, bvar string) {
%token GTID_SUBSET GTID_SUBTRACT WAIT_FOR_EXECUTED_GTID_SET WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS
// Explain tokens
-%token FORMAT TREE VITESS TRADITIONAL VTEXPLAIN
+%token FORMAT TREE VITESS TRADITIONAL VTEXPLAIN VEXPLAIN PLAN
// Lock type tokens
%token LOCAL LOW_PRIORITY
@@ -403,6 +394,7 @@ func bindVariable(yylex yyLexer, bvar string) {
%type query_expression_parens query_expression query_expression_body select_statement query_primary select_stmt_with_into
%type explain_statement explainable_statement
%type prepare_statement
+%type vexplain_statement
%type execute_statement deallocate_statement
%type stream_statement vstream_statement insert_statement update_statement delete_statement set_statement set_transaction_statement
%type create_statement alter_statement rename_statement drop_statement truncate_statement flush_statement do_statement
@@ -427,6 +419,7 @@ func bindVariable(yylex yyLexer, bvar string) {
%type comment_opt comment_list
%type wild_opt check_option_opt cascade_or_local_opt restrict_or_cascade_opt
%type explain_format_opt
+%type vexplain_type_opt
%type trim_type
%type frame_units
%type argument_less_window_expr_type
@@ -513,13 +506,10 @@ func bindVariable(yylex yyLexer, bvar string) {
%type opt_partition_clause partition_list
%type on_dup_opt
%type update_list
-%type set_list
-%type charset_or_character_set charset_or_character_set_or_names
+%type set_list transaction_chars
+%type set_expression transaction_char
+%type charset_or_character_set charset_or_character_set_or_names isolation_level
%type update_expression
-%type set_expression
-%type transaction_char
-%type transaction_chars
-%type isolation_level
%type for_from from_or_on
%type default_opt
%type ignore_opt
@@ -544,7 +534,7 @@ func bindVariable(yylex yyLexer, bvar string) {
%type charset_opt
%type collate_opt
%type binary_opt
-%type float_length_opt decimal_length_opt
+%type double_length_opt float_length_opt decimal_length_opt
%type unsigned_opt zero_fill_opt without_valid_opt
%type enum_values
%type column_definition
@@ -589,6 +579,8 @@ func bindVariable(yylex yyLexer, bvar string) {
%type underscore_charsets
%type expire_opt
%type ratio_opt
+%type tx_chacteristics_opt tx_chars
+%type tx_char
%start any_command
%%
@@ -637,6 +629,7 @@ command:
| savepoint_statement
| release_statement
| explain_statement
+| vexplain_statement
| other_statement
| flush_statement
| do_statement
@@ -1009,56 +1002,98 @@ set_statement:
$$ = NewSetStatement(Comments($2).Parsed(), $3)
}
+set_list:
+ set_expression
+ {
+ $$ = SetExprs{$1}
+ }
+| set_list ',' set_expression
+ {
+ $$ = append($1, $3)
+ }
+
+set_expression:
+ set_variable '=' ON
+ {
+ $$ = &SetExpr{Var: $1, Expr: NewStrLiteral("on")}
+ }
+| set_variable '=' OFF
+ {
+ $$ = &SetExpr{Var: $1, Expr: NewStrLiteral("off")}
+ }
+| set_variable '=' expression
+ {
+ $$ = &SetExpr{Var: $1, Expr: $3}
+ }
+| charset_or_character_set_or_names charset_value collate_opt
+ {
+ $$ = &SetExpr{Var: NewSetVariable(string($1), SessionScope), Expr: $2}
+ }
+
+set_variable:
+ ID
+ {
+ $$ = NewSetVariable(string($1), SessionScope)
+ }
+| variable_expr
+ {
+ $$ = $1
+ }
+| set_session_or_global ID
+ {
+ $$ = NewSetVariable(string($2), $1)
+ }
+
set_transaction_statement:
SET comment_opt set_session_or_global TRANSACTION transaction_chars
{
- $$ = &SetTransaction{Comments: Comments($2).Parsed(), Scope: $3, Characteristics: $5}
+ $$ = NewSetStatement(Comments($2).Parsed(), UpdateSetExprsScope($5, $3))
}
| SET comment_opt TRANSACTION transaction_chars
{
- $$ = &SetTransaction{Comments: Comments($2).Parsed(), Characteristics: $4, Scope: NoScope}
+ $$ = NewSetStatement(Comments($2).Parsed(), $4)
}
transaction_chars:
transaction_char
{
- $$ = []Characteristic{$1}
+ $$ = SetExprs{$1}
}
| transaction_chars ',' transaction_char
{
- $$ = append($$, $3)
+ $$ = append($1, $3)
}
transaction_char:
ISOLATION LEVEL isolation_level
{
- $$ = $3
+ $$ = &SetExpr{Var: NewSetVariable(TransactionIsolationStr, NextTxScope), Expr: NewStrLiteral($3)}
}
| READ WRITE
{
- $$ = ReadWrite
+ $$ = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("off")}
}
| READ ONLY
{
- $$ = ReadOnly
+ $$ = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("on")}
}
isolation_level:
REPEATABLE READ
{
- $$ = RepeatableRead
+ $$ = RepeatableReadStr
}
| READ COMMITTED
{
- $$ = ReadCommitted
+ $$ = ReadCommittedStr
}
| READ UNCOMMITTED
{
- $$ = ReadUncommitted
+ $$ = ReadUncommittedStr
}
| SERIALIZABLE
{
- $$ = Serializable
+ $$ = SerializableStr
}
set_session_or_global:
@@ -1414,7 +1449,7 @@ generated_always_opt:
// was specific (as stated in the MySQL guide) and did not accept arbitrary order options. For example NOT NULL DEFAULT 1 and not DEFAULT 1 NOT NULL
column_attribute_list_opt:
{
- $$ = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: colKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil }
+ $$ = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: ColKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil }
}
| column_attribute_list_opt NULL
{
@@ -1903,19 +1938,19 @@ text_literal_or_arg:
keys:
PRIMARY KEY
{
- $$ = colKeyPrimary
+ $$ = ColKeyPrimary
}
| UNIQUE
{
- $$ = colKeyUnique
+ $$ = ColKeyUnique
}
| UNIQUE KEY
{
- $$ = colKeyUniqueKey
+ $$ = ColKeyUniqueKey
}
| KEY
{
- $$ = colKey
+ $$ = ColKey
}
column_type:
@@ -1943,69 +1978,81 @@ numeric_type:
int_type:
BIT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| BOOL
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| BOOLEAN
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| TINYINT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| SMALLINT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| MEDIUMINT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| INT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| INTEGER
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| BIGINT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
decimal_type:
-REAL float_length_opt
+REAL double_length_opt
+ {
+ $$ = &ColumnType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+| DOUBLE double_length_opt
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
$$.Length = $2.Length
$$.Scale = $2.Scale
}
-| DOUBLE float_length_opt
+| FLOAT8_TYPE double_length_opt
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
$$.Length = $2.Length
$$.Scale = $2.Scale
}
| FLOAT_TYPE float_length_opt
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+| FLOAT4_TYPE float_length_opt
+ {
+ $$ = &ColumnType{Type: string($1)}
$$.Length = $2.Length
$$.Scale = $2.Scale
}
| DECIMAL_TYPE decimal_length_opt
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
$$.Length = $2.Length
$$.Scale = $2.Scale
}
| NUMERIC decimal_length_opt
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
$$.Length = $2.Length
$$.Scale = $2.Scale
}
@@ -2013,126 +2060,126 @@ REAL float_length_opt
time_type:
DATE
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| TIME length_opt
{
- $$ = ColumnType{Type: string($1), Length: $2}
+ $$ = &ColumnType{Type: string($1), Length: $2}
}
| TIMESTAMP length_opt
{
- $$ = ColumnType{Type: string($1), Length: $2}
+ $$ = &ColumnType{Type: string($1), Length: $2}
}
| DATETIME length_opt
{
- $$ = ColumnType{Type: string($1), Length: $2}
+ $$ = &ColumnType{Type: string($1), Length: $2}
}
| YEAR length_opt
{
- $$ = ColumnType{Type: string($1), Length: $2}
+ $$ = &ColumnType{Type: string($1), Length: $2}
}
char_type:
CHAR length_opt charset_opt
{
- $$ = ColumnType{Type: string($1), Length: $2, Charset: $3}
+ $$ = &ColumnType{Type: string($1), Length: $2, Charset: $3}
}
| CHAR length_opt BYTE
{
// CHAR BYTE is an alias for binary. See also:
// https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html
- $$ = ColumnType{Type: "binary", Length: $2}
+ $$ = &ColumnType{Type: "binary", Length: $2}
}
| VARCHAR length_opt charset_opt
{
- $$ = ColumnType{Type: string($1), Length: $2, Charset: $3}
+ $$ = &ColumnType{Type: string($1), Length: $2, Charset: $3}
}
| BINARY length_opt
{
- $$ = ColumnType{Type: string($1), Length: $2}
+ $$ = &ColumnType{Type: string($1), Length: $2}
}
| VARBINARY length_opt
{
- $$ = ColumnType{Type: string($1), Length: $2}
+ $$ = &ColumnType{Type: string($1), Length: $2}
}
| TEXT charset_opt
{
- $$ = ColumnType{Type: string($1), Charset: $2}
+ $$ = &ColumnType{Type: string($1), Charset: $2}
}
| TINYTEXT charset_opt
{
- $$ = ColumnType{Type: string($1), Charset: $2}
+ $$ = &ColumnType{Type: string($1), Charset: $2}
}
| MEDIUMTEXT charset_opt
{
- $$ = ColumnType{Type: string($1), Charset: $2}
+ $$ = &ColumnType{Type: string($1), Charset: $2}
}
| LONGTEXT charset_opt
{
- $$ = ColumnType{Type: string($1), Charset: $2}
+ $$ = &ColumnType{Type: string($1), Charset: $2}
}
| BLOB
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| TINYBLOB
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| MEDIUMBLOB
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| LONGBLOB
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| JSON
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| ENUM '(' enum_values ')' charset_opt
{
- $$ = ColumnType{Type: string($1), EnumValues: $3, Charset: $5}
+ $$ = &ColumnType{Type: string($1), EnumValues: $3, Charset: $5}
}
// need set_values / SetValues ?
| SET '(' enum_values ')' charset_opt
{
- $$ = ColumnType{Type: string($1), EnumValues: $3, Charset: $5}
+ $$ = &ColumnType{Type: string($1), EnumValues: $3, Charset: $5}
}
spatial_type:
GEOMETRY
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| POINT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| LINESTRING
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| POLYGON
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| GEOMETRYCOLLECTION
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| MULTIPOINT
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| MULTILINESTRING
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
| MULTIPOLYGON
{
- $$ = ColumnType{Type: string($1)}
+ $$ = &ColumnType{Type: string($1)}
}
enum_values:
@@ -2155,7 +2202,7 @@ length_opt:
$$ = NewIntLiteral($2)
}
-float_length_opt:
+double_length_opt:
{
$$ = LengthScaleOption{}
}
@@ -2167,6 +2214,18 @@ float_length_opt:
}
}
+float_length_opt:
+double_length_opt
+ {
+ $$ = $1
+ }
+| '(' INTEGRAL ')'
+ {
+ $$ = LengthScaleOption{
+ Length: NewIntLiteral($2),
+ }
+ }
+
decimal_length_opt:
{
$$ = LengthScaleOption{}
@@ -2788,7 +2847,11 @@ insert_method_options:
| LAST
table_opt_value:
- reserved_sql_id
+ table_id '.' reserved_table_id
+ {
+ $$ = String(TableName{Qualifier: $1, Name: $3})
+ }
+| reserved_sql_id
{
$$ = $1.String()
}
@@ -4022,6 +4085,10 @@ show_statement:
{
$$ = &Show{&ShowBasic{Command: VitessReplicationStatus, Filter: $3}}
}
+| SHOW VITESS_THROTTLER STATUS
+ {
+ $$ = &ShowThrottlerStatus{}
+ }
| SHOW VSCHEMA TABLES
{
$$ = &Show{&ShowBasic{Command: VschemaTables}}
@@ -4221,11 +4288,45 @@ begin_statement:
{
$$ = &Begin{}
}
-| START TRANSACTION
+| START TRANSACTION tx_chacteristics_opt
{
- $$ = &Begin{}
+ $$ = &Begin{TxAccessModes: $3}
+ }
+
+tx_chacteristics_opt:
+ {
+ $$ = nil
+ }
+| tx_chars
+ {
+ $$ = $1
}
+tx_chars:
+ tx_char
+ {
+ $$ = []TxAccessMode{$1}
+ }
+| tx_chars ',' tx_char
+ {
+ $$ = append($1, $3)
+ }
+
+tx_char:
+ WITH CONSISTENT SNAPSHOT
+ {
+ $$ = WithConsistentSnapshot
+ }
+| READ WRITE
+ {
+ $$ = ReadWrite
+ }
+| READ ONLY
+ {
+ $$ = ReadOnly
+ }
+
+
commit_statement:
COMMIT
{
@@ -4293,6 +4394,23 @@ explain_format_opt:
$$ = AnalyzeType
}
+vexplain_type_opt:
+ {
+ $$ = PlanVExplainType
+ }
+| PLAN
+ {
+ $$ = PlanVExplainType
+ }
+| ALL
+ {
+ $$ = AllVExplainType
+ }
+| QUERIES
+ {
+ $$ = QueriesVExplainType
+ }
+
explain_synonyms:
EXPLAIN
{
@@ -4348,6 +4466,12 @@ explain_statement:
$$ = &ExplainStmt{Type: $3, Statement: $4, Comments: Comments($2).Parsed()}
}
+vexplain_statement:
+ VEXPLAIN comment_opt vexplain_type_opt explainable_statement
+ {
+ $$ = &VExplainStmt{Type: $3, Statement: $4, Comments: Comments($2).Parsed()}
+ }
+
other_statement:
REPAIR skip_to_end
{
@@ -5336,7 +5460,7 @@ function_call_keyword
}
interval_value:
- INTERVAL simple_expr sql_id
+ INTERVAL bit_expr sql_id
{
$$ = &IntervalExpr{Expr: $2, Unit: $3.String()}
}
@@ -6864,6 +6988,26 @@ FOR UPDATE
{
$$ = ForUpdateLock
}
+| FOR UPDATE NOWAIT
+ {
+ $$ = ForUpdateLockNoWait
+ }
+| FOR UPDATE SKIP LOCKED
+ {
+ $$ = ForUpdateLockSkipLocked
+ }
+| FOR SHARE
+ {
+ $$ = ForShareLock
+ }
+| FOR SHARE NOWAIT
+ {
+ $$ = ForShareLockNoWait
+ }
+| FOR SHARE SKIP LOCKED
+ {
+ $$ = ForShareLockSkipLocked
+ }
| LOCK IN SHARE MODE
{
$$ = ShareModeLock
@@ -7119,48 +7263,6 @@ update_expression:
$$ = &UpdateExpr{Name: $1, Expr: $3}
}
-set_list:
- set_expression
- {
- $$ = SetExprs{$1}
- }
-| set_list ',' set_expression
- {
- $$ = append($1, $3)
- }
-
-set_expression:
- set_variable '=' ON
- {
- $$ = &SetExpr{Var: $1, Expr: NewStrLiteral("on")}
- }
-| set_variable '=' OFF
- {
- $$ = &SetExpr{Var: $1, Expr: NewStrLiteral("off")}
- }
-| set_variable '=' expression
- {
- $$ = &SetExpr{Var: $1, Expr: $3}
- }
-| charset_or_character_set_or_names charset_value collate_opt
- {
- $$ = &SetExpr{Var: NewSetVariable(string($1), SessionScope), Expr: $2}
- }
-
-set_variable:
- ID
- {
- $$ = NewSetVariable(string($1), SessionScope)
- }
-| variable_expr
- {
- $$ = $1
- }
-| set_session_or_global ID
- {
- $$ = NewSetVariable(string($2), $1)
- }
-
charset_or_character_set:
CHARSET
| CHARACTER SET
@@ -7501,6 +7603,7 @@ non_reserved_keyword:
| COMPRESSED
| COMPRESSION
| CONNECTION
+| CONSISTENT
| COPY
| COUNT %prec FUNCTION_CALL_NON_KEYWORD
| CSV
@@ -7685,6 +7788,7 @@ non_reserved_keyword:
| PATH
| PERSIST
| PERSIST_ONLY
+| PLAN
| PRECEDING
| PREPARE
| PRIVILEGE_CHECKS_USER
@@ -7698,6 +7802,7 @@ non_reserved_keyword:
| POSITION %prec FUNCTION_CALL_NON_KEYWORD
| PROCEDURE
| PROCESSLIST
+| QUERIES
| QUERY
| RANDOM
| RATIO
@@ -7747,6 +7852,7 @@ non_reserved_keyword:
| SKIP
| SLOW
| SMALLINT
+| SNAPSHOT
| SQL
| SRID
| START
@@ -7779,6 +7885,7 @@ non_reserved_keyword:
| TINYBLOB
| TINYINT
| TINYTEXT
+| TRADITIONAL
| TRANSACTION
| TREE
| TRIGGER
@@ -7804,6 +7911,7 @@ non_reserved_keyword:
| VARIABLES
| VARIANCE %prec FUNCTION_CALL_NON_KEYWORD
| VCPU
+| VEXPLAIN
| VGTID_EXECUTED
| VIEW
| VINDEX
@@ -7819,7 +7927,9 @@ non_reserved_keyword:
| VITESS_TABLETS
| VITESS_TARGET
| VITESS_THROTTLED_APPS
+| VITESS_THROTTLER
| VSCHEMA
+| VTEXPLAIN
| WAIT_FOR_EXECUTED_GTID_SET %prec FUNCTION_CALL_NON_KEYWORD
| WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS %prec FUNCTION_CALL_NON_KEYWORD
| WARNINGS
@@ -7852,16 +7962,11 @@ non_reserved_keyword:
openb:
'('
{
- if incNesting(yylex) {
- yylex.Error("max nesting level reached")
- return 1
- }
}
closeb:
')'
{
- decNesting(yylex)
}
skip_to_end:
diff --git a/go/vt/sqlparser/testdata/select_cases.txt b/go/vt/sqlparser/testdata/select_cases.txt
index 849dba354e4..72e8058ba2c 100644
--- a/go/vt/sqlparser/testdata/select_cases.txt
+++ b/go/vt/sqlparser/testdata/select_cases.txt
@@ -2888,7 +2888,7 @@ INPUT
select @@session.transaction_isolation;
END
OUTPUT
-select @@transaction_isolation from dual
+select @@session.transaction_isolation from dual
END
INPUT
select hex(left(_utf16 0xD800DC00D87FDFFF, 1));
diff --git a/go/vt/sqlparser/testdata/union_cases.txt b/go/vt/sqlparser/testdata/union_cases.txt
index 0f74e8a3cda..d9e6c0e2241 100644
--- a/go/vt/sqlparser/testdata/union_cases.txt
+++ b/go/vt/sqlparser/testdata/union_cases.txt
@@ -1004,7 +1004,7 @@ INPUT
SELECT 1 FOR SHARE UNION SELECT 2;
END
ERROR
-syntax error at position 19 near 'SHARE'
+syntax error at position 25 near 'UNION'
END
INPUT
SELECT ST_AsText(ST_Union(shore, boundary)) FROM lakes, named_places WHERE lakes.name = 'Blue Lake' AND named_places.name = 'Goose Island';
diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go
index c3326747b6d..148f4d44616 100644
--- a/go/vt/sqlparser/token.go
+++ b/go/vt/sqlparser/token.go
@@ -41,7 +41,6 @@ type Tokenizer struct {
lastToken string
posVarIndex int
partialDDL Statement
- nesting int
multi bool
specialComment *Tokenizer
@@ -597,7 +596,11 @@ func (tkn *Tokenizer) scanStringSlow(buffer *strings.Builder, delim uint16, typ
// String terminates mid escape character.
return LEX_ERROR, buffer.String()
}
- if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.cur())]; decodedChar == sqltypes.DontEscape {
+ // Preserve escaping of % and _
+ if tkn.cur() == '%' || tkn.cur() == '_' {
+ buffer.WriteByte('\\')
+ ch = tkn.cur()
+ } else if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.cur())]; decodedChar == sqltypes.DontEscape {
ch = tkn.cur()
} else {
ch = uint16(decodedChar)
@@ -670,7 +673,7 @@ func (tkn *Tokenizer) scanMySQLSpecificComment() (int, string) {
commentVersion, sql := ExtractMysqlComment(tkn.buf[start:tkn.Pos])
- if MySQLVersion >= commentVersion {
+ if mySQLParserVersion >= commentVersion {
// Only add the special comment to the tokenizer if the version of MySQL is higher or equal to the comment version
tkn.specialComment = NewStringTokenizer(sql)
}
@@ -699,7 +702,6 @@ func (tkn *Tokenizer) reset() {
tkn.partialDDL = nil
tkn.specialComment = nil
tkn.posVarIndex = 0
- tkn.nesting = 0
tkn.SkipToEnd = false
}
diff --git a/go/vt/sqlparser/token_test.go b/go/vt/sqlparser/token_test.go
index 36229c55e7b..0fd43b8f86c 100644
--- a/go/vt/sqlparser/token_test.go
+++ b/go/vt/sqlparser/token_test.go
@@ -237,7 +237,7 @@ func TestVersion(t *testing.T) {
for _, tcase := range testcases {
t.Run(tcase.version+"_"+tcase.in, func(t *testing.T) {
- MySQLVersion = tcase.version
+ mySQLParserVersion = tcase.version
tok := NewStringTokenizer(tcase.in)
for _, expectedID := range tcase.id {
id, _ := tok.Scan()
diff --git a/go/vt/sqlparser/tracked_buffer_test.go b/go/vt/sqlparser/tracked_buffer_test.go
index 02ea192a5de..85d8733590f 100644
--- a/go/vt/sqlparser/tracked_buffer_test.go
+++ b/go/vt/sqlparser/tracked_buffer_test.go
@@ -220,6 +220,14 @@ func TestCanonicalOutput(t *testing.T) {
"select char(77, 121, 83, 81, '76' using utf8mb4) from dual",
"SELECT CHAR(77, 121, 83, 81, '76' USING utf8mb4) FROM `dual`",
},
+ {
+ "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)",
+ "CREATE TABLE `t1` (\n\t`id` int PRIMARY KEY,\n\t`name` tinytext NOT NULL,\n\tFULLTEXT KEY `name_ft` (`name`) WITH PARSER ngram\n)",
+ },
+ {
+ "select convert('abc' using utf8mb4)",
+ "SELECT CONVERT('abc' USING utf8mb4) FROM `dual`",
+ },
}
for _, tc := range testcases {
diff --git a/go/vt/sqlparser/utils.go b/go/vt/sqlparser/utils.go
index a9ec689aa2e..170def2103b 100644
--- a/go/vt/sqlparser/utils.go
+++ b/go/vt/sqlparser/utils.go
@@ -44,7 +44,7 @@ func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, e
if err != nil {
return "", err
}
- normalized := String(stmt)
+ normalized := CanonicalString(stmt)
return normalized, nil
}
diff --git a/go/vt/sqlparser/utils_test.go b/go/vt/sqlparser/utils_test.go
index a073f5127cc..362a675076e 100644
--- a/go/vt/sqlparser/utils_test.go
+++ b/go/vt/sqlparser/utils_test.go
@@ -55,92 +55,128 @@ func TestNormalizeAlphabetically(t *testing.T) {
func TestQueryMatchesTemplates(t *testing.T) {
testcases := []struct {
+ name string
q string
tmpl []string
out bool
- }{{
- q: "select id from tbl",
- tmpl: []string{
- "select id from tbl",
- },
- out: true,
- }, {
- q: "select id from tbl",
- tmpl: []string{
- "select name from tbl",
- "select id from tbl",
- },
- out: true,
- }, {
- q: "select id from tbl where a=3",
- tmpl: []string{
- "select id from tbl",
- },
- out: false,
- }, {
- // int value
- q: "select id from tbl where a=3",
- tmpl: []string{
- "select name from tbl where a=17",
- "select id from tbl where a=5",
- },
- out: true,
- }, {
- // string value
- q: "select id from tbl where a='abc'",
- tmpl: []string{
- "select name from tbl where a='x'",
- "select id from tbl where a='y'",
- },
- out: true,
- }, {
- // two params
- q: "select id from tbl where a='abc' and b='def'",
- tmpl: []string{
- "select name from tbl where a='x' and b = 'y'",
- "select id from tbl where a='x' and b = 'y'",
- },
- out: true,
- }, {
- // no match
- q: "select id from tbl where a='abc' and b='def'",
- tmpl: []string{
- "select name from tbl where a='x' and b = 'y'",
- "select id from tbl where a='x' and c = 'y'",
- },
- out: false,
- }, {
- // reorder AND params
- q: "select id from tbl where a='abc' and b='def'",
- tmpl: []string{
- "select id from tbl where b='x' and a = 'y'",
- },
- out: true,
- }, {
- // no reorder OR params
- q: "select id from tbl where a='abc' or b='def'",
- tmpl: []string{
- "select id from tbl where b='x' or a = 'y'",
- },
- out: false,
- }, {
- // strict reorder OR params
- q: "select id from tbl where a='abc' or b='def'",
- tmpl: []string{
- "select id from tbl where a='x' or b = 'y'",
- },
- out: true,
- }, {
- // reorder AND params, range test
- q: "select id from tbl where a >'abc' and b<3",
- tmpl: []string{
- "select id from tbl where b<17 and a > 'y'",
+ }{
+ {
+ name: "trivial, identical",
+ q: "select id from tbl",
+ tmpl: []string{
+ "select id from tbl",
+ },
+ out: true,
+ }, {
+ name: "trivial, canonical",
+ q: "select `id` from tbl",
+ tmpl: []string{
+ "select id FROM `tbl`",
+ },
+ out: true,
+ }, {
+ name: "trivial, identical from list",
+ q: "select id from tbl",
+ tmpl: []string{
+ "select name from tbl",
+ "select id from tbl",
+ },
+ out: true,
+ }, {
+ name: "trivial no match",
+ q: "select id from tbl where a=3",
+ tmpl: []string{
+ "select id from tbl",
+ },
+ out: false,
+ }, {
+ name: "int value",
+ q: "select id from tbl where a=3",
+ tmpl: []string{
+ "select name from tbl where a=17",
+ "select id from tbl where a=5",
+ },
+ out: true,
+ }, {
+ name: "string value",
+ q: "select id from tbl where a='abc'",
+ tmpl: []string{
+ "select name from tbl where a='x'",
+ "select id from tbl where a='y'",
+ },
+ out: true,
+ }, {
+ name: "two params",
+ q: "select id from tbl where a='abc' and b='def'",
+ tmpl: []string{
+ "select name from tbl where a='x' and b = 'y'",
+ "select id from tbl where a='x' and b = 'y'",
+ },
+ out: true,
+ }, {
+ name: "no match",
+ q: "select id from tbl where a='abc' and b='def'",
+ tmpl: []string{
+ "select name from tbl where a='x' and b = 'y'",
+ "select id from tbl where a='x' and c = 'y'",
+ },
+ out: false,
+ }, {
+ name: "reorder AND params",
+ q: "select id from tbl where a='abc' and b='def'",
+ tmpl: []string{
+ "select id from tbl where b='x' and a = 'y'",
+ },
+ out: true,
+ }, {
+ name: "no reorder OR params",
+ q: "select id from tbl where a='abc' or b='def'",
+ tmpl: []string{
+ "select id from tbl where b='x' or a = 'y'",
+ },
+ out: false,
+ }, {
+ name: "strict reorder OR params",
+ q: "select id from tbl where a='abc' or b='def'",
+ tmpl: []string{
+ "select id from tbl where a='x' or b = 'y'",
+ },
+ out: true,
+ }, {
+ name: "identical 'x' annotation in template, identical query values",
+ q: "select id from tbl where a='abc' or b='abc'",
+ tmpl: []string{
+ "select id from tbl where a='x' or b = 'x'",
+ },
+ out: true,
+ }, {
+ name: "identical 'x' annotation in template, different query values",
+ q: "select id from tbl where a='abc' or b='def'",
+ tmpl: []string{
+ "select id from tbl where a='x' or b = 'x'",
+ },
+ out: false,
+ }, {
+ name: "reorder AND params, range test",
+ q: "select id from tbl where a >'abc' and b<3",
+ tmpl: []string{
+ "select id from tbl where b<17 and a > 'y'",
+ },
+ out: true,
+ }, {
+ name: "canonical, case",
+ q: "SHOW BINARY LOGS",
+ tmpl: []string{
+ "show binary logs",
+ },
+ out: true,
},
- out: true,
- }}
+ }
for _, tc := range testcases {
- match, err := QueryMatchesTemplates(tc.q, tc.tmpl)
- assert.NoError(t, err)
- assert.Equal(t, tc.out, match)
+ t.Run(tc.name, func(t *testing.T) {
+ match, err := QueryMatchesTemplates(tc.q, tc.tmpl)
+ assert.NoError(t, err)
+ assert.Equal(t, tc.out, match)
+ })
}
}
diff --git a/go/vt/srvtopo/query.go b/go/vt/srvtopo/query.go
index 098f5c77bc1..ec1ed50100a 100644
--- a/go/vt/srvtopo/query.go
+++ b/go/vt/srvtopo/query.go
@@ -86,7 +86,12 @@ func (q *resilientQuery) getCurrentValue(ctx context.Context, wkey fmt.Stringer,
// If it is not time to check again, then return either the cached
// value or the cached error but don't ask topo again.
- if !shouldRefresh {
+ // Here we have to be careful with the part where we haven't gotten even the first result.
+ // In that case, a refresh is already in progress, but the cache is empty! So, we can't use the cache.
+ // We have to wait for the query's results.
+ // We know the query has run at least once if the insertionTime is non-zero, or if we have an error.
+ queryRanAtLeastOnce := !entry.insertionTime.IsZero() || entry.lastError != nil
+ if !shouldRefresh && queryRanAtLeastOnce {
if cacheValid {
return entry.value, nil
}
diff --git a/go/vt/srvtopo/query_test.go b/go/vt/srvtopo/query_test.go
new file mode 100644
index 00000000000..2569a2ad420
--- /dev/null
+++ b/go/vt/srvtopo/query_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package srvtopo
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "vitess.io/vitess/go/stats"
+)
+
+// TestResilientQueryGetCurrentValueInitialization tests that the resilient query returns the correct results when it has been
+// initialized.
+func TestResilientQueryGetCurrentValueInitialization(t *testing.T) {
+ // Create a basic query, which doesn't do anything other than return the same cell it got as an input.
+ // The query however needs to simulate being slow, so we have a sleep in there.
+ query := func(ctx context.Context, entry *queryEntry) (any, error) {
+ time.Sleep(1 * time.Second)
+ cell := entry.key.(cellName)
+ return cell, nil
+ }
+ counts := stats.NewCountersWithSingleLabel("TestResilientQueryGetCurrentValue", "Test for resilient query", "type")
+
+ // Create the resilient query
+ rq := &resilientQuery{
+ query: query,
+ counts: counts,
+ cacheRefreshInterval: 5 * time.Second,
+ cacheTTL: 5 * time.Second,
+ entries: make(map[string]*queryEntry),
+ }
+
+ // Create a context and a cell.
+ ctx := context.Background()
+ cell := cellName("cell-1")
+
+ // Hammer the resilient query with multiple get requests just as it is created.
+ // We expect all of them to work.
+ wg := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ // To test with both stale and not-stale, we use the modulo of our index.
+ stale := i%2 == 0
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ res, err := rq.getCurrentValue(ctx, cell, stale)
+ // Assert that we don't have any error and the value matches what we want.
+ assert.NoError(t, err)
+ assert.EqualValues(t, cell, res)
+ }()
+ }
+ // Wait for the wait group to be empty, otherwise the test is marked a success before any of the go routines finish completion!
+ wg.Wait()
+}
diff --git a/go/vt/sysvars/sysvars.go b/go/vt/sysvars/sysvars.go
index fa7f011656c..c4939d5c63e 100644
--- a/go/vt/sysvars/sysvars.go
+++ b/go/vt/sysvars/sysvars.go
@@ -16,6 +16,8 @@ limitations under the License.
package sysvars
+import "sync"
+
// This information lives here, because it's needed from the vtgate planbuilder, the vtgate engine,
// and the AST rewriter, that happens to live in sqlparser.
@@ -37,8 +39,18 @@ type SystemVariable struct {
Name string
SupportSetVar bool
+
+ Case StorageCase
}
+type StorageCase int
+
+const (
+ SCSame StorageCase = iota
+ SCUpper
+ SCLower
+)
+
// System Settings
var (
on = "1"
@@ -58,6 +70,7 @@ var (
TransactionReadOnly = SystemVariable{Name: "transaction_read_only", IsBoolean: true, Default: off}
TxReadOnly = SystemVariable{Name: "tx_read_only", IsBoolean: true, Default: off}
Workload = SystemVariable{Name: "workload", IdentifierAsString: true}
+ QueryTimeout = SystemVariable{Name: "query_timeout"}
// Online DDL
DDLStrategy = SystemVariable{Name: "ddl_strategy", IdentifierAsString: true}
@@ -86,6 +99,7 @@ var (
ReadAfterWriteGTID,
ReadAfterWriteTimeOut,
SessionTrackGTIDs,
+ QueryTimeout,
}
ReadOnly = []SystemVariable{
@@ -183,8 +197,6 @@ var (
{Name: "optimizer_trace_features"},
{Name: "optimizer_trace_limit"},
{Name: "optimizer_trace_max_mem_size"},
- {Name: "transaction_isolation"},
- {Name: "tx_isolation"},
{Name: "optimizer_trace_offset"},
{Name: "parser_max_mem_size"},
{Name: "profiling", IsBoolean: true},
@@ -205,7 +217,9 @@ var (
{Name: "sql_warnings", IsBoolean: true},
{Name: "time_zone"},
{Name: "tmp_table_size", SupportSetVar: true},
+ {Name: "transaction_isolation", Case: SCUpper},
{Name: "transaction_prealloc_size"},
+ {Name: "tx_isolation", Case: SCUpper},
{Name: "unique_checks", IsBoolean: true, SupportSetVar: true},
{Name: "updatable_views_with_limit", IsBoolean: true, SupportSetVar: true},
}
@@ -268,3 +282,17 @@ func GetInterestingVariables() []string {
}
return res
}
+
+var vitessAwareVariableNames map[string]struct{}
+var vitessAwareInit sync.Once
+
+func IsVitessAware(sysv string) bool {
+ vitessAwareInit.Do(func() {
+ vitessAwareVariableNames = make(map[string]struct{}, len(VitessAware))
+ for _, v := range VitessAware {
+ vitessAwareVariableNames[v.Name] = struct{}{}
+ }
+ })
+ _, found := vitessAwareVariableNames[sysv]
+ return found
+}
diff --git a/go/vt/tlstest/tlstest.go b/go/vt/tlstest/tlstest.go
index 500a3974c48..0529ea4ef09 100644
--- a/go/vt/tlstest/tlstest.go
+++ b/go/vt/tlstest/tlstest.go
@@ -337,12 +337,18 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) {
if err != nil {
log.Fatal(err)
}
- crlList, err := x509.ParseCRL(data)
+
+ block, _ := pem.Decode(data)
+ if block == nil || block.Type != "X509 CRL" {
+ log.Fatal("failed to parse CRL PEM")
+ }
+
+ crlList, err := x509.ParseRevocationList(block.Bytes)
if err != nil {
log.Fatal(err)
}
- revoked := crlList.TBSCertList.RevokedCertificates
+ revoked := crlList.RevokedCertificates
revoked = append(revoked, pkix.RevokedCertificate{
SerialNumber: certificate.SerialNumber,
RevocationTime: time.Now(),
@@ -357,9 +363,10 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) {
log.Fatal(err)
}
+ var crlNumber big.Int
newCrl, err := x509.CreateRevocationList(rand.Reader, &x509.RevocationList{
RevokedCertificates: revoked,
- Number: big.NewInt(int64(crlList.TBSCertList.Version) + 1),
+ Number: crlNumber.Add(crlList.Number, big.NewInt(1)),
}, caCert, caKey.(crypto.Signer))
if err != nil {
log.Fatal(err)
diff --git a/go/vt/topo/cell_info.go b/go/vt/topo/cell_info.go
index 468b15157c3..2af867a8ffc 100644
--- a/go/vt/topo/cell_info.go
+++ b/go/vt/topo/cell_info.go
@@ -194,7 +194,7 @@ func (ts *Server) ExpandCells(ctx context.Context, cells string) ([]string, erro
var (
err error
inputCells []string
- outputCells = sets.NewString() // Use a set to dedupe if the input cells list includes an alias and a cell in that alias.
+ outputCells = sets.New[string]() // Use a set to dedupe if the input cells list includes an alias and a cell in that alias.
)
if cells == "" {
@@ -238,5 +238,5 @@ func (ts *Server) ExpandCells(ctx context.Context, cells string) ([]string, erro
}
}
- return outputCells.List(), nil
+ return sets.List(outputCells), nil
}
diff --git a/go/vt/topo/conn.go b/go/vt/topo/conn.go
index c30c88623d3..4d9293f1755 100644
--- a/go/vt/topo/conn.go
+++ b/go/vt/topo/conn.go
@@ -113,6 +113,18 @@ type Conn interface {
// Returns ErrInterrupted if ctx is canceled.
Lock(ctx context.Context, dirPath, contents string) (LockDescriptor, error)
+ // TryLock takes lock on the given directory with a fail-fast approach.
+ // It is similar to `Lock` but the difference is it attempts to acquire the lock
+ // if it is likely to succeed. If there is already a lock on given path, then unlike `Lock`
+ // instead of waiting and blocking the client it returns with `Lock already exists` error.
+ // With current implementation it may not be able to fail-fast for some scenarios.
+ // For example there is a possibility that a thread checks for lock for a given path
+ // but by the time it acquires the lock, some other thread has already acquired it,
+ // in this case the client will block until the other caller releases the lock or the
+ // client call times out (just like standard `Lock' implementation). In short the lock checking
+ // and acquiring is not under the same mutex in current implementation of `TryLock`.
+ TryLock(ctx context.Context, dirPath, contents string) (LockDescriptor, error)
+
//
// Watches
//
diff --git a/go/vt/topo/consultopo/lock.go b/go/vt/topo/consultopo/lock.go
index 6afa9571c95..ae47b91cc6c 100644
--- a/go/vt/topo/consultopo/lock.go
+++ b/go/vt/topo/consultopo/lock.go
@@ -17,9 +17,9 @@ limitations under the License.
package consultopo
import (
- "path"
-
"context"
+ "fmt"
+ "path"
"github.com/hashicorp/consul/api"
@@ -49,6 +49,36 @@ func (s *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockD
return nil, convertError(err, dirPath)
}
+ return s.lock(ctx, dirPath, contents)
+}
+
+// TryLock is part of the topo.Conn interface.
+func (s *Server) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ // We list all the entries under dirPath
+ entries, err := s.ListDir(ctx, dirPath, true)
+ if err != nil {
+ // We need to return the right error codes, like
+ // topo.ErrNoNode and topo.ErrInterrupted, and the
+ // easiest way to do this is to return convertError(err).
+ // It may lose some of the context, if this is an issue,
+ // maybe logging the error would work here.
+ return nil, convertError(err, dirPath)
+ }
+
+ // If there is a file 'lock' in it then we can assume that someone else already has a lock.
+ // Throw error in this case
+ for _, e := range entries {
+ if e.Name == locksFilename && e.Type == topo.TypeFile && e.Ephemeral {
+ return nil, topo.NewError(topo.NodeExists, fmt.Sprintf("lock already exists at path %s", dirPath))
+ }
+ }
+
+ // everything is good let's acquire the lock.
+ return s.lock(ctx, dirPath, contents)
+}
+
+// Lock is part of the topo.Conn interface.
+func (s *Server) lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
lockPath := path.Join(s.root, dirPath, locksFilename)
lockOpts := &api.LockOptions{
diff --git a/go/vt/topo/consultopo/server_flaky_test.go b/go/vt/topo/consultopo/server_flaky_test.go
index 80b04e815cf..797ad4c955f 100644
--- a/go/vt/topo/consultopo/server_flaky_test.go
+++ b/go/vt/topo/consultopo/server_flaky_test.go
@@ -164,7 +164,7 @@ func TestConsulTopo(t *testing.T) {
}
return ts
- })
+ }, []string{})
}
func TestConsulTopoWithChecks(t *testing.T) {
@@ -210,7 +210,7 @@ func TestConsulTopoWithChecks(t *testing.T) {
}
return ts
- })
+ }, []string{})
}
func TestConsulTopoWithAuth(t *testing.T) {
@@ -267,7 +267,7 @@ func TestConsulTopoWithAuth(t *testing.T) {
}
return ts
- })
+ }, []string{})
}
func TestConsulTopoWithAuthFailure(t *testing.T) {
diff --git a/go/vt/topo/decode.go b/go/vt/topo/decode.go
new file mode 100644
index 00000000000..1265b0e4a80
--- /dev/null
+++ b/go/vt/topo/decode.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package topo
+
+import (
+ "fmt"
+ "path"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+)
+
+// DecodeContent uses the filename to imply a type, and proto-decodes
+// the right object, then echoes it as a string.
+func DecodeContent(filename string, data []byte, json bool) (string, error) {
+ name := path.Base(filename)
+ dir := path.Dir(filename)
+ var p proto.Message
+ switch name {
+ case CellInfoFile:
+ p = new(topodatapb.CellInfo)
+ case KeyspaceFile:
+ p = new(topodatapb.Keyspace)
+ case ShardFile:
+ p = new(topodatapb.Shard)
+ case VSchemaFile:
+ p = new(vschemapb.Keyspace)
+ case ShardReplicationFile:
+ p = new(topodatapb.ShardReplication)
+ case TabletFile:
+ p = new(topodatapb.Tablet)
+ case SrvVSchemaFile:
+ p = new(vschemapb.SrvVSchema)
+ case SrvKeyspaceFile:
+ p = new(topodatapb.SrvKeyspace)
+ case RoutingRulesFile:
+ p = new(vschemapb.RoutingRules)
+ default:
+ switch dir {
+ case "/" + GetExternalVitessClusterDir():
+ p = new(topodatapb.ExternalVitessCluster)
+ default:
+ }
+ if p == nil {
+ if json {
+ return "", fmt.Errorf("unknown topo protobuf type for %v", name)
+ }
+ return string(data), nil
+ }
+ }
+
+ if err := proto.Unmarshal(data, p); err != nil {
+ return string(data), err
+ }
+
+ var marshalled []byte
+ var err error
+ if json {
+ marshalled, err = protojson.Marshal(p)
+ } else {
+ marshalled, err = prototext.Marshal(p)
+ }
+ return string(marshalled), err
+}
diff --git a/go/vt/topo/etcd2topo/lock.go b/go/vt/topo/etcd2topo/lock.go
index 96b2fc0c66e..89095156471 100644
--- a/go/vt/topo/etcd2topo/lock.go
+++ b/go/vt/topo/etcd2topo/lock.go
@@ -131,6 +131,31 @@ type etcdLockDescriptor struct {
leaseID clientv3.LeaseID
}
+// TryLock is part of the topo.Conn interface.
+func (s *Server) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ // We list all the entries under dirPath
+ entries, err := s.ListDir(ctx, dirPath, true)
+ if err != nil {
+ // We need to return the right error codes, like
+ // topo.ErrNoNode and topo.ErrInterrupted, and the
+ // easiest way to do this is to return convertError(err).
+ // It may lose some of the context, if this is an issue,
+ // maybe logging the error would work here.
+ return nil, convertError(err, dirPath)
+ }
+
+ // If there is a folder '/locks' with some entries in it then we can assume that someone else already has a lock.
+ // Throw error in this case
+ for _, e := range entries {
+ if e.Name == locksPath && e.Type == topo.TypeDirectory && e.Ephemeral {
+ return nil, topo.NewError(topo.NodeExists, fmt.Sprintf("lock already exists at path %s", dirPath))
+ }
+ }
+
+ // everything is good let's acquire the lock.
+ return s.lock(ctx, dirPath, contents)
+}
+
// Lock is part of the topo.Conn interface.
func (s *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
// We list the directory first to make sure it exists.
diff --git a/go/vt/topo/etcd2topo/server_test.go b/go/vt/topo/etcd2topo/server_test.go
index 327f504f79b..3bf0e29cfd7 100644
--- a/go/vt/topo/etcd2topo/server_test.go
+++ b/go/vt/topo/etcd2topo/server_test.go
@@ -247,7 +247,7 @@ func TestEtcd2Topo(t *testing.T) {
// Run the TopoServerTestSuite tests.
test.TopoServerTestSuite(t, func() *topo.Server {
return newServer()
- })
+ }, []string{})
// Run etcd-specific tests.
ts := newServer()
diff --git a/go/vt/topo/faketopo/faketopo.go b/go/vt/topo/faketopo/faketopo.go
index 57721b346b3..9265ba699a3 100644
--- a/go/vt/topo/faketopo/faketopo.go
+++ b/go/vt/topo/faketopo/faketopo.go
@@ -287,6 +287,11 @@ func (f *FakeConn) Lock(ctx context.Context, dirPath, contents string) (topo.Loc
return &fakeLockDescriptor{}, nil
}
+// TryLock is part of the topo.Conn interface. Its implementation is same as Lock
+func (f *FakeConn) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ return f.Lock(ctx, dirPath, contents)
+}
+
// Watch implements the Conn interface
func (f *FakeConn) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-chan *topo.WatchData, error) {
f.mu.Lock()
diff --git a/go/vt/topo/helpers/tee.go b/go/vt/topo/helpers/tee.go
index ab9cb29f73c..b2178144087 100644
--- a/go/vt/topo/helpers/tee.go
+++ b/go/vt/topo/helpers/tee.go
@@ -187,6 +187,16 @@ type teeTopoLockDescriptor struct {
// Lock is part of the topo.Conn interface.
func (c *TeeConn) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ return c.lock(ctx, dirPath, contents)
+}
+
+// TryLock is part of the topo.Conn interface. Its implementation is same as Lock
+func (c *TeeConn) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ return c.Lock(ctx, dirPath, contents)
+}
+
+// Lock is part of the topo.Conn interface.
+func (c *TeeConn) lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
// Lock lockFirst.
fLD, err := c.lockFirst.Lock(ctx, dirPath, contents)
if err != nil {
diff --git a/go/vt/topo/helpers/tee_topo_test.go b/go/vt/topo/helpers/tee_topo_test.go
index de7dc603b82..519301eaafa 100644
--- a/go/vt/topo/helpers/tee_topo_test.go
+++ b/go/vt/topo/helpers/tee_topo_test.go
@@ -33,5 +33,5 @@ func TestTeeTopo(t *testing.T) {
t.Fatalf("NewTee() failed: %v", err)
}
return tee
- })
+ }, []string{"checkTryLock", "checkShardWithLock"})
}
diff --git a/go/vt/topo/k8stopo/lock.go b/go/vt/topo/k8stopo/lock.go
index 5df663efee0..e1321ea76e4 100644
--- a/go/vt/topo/k8stopo/lock.go
+++ b/go/vt/topo/k8stopo/lock.go
@@ -40,6 +40,11 @@ func (s *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockD
return s.lock(ctx, dirPath, contents, false)
}
+// TryLock is part of the topo.Conn interface. Its implementation is same as Lock
+func (s *Server) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ return s.Lock(ctx, dirPath, contents)
+}
+
// lock is used by both Lock() and primary election.
// it blocks until the lock is taken, interrupted, or times out
func (s *Server) lock(ctx context.Context, nodePath, contents string, createMissing bool) (topo.LockDescriptor, error) {
diff --git a/go/vt/topo/k8stopo/server_flaky_test.go b/go/vt/topo/k8stopo/server_flaky_test.go
index f5107d44476..5a9fce1ca80 100644
--- a/go/vt/topo/k8stopo/server_flaky_test.go
+++ b/go/vt/topo/k8stopo/server_flaky_test.go
@@ -143,5 +143,5 @@ func TestKubernetesTopo(t *testing.T) {
}
return ts
- })
+ }, []string{"checkTryLock", "checkShardWithLock"})
}
diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go
index 996203b9556..3a0416c9032 100755
--- a/go/vt/topo/keyspace.go
+++ b/go/vt/topo/keyspace.go
@@ -18,6 +18,7 @@ package topo
import (
"path"
+ "strings"
"google.golang.org/protobuf/proto"
@@ -54,6 +55,20 @@ func (ki *KeyspaceInfo) SetKeyspaceName(name string) {
ki.keyspace = name
}
+var invalidKeyspaceNameChars = "/"
+
+// ValidateKeyspaceName checks if the provided name is a valid name for a
+// keyspace.
+//
+// As of v16.0.1, "all invalid characters" is just the forward slash ("/").
+func ValidateKeyspaceName(name string) error {
+ if strings.ContainsAny(name, invalidKeyspaceNameChars) {
+ return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyspace name %s contains invalid characters; may not contain any of the following: %+v", name, strings.Split(invalidKeyspaceNameChars, ""))
+ }
+
+ return nil
+}
+
// GetServedFrom returns a Keyspace_ServedFrom record if it exists.
func (ki *KeyspaceInfo) GetServedFrom(tabletType topodatapb.TabletType) *topodatapb.Keyspace_ServedFrom {
for _, ksf := range ki.ServedFroms {
@@ -161,6 +176,10 @@ func (ki *KeyspaceInfo) ComputeCellServedFrom(cell string) []*topodatapb.SrvKeys
// CreateKeyspace wraps the underlying Conn.Create
// and dispatches the event.
func (ts *Server) CreateKeyspace(ctx context.Context, keyspace string, value *topodatapb.Keyspace) error {
+ if err := ValidateKeyspaceName(keyspace); err != nil {
+ return vterrors.Wrapf(err, "CreateKeyspace: %s", err)
+ }
+
data, err := proto.Marshal(value)
if err != nil {
return err
@@ -181,6 +200,10 @@ func (ts *Server) CreateKeyspace(ctx context.Context, keyspace string, value *to
// GetKeyspace reads the given keyspace and returns it
func (ts *Server) GetKeyspace(ctx context.Context, keyspace string) (*KeyspaceInfo, error) {
+ if err := ValidateKeyspaceName(keyspace); err != nil {
+ return nil, vterrors.Wrapf(err, "GetKeyspace: %s", err)
+ }
+
keyspacePath := path.Join(KeyspacesPath, keyspace, KeyspaceFile)
data, version, err := ts.globalCell.Get(ctx, keyspacePath)
if err != nil {
@@ -213,6 +236,14 @@ func (ts *Server) GetKeyspaceDurability(ctx context.Context, keyspace string) (s
return "none", nil
}
+func (ts *Server) GetThrottlerConfig(ctx context.Context, keyspace string) (*topodatapb.ThrottlerConfig, error) {
+ keyspaceInfo, err := ts.GetKeyspace(ctx, keyspace)
+ if err != nil {
+ return nil, err
+ }
+ return keyspaceInfo.ThrottlerConfig, nil
+}
+
// UpdateKeyspace updates the keyspace data. It checks the keyspace is locked.
func (ts *Server) UpdateKeyspace(ctx context.Context, ki *KeyspaceInfo) error {
// make sure it is locked first
diff --git a/go/vt/topo/locks.go b/go/vt/topo/locks.go
index 5ee60c2ea2f..036ce983078 100644
--- a/go/vt/topo/locks.go
+++ b/go/vt/topo/locks.go
@@ -27,6 +27,7 @@ import (
"github.com/spf13/pflag"
+ _flag "vitess.io/vitess/go/internal/flag"
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/proto/vtrpc"
@@ -38,15 +39,14 @@ import (
// keyspaces and shards.
var (
- // DefaultLockTimeout is a good value to use as a default for
- // locking a shard / keyspace.
- // Now used only for unlock operations
- defaultLockTimeout = 30 * time.Second
+ // LockTimeout is the maximum duration for which a
+ // shard / keyspace lock can be acquired for.
+ LockTimeout = 45 * time.Second
// RemoteOperationTimeout is used for operations where we have to
// call out to another process.
// Used for RPC calls (including topo server calls)
- RemoteOperationTimeout = 30 * time.Second
+ RemoteOperationTimeout = 15 * time.Second
)
// Lock describes a long-running lock on a keyspace or a shard.
@@ -70,6 +70,7 @@ func init() {
func registerTopoLockFlags(fs *pflag.FlagSet) {
fs.DurationVar(&RemoteOperationTimeout, "remote_operation_timeout", RemoteOperationTimeout, "time to wait for a remote operation")
+ fs.DurationVar(&LockTimeout, "lock-timeout", LockTimeout, "Maximum time for which a shard/keyspace lock can be acquired for")
}
// newLock creates a new Lock.
@@ -244,7 +245,7 @@ func CheckKeyspaceLockedAndRenew(ctx context.Context, keyspace string) error {
func (l *Lock) lockKeyspace(ctx context.Context, ts *Server, keyspace string) (LockDescriptor, error) {
log.Infof("Locking keyspace %v for action %v", keyspace, l.Action)
- ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
+ ctx, cancel := context.WithTimeout(ctx, getLockTimeout())
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.LockKeyspaceForAction")
@@ -265,10 +266,8 @@ func (l *Lock) unlockKeyspace(ctx context.Context, ts *Server, keyspace string,
// Detach from the parent timeout, but copy the trace span.
// We need to still release the lock even if the parent
// context timed out.
- // Note that we are not using the user provided RemoteOperationTimeout
- // here because it is possible that that timeout is too short.
ctx = trace.CopySpan(context.TODO(), ctx)
- ctx, cancel := context.WithTimeout(ctx, defaultLockTimeout)
+ ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.UnlockKeyspaceForAction")
@@ -301,9 +300,40 @@ func (l *Lock) unlockKeyspace(ctx context.Context, ts *Server, keyspace string,
// - PlannedReparentShard
// - EmergencyReparentShard
//
+// * any vtorc recovery e.g
+// - RecoverDeadPrimary
+// - ElectNewPrimary
+// - FixPrimary
+//
+// * before any replication repair from replication manager
+//
// * operations that we don't want to conflict with re-parenting:
// - DeleteTablet when it's the shard's current primary
func (ts *Server) LockShard(ctx context.Context, keyspace, shard, action string) (context.Context, func(*error), error) {
+ return ts.internalLockShard(ctx, keyspace, shard, action, true)
+}
+
+// TryLockShard will lock the shard, and return:
+// - a context with a locksInfo structure for future reference.
+// - an unlock method
+// - an error if anything failed.
+//
+// `TryLockShard` is different from `LockShard`. If there is already a lock on given shard,
+// then unlike `LockShard` instead of waiting and blocking the client it returns with
+// `Lock already exists` error. With current implementation it may not be able to fail-fast
+// for some scenarios. For example there is a possibility that a thread checks for lock for
+// a given shard but by the time it acquires the lock, some other thread has already acquired it,
+// in this case the client will block until the other caller releases the lock or the
+// client call times out (just like standard `LockShard' implementation). In short the lock checking
+// and acquiring is not under the same mutex in current implementation of `TryLockShard`.
+//
+// We are currently using `TryLockShard` during tablet discovery in Vtorc recovery
+func (ts *Server) TryLockShard(ctx context.Context, keyspace, shard, action string) (context.Context, func(*error), error) {
+ return ts.internalLockShard(ctx, keyspace, shard, action, false)
+}
+
+// isBlocking is used to indicate whether the call should fail-fast or not.
+func (ts *Server) internalLockShard(ctx context.Context, keyspace, shard, action string, isBlocking bool) (context.Context, func(*error), error) {
i, ok := ctx.Value(locksKey).(*locksInfo)
if !ok {
i = &locksInfo{
@@ -322,7 +352,13 @@ func (ts *Server) LockShard(ctx context.Context, keyspace, shard, action string)
// lock
l := newLock(action)
- lockDescriptor, err := l.lockShard(ctx, ts, keyspace, shard)
+ var lockDescriptor LockDescriptor
+ var err error
+ if isBlocking {
+ lockDescriptor, err = l.lockShard(ctx, ts, keyspace, shard)
+ } else {
+ lockDescriptor, err = l.tryLockShard(ctx, ts, keyspace, shard)
+ }
if err != nil {
return nil, nil, err
}
@@ -383,9 +419,19 @@ func CheckShardLocked(ctx context.Context, keyspace, shard string) error {
// lockShard will lock the shard in the topology server.
// UnlockShard should be called if this returns no error.
func (l *Lock) lockShard(ctx context.Context, ts *Server, keyspace, shard string) (LockDescriptor, error) {
+ return l.internalLockShard(ctx, ts, keyspace, shard, true)
+}
+
+// tryLockShard will lock the shard in the topology server but unlike `lockShard` it fail-fast if not able to get lock
+// UnlockShard should be called if this returns no error.
+func (l *Lock) tryLockShard(ctx context.Context, ts *Server, keyspace, shard string) (LockDescriptor, error) {
+ return l.internalLockShard(ctx, ts, keyspace, shard, false)
+}
+
+func (l *Lock) internalLockShard(ctx context.Context, ts *Server, keyspace, shard string, isBlocking bool) (LockDescriptor, error) {
log.Infof("Locking shard %v/%v for action %v", keyspace, shard, l.Action)
- ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
+ ctx, cancel := context.WithTimeout(ctx, getLockTimeout())
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.LockShardForAction")
@@ -399,17 +445,18 @@ func (l *Lock) lockShard(ctx context.Context, ts *Server, keyspace, shard string
if err != nil {
return nil, err
}
- return ts.globalCell.Lock(ctx, shardPath, j)
+ if isBlocking {
+ return ts.globalCell.Lock(ctx, shardPath, j)
+ }
+ return ts.globalCell.TryLock(ctx, shardPath, j)
}
// unlockShard unlocks a previously locked shard.
func (l *Lock) unlockShard(ctx context.Context, ts *Server, keyspace, shard string, lockDescriptor LockDescriptor, actionError error) error {
// Detach from the parent timeout, but copy the trace span.
// We need to still release the lock even if the parent context timed out.
- // Note that we are not using the user provided RemoteOperationTimeout
- // here because it is possible that that timeout is too short.
ctx = trace.CopySpan(context.TODO(), ctx)
- ctx, cancel := context.WithTimeout(ctx, defaultLockTimeout)
+ ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.UnlockShardForAction")
@@ -428,3 +475,15 @@ func (l *Lock) unlockShard(ctx context.Context, ts *Server, keyspace, shard stri
}
return lockDescriptor.Unlock(ctx)
}
+
+// getLockTimeout is shim code used for backward compatibility with v15
+// This code can be removed in v17+ and LockTimeout can be used directly
+func getLockTimeout() time.Duration {
+ if _flag.IsFlagProvided("lock-timeout") {
+ return LockTimeout
+ }
+ if _flag.IsFlagProvided("remote_operation_timeout") {
+ return RemoteOperationTimeout
+ }
+ return LockTimeout
+}
diff --git a/go/vt/topo/locks_test.go b/go/vt/topo/locks_test.go
new file mode 100644
index 00000000000..c4d2019676e
--- /dev/null
+++ b/go/vt/topo/locks_test.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package topo
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/internal/flag"
+)
+
+// TestGetLockTimeout tests the behaviour of
+// getLockTimeout function in different situations where
+// the two flags `remote_operation_timeout` and `lock-timeout` are
+// provided or not.
+func TestGetLockTimeout(t *testing.T) {
+ tests := []struct {
+ description string
+ lockTimeoutValue string
+ remoteOperationTimeoutValue string
+ expectedLockTimeout time.Duration
+ }{
+ {
+ description: "no flags specified",
+ lockTimeoutValue: "",
+ remoteOperationTimeoutValue: "",
+ expectedLockTimeout: 45 * time.Second,
+ }, {
+ description: "lock-timeout flag specified",
+ lockTimeoutValue: "33s",
+ remoteOperationTimeoutValue: "",
+ expectedLockTimeout: 33 * time.Second,
+ }, {
+ description: "remote operation timeout flag specified",
+ lockTimeoutValue: "",
+ remoteOperationTimeoutValue: "33s",
+ expectedLockTimeout: 33 * time.Second,
+ }, {
+ description: "both flags specified",
+ lockTimeoutValue: "33s",
+ remoteOperationTimeoutValue: "22s",
+ expectedLockTimeout: 33 * time.Second,
+ }, {
+ description: "remote operation timeout flag specified to the default",
+ lockTimeoutValue: "",
+ remoteOperationTimeoutValue: "15s",
+ expectedLockTimeout: 15 * time.Second,
+ }, {
+ description: "lock-timeout flag specified to the default",
+ lockTimeoutValue: "45s",
+ remoteOperationTimeoutValue: "33s",
+ expectedLockTimeout: 45 * time.Second,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.description, func(t *testing.T) {
+ oldLockTimeout := LockTimeout
+ oldRemoteOpsTimeout := RemoteOperationTimeout
+ defer func() {
+ LockTimeout = oldLockTimeout
+ RemoteOperationTimeout = oldRemoteOpsTimeout
+ }()
+ var args []string
+ if tt.lockTimeoutValue != "" {
+ args = append(args, "--lock-timeout", tt.lockTimeoutValue)
+ }
+ if tt.remoteOperationTimeoutValue != "" {
+ args = append(args, "--remote_operation_timeout", tt.remoteOperationTimeoutValue)
+ }
+ os.Args = os.Args[0:1]
+ os.Args = append(os.Args, args...)
+
+ fs := pflag.NewFlagSet("test", pflag.ExitOnError)
+ registerTopoLockFlags(fs)
+ flag.Parse(fs)
+
+ val := getLockTimeout()
+ require.Equal(t, tt.expectedLockTimeout, val)
+ })
+ }
+
+}
diff --git a/go/vt/topo/memorytopo/election.go b/go/vt/topo/memorytopo/election.go
index fd9830edb35..868a2c53287 100644
--- a/go/vt/topo/memorytopo/election.go
+++ b/go/vt/topo/memorytopo/election.go
@@ -153,9 +153,7 @@ func (mp *cLeaderParticipation) WaitForNewLeader(ctx context.Context) (<-chan st
}
notifications := make(chan string, 8)
- watchIndex := nextWatchIndex
- nextWatchIndex++
- n.watches[watchIndex] = watch{lock: notifications}
+ watchIndex := n.addWatch(watch{lock: notifications})
if n.lock != nil {
notifications <- n.lockContents
diff --git a/go/vt/topo/memorytopo/lock.go b/go/vt/topo/memorytopo/lock.go
index 6d19a2427ae..c15fb9099bb 100644
--- a/go/vt/topo/memorytopo/lock.go
+++ b/go/vt/topo/memorytopo/lock.go
@@ -40,8 +40,18 @@ type memoryTopoLockDescriptor struct {
dirPath string
}
+// TryLock is part of the topo.Conn interface. Its implementation is same as Lock
+func (c *Conn) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ return c.Lock(ctx, dirPath, contents)
+}
+
// Lock is part of the topo.Conn interface.
func (c *Conn) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ return c.lock(ctx, dirPath, contents)
+}
+
+// Lock is part of the topo.Conn interface.
+func (c *Conn) lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
for {
if err := c.dial(ctx); err != nil {
return nil, err
diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go
index cdad2ddbcdd..504f1d4bd39 100644
--- a/go/vt/topo/memorytopo/memorytopo.go
+++ b/go/vt/topo/memorytopo/memorytopo.go
@@ -49,10 +49,6 @@ const (
UnreachableServerAddr = "unreachable"
)
-var (
- nextWatchIndex = 0
-)
-
// Factory is a memory-based implementation of topo.Factory. It
// takes a file-system like approach, with directories at each level
// being an actual directory node. This is meant to be closer to
@@ -206,6 +202,20 @@ func (n *node) propagateRecursiveWatch(ev *topo.WatchDataRecursive) {
}
}
+var (
+ nextWatchIndex = 0
+ nextWatchIndexMu sync.Mutex
+)
+
+func (n *node) addWatch(w watch) int {
+ nextWatchIndexMu.Lock()
+ defer nextWatchIndexMu.Unlock()
+ watchIndex := nextWatchIndex
+ nextWatchIndex++
+ n.watches[watchIndex] = w
+ return watchIndex
+}
+
// PropagateWatchError propagates the given error to all watches on this node
// and recursively applies to all children
func (n *node) PropagateWatchError(err error) {
diff --git a/go/vt/topo/memorytopo/server_test.go b/go/vt/topo/memorytopo/server_test.go
index ea979ea2922..5bfa41c8a5e 100644
--- a/go/vt/topo/memorytopo/server_test.go
+++ b/go/vt/topo/memorytopo/server_test.go
@@ -27,5 +27,5 @@ func TestMemoryTopo(t *testing.T) {
// Run the TopoServerTestSuite tests.
test.TopoServerTestSuite(t, func() *topo.Server {
return NewServer(test.LocalCellName)
- })
+ }, []string{"checkTryLock", "checkShardWithLock"})
}
diff --git a/go/vt/topo/memorytopo/watch.go b/go/vt/topo/memorytopo/watch.go
index 14cb20bc09d..73b2d248434 100644
--- a/go/vt/topo/memorytopo/watch.go
+++ b/go/vt/topo/memorytopo/watch.go
@@ -50,9 +50,7 @@ func (c *Conn) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-c
}
notifications := make(chan *topo.WatchData, 100)
- watchIndex := nextWatchIndex
- nextWatchIndex++
- n.watches[watchIndex] = watch{contents: notifications}
+ watchIndex := n.addWatch(watch{contents: notifications})
go func() {
<-ctx.Done()
@@ -105,9 +103,7 @@ func (c *Conn) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.Watc
})
notifications := make(chan *topo.WatchDataRecursive, 100)
- watchIndex := nextWatchIndex
- nextWatchIndex++
- n.watches[watchIndex] = watch{recursive: notifications}
+ watchIndex := n.addWatch(watch{recursive: notifications})
go func() {
defer close(notifications)
diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go
index 9ca9b2d3322..20af5c624a2 100644
--- a/go/vt/topo/server.go
+++ b/go/vt/topo/server.go
@@ -173,7 +173,7 @@ var (
cellsToAliases: make(map[string]string),
}
- FlagBinaries = []string{"vttablet", "vtctl", "vtctld", "vtcombo", "vtexplain", "vtgate",
+ FlagBinaries = []string{"vttablet", "vtctl", "vtctld", "vtcombo", "vtgate",
"vtgr", "vtorc", "vtbackup"}
)
diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go
index 7f03bf13364..2599c7de962 100644
--- a/go/vt/topo/shard.go
+++ b/go/vt/topo/shard.go
@@ -120,6 +120,10 @@ func IsShardUsingRangeBasedSharding(shard string) bool {
// ValidateShardName takes a shard name and sanitizes it, and also returns
// the KeyRange.
func ValidateShardName(shard string) (string, *topodatapb.KeyRange, error) {
+ if strings.Contains(shard, "/") {
+ return "", nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid shardId, may not contain '/': %v", shard)
+ }
+
if !IsShardUsingRangeBasedSharding(shard) {
return shard, nil, nil
}
diff --git a/go/vt/topo/shard_test.go b/go/vt/topo/shard_test.go
index 4c0088f00ee..d0ec08f94ea 100644
--- a/go/vt/topo/shard_test.go
+++ b/go/vt/topo/shard_test.go
@@ -17,13 +17,14 @@ limitations under the License.
package topo
import (
+ "context"
"reflect"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "context"
-
+ "vitess.io/vitess/go/test/utils"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
@@ -222,3 +223,58 @@ func TestUpdateSourceDeniedTables(t *testing.T) {
t.Fatalf("one cell removal from all failed: %v", si)
}
}
+
+func TestValidateShardName(t *testing.T) {
+ t.Parallel()
+
+ cases := []struct {
+ name string
+ expectedRange *topodatapb.KeyRange
+ valid bool
+ }{
+ {
+ name: "0",
+ valid: true,
+ },
+ {
+ name: "-80",
+ expectedRange: &topodatapb.KeyRange{
+ Start: nil,
+ End: []byte{0x80},
+ },
+ valid: true,
+ },
+ {
+ name: "40-80",
+ expectedRange: &topodatapb.KeyRange{
+ Start: []byte{0x40},
+ End: []byte{0x80},
+ },
+ valid: true,
+ },
+ {
+ name: "foo-bar",
+ valid: false,
+ },
+ {
+ name: "a/b",
+ valid: false,
+ },
+ }
+
+ for _, tcase := range cases {
+ tcase := tcase
+ t.Run(tcase.name, func(t *testing.T) {
+ t.Parallel()
+
+ _, kr, err := ValidateShardName(tcase.name)
+ if !tcase.valid {
+ assert.Error(t, err, "expected %q to be an invalid shard name", tcase.name)
+ return
+ }
+
+ require.NoError(t, err, "expected %q to be a valid shard name, got error: %v", tcase.name, err)
+ utils.MustMatch(t, tcase.expectedRange, kr)
+ })
+ }
+}
diff --git a/go/vt/topo/srv_keyspace.go b/go/vt/topo/srv_keyspace.go
index ee1db1100e1..e431ce55c40 100644
--- a/go/vt/topo/srv_keyspace.go
+++ b/go/vt/topo/srv_keyspace.go
@@ -396,6 +396,51 @@ func (ts *Server) DeleteSrvKeyspacePartitions(ctx context.Context, keyspace stri
return nil
}
+// UpdateSrvKeyspaceThrottlerConfig updates existing throttler configuration
+func (ts *Server) UpdateSrvKeyspaceThrottlerConfig(ctx context.Context, keyspace string, cells []string, update func(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig) (updatedCells []string, err error) {
+ if err = CheckKeyspaceLocked(ctx, keyspace); err != nil {
+ return updatedCells, err
+ }
+
+ // The caller intends to update all cells in this case
+ if len(cells) == 0 {
+ cells, err = ts.GetCellInfoNames(ctx)
+ if err != nil {
+ return updatedCells, err
+ }
+ }
+
+ wg := sync.WaitGroup{}
+ rec := concurrency.AllErrorRecorder{}
+ for _, cell := range cells {
+ wg.Add(1)
+ go func(cell string) {
+ defer wg.Done()
+ srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, keyspace)
+ switch {
+ case err == nil:
+ srvKeyspace.ThrottlerConfig = update(srvKeyspace.ThrottlerConfig)
+ if err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, srvKeyspace); err != nil {
+ rec.RecordError(err)
+ return
+ }
+ updatedCells = append(updatedCells, cell)
+ return
+ case IsErrType(err, NoNode):
+ // NOOP as not every cell will contain a serving tablet in the keyspace
+ default:
+ rec.RecordError(err)
+ return
+ }
+ }(cell)
+ }
+ wg.Wait()
+ if rec.HasErrors() {
+ return updatedCells, NewError(PartialResult, rec.Error().Error())
+ }
+ return updatedCells, nil
+}
+
// UpdateDisableQueryService will make sure the disableQueryService is
// set appropriately in tablet controls in srvKeyspace.
func (ts *Server) UpdateDisableQueryService(ctx context.Context, keyspace string, shards []*ShardInfo, tabletType topodatapb.TabletType, cells []string, disableQueryService bool) (err error) {
diff --git a/go/vt/topo/stats_conn.go b/go/vt/topo/stats_conn.go
index bed1c00cb46..08f44c0f75e 100644
--- a/go/vt/topo/stats_conn.go
+++ b/go/vt/topo/stats_conn.go
@@ -146,13 +146,29 @@ func (st *StatsConn) Delete(ctx context.Context, filePath string, version Versio
// Lock is part of the Conn interface
func (st *StatsConn) Lock(ctx context.Context, dirPath, contents string) (LockDescriptor, error) {
+ return st.internalLock(ctx, dirPath, contents, true)
+}
+
+// TryLock is part of the topo.Conn interface. Its implementation is same as Lock
+func (st *StatsConn) TryLock(ctx context.Context, dirPath, contents string) (LockDescriptor, error) {
+ return st.internalLock(ctx, dirPath, contents, false)
+}
+
+// TryLock is part of the topo.Conn interface. Its implementation is same as Lock
+func (st *StatsConn) internalLock(ctx context.Context, dirPath, contents string, isBlocking bool) (LockDescriptor, error) {
statsKey := []string{"Lock", st.cell}
if st.readOnly {
return nil, vterrors.Errorf(vtrpc.Code_READ_ONLY, readOnlyErrorStrFormat, statsKey[0], dirPath)
}
startTime := time.Now()
defer topoStatsConnTimings.Record(statsKey, startTime)
- res, err := st.conn.Lock(ctx, dirPath, contents)
+ var res LockDescriptor
+ var err error
+ if isBlocking {
+ res, err = st.conn.Lock(ctx, dirPath, contents)
+ } else {
+ res, err = st.conn.TryLock(ctx, dirPath, contents)
+ }
if err != nil {
topoStatsConnErrors.Add(statsKey, int64(1))
return res, err
diff --git a/go/vt/topo/stats_conn_test.go b/go/vt/topo/stats_conn_test.go
index 50c9ab16e25..e26e8c97f31 100644
--- a/go/vt/topo/stats_conn_test.go
+++ b/go/vt/topo/stats_conn_test.go
@@ -104,6 +104,19 @@ func (st *fakeConn) Lock(ctx context.Context, dirPath, contents string) (lock Lo
return lock, err
}
+// TryLock is part of the topo.Conn interface.
+// As of today it provides same functionality as Lock
+func (st *fakeConn) TryLock(ctx context.Context, dirPath, contents string) (lock LockDescriptor, err error) {
+ if st.readOnly {
+ return nil, vterrors.Errorf(vtrpc.Code_READ_ONLY, "topo server connection is read-only")
+ }
+ if dirPath == "error" {
+ return lock, fmt.Errorf("dummy error")
+
+ }
+ return lock, err
+}
+
// Watch is part of the Conn interface
func (st *fakeConn) Watch(ctx context.Context, filePath string) (current *WatchData, changes <-chan *WatchData, err error) {
return current, changes, err
diff --git a/go/vt/topo/test/lock.go b/go/vt/topo/test/lock.go
index 9b2fbb6d5c6..69cdeff2a55 100644
--- a/go/vt/topo/test/lock.go
+++ b/go/vt/topo/test/lock.go
@@ -171,6 +171,6 @@ func checkLockUnblocks(ctx context.Context, t *testing.T, conn topo.Conn) {
select {
case <-finished:
case <-timeout:
- t.Fatalf("unlocking timed out")
+ t.Fatalf("Unlock(test_keyspace) timed out")
}
}
diff --git a/go/vt/topo/test/shard.go b/go/vt/topo/test/shard.go
index 7126bdfcc09..d285f382838 100644
--- a/go/vt/topo/test/shard.go
+++ b/go/vt/topo/test/shard.go
@@ -17,9 +17,11 @@ limitations under the License.
package test
import (
+ "context"
"testing"
+ "time"
- "context"
+ "github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
@@ -94,3 +96,77 @@ func checkShard(t *testing.T, ts *topo.Server) {
t.Errorf("GetShardNames(666): %v", err)
}
}
+
+// checkShardWithLock verifies that `TryLockShard` will keep failing with `NodeExists` error if there is
+// a lock already taken for given shard. Once we unlock that shard, then subsequent call to `TryLockShard`
+// should succeed.
+func checkShardWithLock(t *testing.T, ts *topo.Server) {
+ ctx := context.Background()
+ if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil {
+ t.Fatalf("CreateKeyspace: %v", err)
+ }
+
+ unblock := make(chan struct{})
+ finished := make(chan struct{})
+
+ // Check GetShardNames returns [], nil for existing keyspace with no shards.
+ if names, err := ts.GetShardNames(ctx, "test_keyspace"); err != nil || len(names) != 0 {
+ t.Errorf("GetShardNames(keyspace with no shards) didn't return [] nil: %v %v", names, err)
+ }
+
+ if err := ts.CreateShard(ctx, "test_keyspace", "b0-c0"); err != nil {
+ t.Fatalf("CreateShard: %v", err)
+ }
+
+ _, unlock1, err := ts.LockShard(ctx, "test_keyspace", "b0-c0", "lock")
+ if err != nil {
+ t.Errorf("CreateShard called second time, got: %v", err)
+ }
+
+ duration := 10 * time.Second
+ waitUntil := time.Now().Add(duration)
+ // As soon as we're unblocked, we try to lock the keyspace.
+ go func() {
+ <-unblock
+ var isUnLocked1 = false
+ for time.Now().Before(waitUntil) {
+ _, unlock2, err := ts.TryLockShard(ctx, "test_keyspace", "b0-c0", "lock")
+ // TryLockShard will fail since we already have acquired lock for `test-keyspace`
+ if err != nil {
+ if !topo.IsErrType(err, topo.NodeExists) {
+ require.Fail(t, "expected node exists during tryLockShard", err.Error())
+ }
+ var finalErr error
+ // unlock `test-keyspace` shard. Now the subsequent call to `TryLockShard` will succeed.
+ unlock1(&finalErr)
+ isUnLocked1 = true
+ if finalErr != nil {
+ require.Fail(t, "Unlock(test_keyspace) failed", finalErr.Error())
+ }
+ } else {
+ // unlock shard acquired through `TryLockShard`
+ unlock2(&err)
+ if err != nil {
+ require.Fail(t, "Unlock(test_keyspace) failed", err.Error())
+ }
+ // true value of 'isUnLocked1' signify that we at-least hit 'NodeExits' once.
+ if isUnLocked1 {
+ close(finished)
+ } else {
+ require.Fail(t, "Test was expecting to hit `NodeExists` error at-least once")
+ }
+ break
+ }
+ }
+ }()
+
+ // unblock the go routine
+ close(unblock)
+
+ timeout := time.After(duration * 2)
+ select {
+ case <-finished:
+ case <-timeout:
+ t.Fatalf("Unlock(test_keyspace) timed out")
+ }
+}
diff --git a/go/vt/topo/test/testing.go b/go/vt/topo/test/testing.go
index ed865719c1c..e8d014242ad 100644
--- a/go/vt/topo/test/testing.go
+++ b/go/vt/topo/test/testing.go
@@ -40,94 +40,118 @@ func newKeyRange(value string) *topodatapb.KeyRange {
return result
}
+func executeTestSuite(f func(*testing.T, *topo.Server), t *testing.T, ts *topo.Server, ignoreList []string, name string) {
+ // some test does not apply every where therefore we ignore them
+ for _, n := range ignoreList {
+ if n == name {
+ t.Logf("=== ignoring test %s", name)
+ return
+ }
+ }
+ f(t, ts)
+}
+
// TopoServerTestSuite runs the full topo.Server/Conn test suite.
// The factory method should return a topo.Server that has a single cell
// called LocalCellName.
-func TopoServerTestSuite(t *testing.T, factory func() *topo.Server) {
+// Not all tests are applicable for each Topo server, therefore we provide ignoreList in order to
+// avoid them for given Topo server tests. For example `TryLock` implementation is same as `Lock` for some Topo servers.
+// Hence, for these Topo servers we ignore executing TryLock Tests.
+func TopoServerTestSuite(t *testing.T, factory func() *topo.Server, ignoreList []string) {
var ts *topo.Server
t.Log("=== checkKeyspace")
ts = factory()
- checkKeyspace(t, ts)
+ executeTestSuite(checkKeyspace, t, ts, ignoreList, "checkKeyspace")
ts.Close()
t.Log("=== checkShard")
ts = factory()
- checkShard(t, ts)
+ executeTestSuite(checkShard, t, ts, ignoreList, "checkShard")
+ ts.Close()
+
+ t.Log("=== checkShardWithLock")
+ ts = factory()
+ executeTestSuite(checkShardWithLock, t, ts, ignoreList, "checkShardWithLock")
ts.Close()
t.Log("=== checkTablet")
ts = factory()
- checkTablet(t, ts)
+ executeTestSuite(checkTablet, t, ts, ignoreList, "checkTablet")
ts.Close()
t.Log("=== checkShardReplication")
ts = factory()
- checkShardReplication(t, ts)
+ executeTestSuite(checkShardReplication, t, ts, ignoreList, "checkShardReplication")
ts.Close()
t.Log("=== checkSrvKeyspace")
ts = factory()
- checkSrvKeyspace(t, ts)
+ executeTestSuite(checkSrvKeyspace, t, ts, ignoreList, "checkSrvKeyspace")
ts.Close()
t.Log("=== checkSrvVSchema")
ts = factory()
- checkSrvVSchema(t, ts)
+ executeTestSuite(checkSrvVSchema, t, ts, ignoreList, "checkSrvVSchema")
ts.Close()
t.Log("=== checkLock")
ts = factory()
- checkLock(t, ts)
+ executeTestSuite(checkLock, t, ts, ignoreList, "checkLock")
+ ts.Close()
+
+ t.Log("=== checkTryLock")
+ ts = factory()
+ executeTestSuite(checkTryLock, t, ts, ignoreList, "checkTryLock")
ts.Close()
t.Log("=== checkVSchema")
ts = factory()
- checkVSchema(t, ts)
+ executeTestSuite(checkVSchema, t, ts, ignoreList, "checkVSchema")
ts.Close()
t.Log("=== checkRoutingRules")
ts = factory()
- checkRoutingRules(t, ts)
+ executeTestSuite(checkRoutingRules, t, ts, ignoreList, "checkRoutingRules")
ts.Close()
t.Log("=== checkElection")
ts = factory()
- checkElection(t, ts)
+ executeTestSuite(checkElection, t, ts, ignoreList, "checkElection")
ts.Close()
t.Log("=== checkWaitForNewLeader")
ts = factory()
- checkWaitForNewLeader(t, ts)
+ executeTestSuite(checkWaitForNewLeader, t, ts, ignoreList, "checkWaitForNewLeader")
ts.Close()
t.Log("=== checkDirectory")
ts = factory()
- checkDirectory(t, ts)
+ executeTestSuite(checkDirectory, t, ts, ignoreList, "checkDirectory")
ts.Close()
t.Log("=== checkFile")
ts = factory()
- checkFile(t, ts)
+ executeTestSuite(checkFile, t, ts, ignoreList, "checkFile")
ts.Close()
t.Log("=== checkWatch")
ts = factory()
- checkWatch(t, ts)
+ executeTestSuite(checkWatch, t, ts, ignoreList, "checkWatch")
ts.Close()
ts = factory()
t.Log("=== checkWatchInterrupt")
- checkWatchInterrupt(t, ts)
+ executeTestSuite(checkWatchInterrupt, t, ts, ignoreList, "checkWatchInterrupt")
ts.Close()
ts = factory()
t.Log("=== checkList")
- checkList(t, ts)
+ executeTestSuite(checkList, t, ts, ignoreList, "checkList")
ts.Close()
ts = factory()
t.Log("=== checkWatchRecursive")
- checkWatchRecursive(t, ts)
+ executeTestSuite(checkWatchRecursive, t, ts, ignoreList, "checkWatchRecursive")
ts.Close()
}
diff --git a/go/vt/topo/test/trylock.go b/go/vt/topo/test/trylock.go
new file mode 100644
index 00000000000..cace3cccc61
--- /dev/null
+++ b/go/vt/topo/test/trylock.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+ "context"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/topo"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
+
+// checkTryLock checks if we can lock / unlock as expected. It's using a keyspace
+// as the lock target.
+func checkTryLock(t *testing.T, ts *topo.Server) {
+ ctx := context.Background()
+ if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil {
+ require.Fail(t, "CreateKeyspace fail", err.Error())
+ }
+
+ conn, err := ts.ConnForCell(context.Background(), topo.GlobalCell)
+ if err != nil {
+ require.Fail(t, "ConnForCell(global) failed", err.Error())
+ }
+
+ t.Log("=== checkTryLockTimeout")
+ checkTryLockTimeout(ctx, t, conn)
+
+ t.Log("=== checkTryLockMissing")
+ checkTryLockMissing(ctx, t, conn)
+
+ t.Log("=== checkTryLockUnblocks")
+ checkTryLockUnblocks(ctx, t, conn)
+}
+
+// checkTryLockTimeout test the fail-fast nature of TryLock
+func checkTryLockTimeout(ctx context.Context, t *testing.T, conn topo.Conn) {
+ keyspacePath := path.Join(topo.KeyspacesPath, "test_keyspace")
+ lockDescriptor, err := conn.TryLock(ctx, keyspacePath, "")
+ if err != nil {
+ require.Fail(t, "TryLock failed", err.Error())
+ }
+
+ // We have the lock, list the keyspace directory.
+ // It should not contain anything, except Ephemeral files.
+ entries, err := conn.ListDir(ctx, keyspacePath, true /*full*/)
+ if err != nil {
+ require.Fail(t, "ListDir failed: %v", err.Error())
+ }
+ for _, e := range entries {
+ if e.Name == "Keyspace" {
+ continue
+ }
+ if e.Ephemeral {
+ t.Logf("skipping ephemeral node %v in %v", e, keyspacePath)
+ continue
+ }
+ // Non-ephemeral entries better have only ephemeral children.
+ p := path.Join(keyspacePath, e.Name)
+ entries, err := conn.ListDir(ctx, p, true /*full*/)
+ if err != nil {
+ require.Fail(t, "ListDir failed", err.Error())
+ }
+ for _, e := range entries {
+ if e.Ephemeral {
+ t.Logf("skipping ephemeral node %v in %v", e, p)
+ } else {
+ require.Fail(t, "non-ephemeral DirEntry")
+ }
+ }
+ }
+
+ // We should not be able to take the lock again. It should throw `NodeExists` error.
+ fastCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ if _, err := conn.TryLock(fastCtx, keyspacePath, "again"); !topo.IsErrType(err, topo.NodeExists) {
+ require.Fail(t, "TryLock failed", err.Error())
+ }
+ cancel()
+
+ // test we can interrupt taking the lock
+ interruptCtx, cancel := context.WithCancel(ctx)
+ finished := make(chan struct{})
+
+ // go routine to cancel the context.
+ go func() {
+ <-finished
+ cancel()
+ }()
+
+ waitUntil := time.Now().Add(10 * time.Second)
+ var firstTime = true
+ // after attempting the `TryLock` and getting an error `NodeExists`, we will cancel the context deliberately
+ // and expect `context canceled` error in next iteration of `for` loop.
+ for {
+ if time.Now().After(waitUntil) {
+ t.Fatalf("Unlock(test_keyspace) timed out")
+ }
+ // we expect context to fail with `context canceled` error
+ if interruptCtx.Err() != nil {
+ require.ErrorContains(t, interruptCtx.Err(), "context canceled")
+ break
+ }
+ if _, err := conn.TryLock(interruptCtx, keyspacePath, "interrupted"); !topo.IsErrType(err, topo.NodeExists) {
+ require.Fail(t, "TryLock failed", err.Error())
+ }
+ if firstTime {
+ close(finished)
+ firstTime = false
+ }
+ time.Sleep(1 * time.Second)
+ }
+
+ if err := lockDescriptor.Check(ctx); err != nil {
+ t.Errorf("Check(): %v", err)
+ }
+
+ if err := lockDescriptor.Unlock(ctx); err != nil {
+ require.Fail(t, "Unlock failed", err.Error())
+ }
+
+ // test we can't unlock again
+ if err := lockDescriptor.Unlock(ctx); err == nil {
+ require.Fail(t, "Unlock failed", err.Error())
+ }
+}
+
+// checkTryLockMissing makes sure we can't lock a non-existing directory.
+func checkTryLockMissing(ctx context.Context, t *testing.T, conn topo.Conn) {
+ keyspacePath := path.Join(topo.KeyspacesPath, "test_keyspace_666")
+ if _, err := conn.TryLock(ctx, keyspacePath, "missing"); err == nil {
+ require.Fail(t, "TryLock(test_keyspace_666) worked for non-existing keyspace")
+ }
+}
+
+// unlike 'checkLockUnblocks', checkTryLockUnblocks will not block on other client but instead
+// keep retrying until it gets the lock.
+func checkTryLockUnblocks(ctx context.Context, t *testing.T, conn topo.Conn) {
+ keyspacePath := path.Join(topo.KeyspacesPath, "test_keyspace")
+ unblock := make(chan struct{})
+ finished := make(chan struct{})
+
+ duration := 10 * time.Second
+ waitUntil := time.Now().Add(duration)
+ // TryLock will keep getting NodeExists until lockDescriptor2 unlock itself.
+ // It will not wait but immediately return with NodeExists error.
+ go func() {
+ <-unblock
+ for time.Now().Before(waitUntil) {
+ lockDescriptor, err := conn.TryLock(ctx, keyspacePath, "unblocks")
+ if err != nil {
+ if !topo.IsErrType(err, topo.NodeExists) {
+ require.Fail(t, "expected node exists during trylock", err.Error())
+ }
+ time.Sleep(1 * time.Second)
+ } else {
+ if err = lockDescriptor.Unlock(ctx); err != nil {
+ require.Fail(t, "Unlock(test_keyspace) failed", err.Error())
+ }
+ close(finished)
+ break
+ }
+ }
+ }()
+
+ // Lock the keyspace.
+ lockDescriptor2, err := conn.TryLock(ctx, keyspacePath, "")
+ if err != nil {
+ require.Fail(t, "Lock(test_keyspace) failed", err.Error())
+ }
+
+ // unblock the go routine so it starts waiting
+ close(unblock)
+
+ if err = lockDescriptor2.Unlock(ctx); err != nil {
+ require.Fail(t, "Unlock(test_keyspace) failed", err.Error())
+ }
+
+ timeout := time.After(2 * duration)
+ select {
+ case <-finished:
+ case <-timeout:
+ require.Fail(t, "Unlock(test_keyspace) timed out")
+ }
+}
diff --git a/go/vt/topo/topoproto/flag.go b/go/vt/topo/topoproto/flag.go
index d92378017ae..becc789b59f 100644
--- a/go/vt/topo/topoproto/flag.go
+++ b/go/vt/topo/topoproto/flag.go
@@ -39,7 +39,7 @@ func (ttlv *TabletTypeListFlag) Set(v string) (err error) {
// Type is part of the pflag.Value interface.
func (ttlv *TabletTypeListFlag) Type() string {
- return "[]topodatapb.TabletType"
+ return "strings"
}
// TabletTypeFlag implements the pflag.Value interface, for parsing a command-line value into a TabletType.
diff --git a/go/vt/topo/topoproto/tablet.go b/go/vt/topo/topoproto/tablet.go
index 985f7d2b6d8..395f8f0fa47 100644
--- a/go/vt/topo/topoproto/tablet.go
+++ b/go/vt/topo/topoproto/tablet.go
@@ -108,8 +108,8 @@ func ParseTabletAlias(aliasStr string) (*topodatapb.TabletAlias, error) {
}
// ParseTabletSet returns a set of tablets based on a provided comma separated list of tablets.
-func ParseTabletSet(tabletListStr string) sets.String {
- set := sets.NewString()
+func ParseTabletSet(tabletListStr string) sets.Set[string] {
+ set := sets.New[string]()
if tabletListStr == "" {
return set
}
diff --git a/go/vt/topo/topotests/keyspace_test.go b/go/vt/topo/topotests/keyspace_test.go
new file mode 100644
index 00000000000..b0b35c1421c
--- /dev/null
+++ b/go/vt/topo/topotests/keyspace_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package topotests
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/vterrors"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/proto/vtrpc"
+)
+
+func TestCreateKeyspace(t *testing.T) {
+ ts := memorytopo.NewServer("zone1")
+ ctx := context.Background()
+
+ t.Run("valid name", func(t *testing.T) {
+ err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{})
+ require.NoError(t, err)
+ })
+ t.Run("invalid name", func(t *testing.T) {
+ err := ts.CreateKeyspace(ctx, "no/slashes/allowed", &topodatapb.Keyspace{})
+ assert.Error(t, err)
+ assert.Equal(t, vtrpc.Code_INVALID_ARGUMENT, vterrors.Code(err), "%+v", err)
+ })
+}
+
+func TestGetKeyspace(t *testing.T) {
+ ts := memorytopo.NewServer("zone1")
+ ctx := context.Background()
+
+ t.Run("valid name", func(t *testing.T) {
+ // First, create the keyspace.
+ err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{})
+ require.NoError(t, err)
+
+ // Now, get it.
+ ks, err := ts.GetKeyspace(ctx, "ks")
+ require.NoError(t, err)
+ assert.NotNil(t, ks)
+ })
+
+ t.Run("invalid name", func(t *testing.T) {
+ // We can't create the keyspace (because we can't create a keyspace
+ // with an invalid name), so we'll validate the error we get is *not*
+ // NOT_FOUND.
+ ks, err := ts.GetKeyspace(ctx, "no/slashes/allowed")
+ assert.Error(t, err)
+ assert.Equal(t, vtrpc.Code_INVALID_ARGUMENT, vterrors.Code(err), "%+v", err)
+ assert.Nil(t, ks)
+ })
+}
diff --git a/go/vt/topo/zk2topo/lock.go b/go/vt/topo/zk2topo/lock.go
index cc0f0859403..974361544a5 100644
--- a/go/vt/topo/zk2topo/lock.go
+++ b/go/vt/topo/zk2topo/lock.go
@@ -17,9 +17,9 @@ limitations under the License.
package zk2topo
import (
- "path"
-
"context"
+ "fmt"
+ "path"
"github.com/z-division/go-zookeeper/zk"
@@ -39,7 +39,40 @@ type zkLockDescriptor struct {
// Lock is part of the topo.Conn interface.
func (zs *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
- // Lock paths end in a trailing slash to that when we create
+ return zs.lock(ctx, dirPath, contents)
+}
+
+// TryLock is part of the topo.Conn interface.
+func (zs *Server) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ // We list all the entries under dirPath
+ entries, err := zs.ListDir(ctx, dirPath, true)
+ if err != nil {
+ // We need to return the right error codes, like
+ // topo.ErrNoNode and topo.ErrInterrupted, and the
+ // easiest way to do this is to return convertError(err).
+ // It may lose some of the context, if this is an issue,
+ // maybe logging the error would work here.
+ return nil, convertError(err, dirPath)
+ }
+
+ // If there is a folder '/locks' with some entries in it then we can assume that someone else already has a lock.
+ // Throw error in this case
+ for _, e := range entries {
+ // there is a bug where ListDir return ephemeral = false for locks. It is due
+ // https://github.com/vitessio/vitess/blob/main/go/vt/topo/zk2topo/utils.go#L55
+ // TODO: Fix/send ephemeral flag value recursively while creating ephemeral file
+ if e.Name == locksPath && e.Type == topo.TypeDirectory {
+ return nil, topo.NewError(topo.NodeExists, fmt.Sprintf("lock already exists at path %s", dirPath))
+ }
+ }
+
+ // everything is good let's acquire the lock.
+ return zs.lock(ctx, dirPath, contents)
+}
+
+// Lock is part of the topo.Conn interface.
+func (zs *Server) lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) {
+ // Lock paths end in a trailing slash so that when we create
// sequential nodes, they are created as children, not siblings.
locksDir := path.Join(zs.root, dirPath, locksPath) + "/"
diff --git a/go/vt/topo/zk2topo/server_test.go b/go/vt/topo/zk2topo/server_test.go
index 7eff2e48fa7..ebbca9898ce 100644
--- a/go/vt/topo/zk2topo/server_test.go
+++ b/go/vt/topo/zk2topo/server_test.go
@@ -61,7 +61,7 @@ func TestZk2Topo(t *testing.T) {
}
return ts
- })
+ }, []string{})
}
func TestHasObservers(t *testing.T) {
diff --git a/go/vt/topotools/rebuild_keyspace.go b/go/vt/topotools/rebuild_keyspace.go
index 4882799870d..d58ce0b7160 100644
--- a/go/vt/topotools/rebuild_keyspace.go
+++ b/go/vt/topotools/rebuild_keyspace.go
@@ -96,6 +96,7 @@ func RebuildKeyspaceLocked(ctx context.Context, log logutil.Logger, ts *topo.Ser
srvKeyspaceMap[cell] = &topodatapb.SrvKeyspace{
ServedFrom: ki.ComputeCellServedFrom(cell),
}
+ srvKeyspaceMap[cell].ThrottlerConfig = ki.ThrottlerConfig
}
servedTypes := []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY}
diff --git a/go/vt/topotools/vschema_ddl.go b/go/vt/topotools/vschema_ddl.go
index 30cce3bd5db..e8da2734b4f 100644
--- a/go/vt/topotools/vschema_ddl.go
+++ b/go/vt/topotools/vschema_ddl.go
@@ -17,7 +17,6 @@ limitations under the License.
package topotools
import (
- "fmt"
"reflect"
"vitess.io/vitess/go/vt/sqlparser"
@@ -226,15 +225,9 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, alterVschema *sqlpar
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema already contains auto inc %v on table %s in keyspace %s", table.AutoIncrement, name, ksName)
}
- sequence := alterVschema.AutoIncSpec.Sequence
- sequenceFqn := sequence.Name.String()
- if sequence.Qualifier.String() != "" {
- sequenceFqn = fmt.Sprintf("%s.%s", sequence.Qualifier.String(), sequenceFqn)
- }
-
table.AutoIncrement = &vschemapb.AutoIncrement{
Column: alterVschema.AutoIncSpec.Column.String(),
- Sequence: sequenceFqn,
+ Sequence: sqlparser.String(alterVschema.AutoIncSpec.Sequence),
}
return ks, nil
diff --git a/go/vt/vitessdriver/rows.go b/go/vt/vitessdriver/rows.go
index d2ace7bdfad..a2438bb891c 100644
--- a/go/vt/vitessdriver/rows.go
+++ b/go/vt/vitessdriver/rows.go
@@ -17,10 +17,14 @@ limitations under the License.
package vitessdriver
import (
+ "database/sql"
"database/sql/driver"
"io"
+ "reflect"
+ "time"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/proto/query"
)
// rows creates a database/sql/driver compliant Row iterator
@@ -58,3 +62,60 @@ func (ri *rows) Next(dest []driver.Value) error {
ri.index++
return nil
}
+
+var (
+ typeInt8 = reflect.TypeOf(int8(0))
+ typeUint8 = reflect.TypeOf(uint8(0))
+ typeInt16 = reflect.TypeOf(int16(0))
+ typeUint16 = reflect.TypeOf(uint16(0))
+ typeInt32 = reflect.TypeOf(int32(0))
+ typeUint32 = reflect.TypeOf(uint32(0))
+ typeInt64 = reflect.TypeOf(int64(0))
+ typeUint64 = reflect.TypeOf(uint64(0))
+ typeFloat32 = reflect.TypeOf(float32(0))
+ typeFloat64 = reflect.TypeOf(float64(0))
+ typeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ typeTime = reflect.TypeOf(time.Time{})
+ typeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+// Implements the RowsColumnTypeScanType interface
+func (ri *rows) ColumnTypeScanType(index int) reflect.Type {
+ field := ri.qr.Fields[index]
+ switch field.GetType() {
+ case query.Type_INT8:
+ return typeInt8
+ case query.Type_UINT8:
+ return typeUint8
+ case query.Type_INT16, query.Type_YEAR:
+ return typeInt16
+ case query.Type_UINT16:
+ return typeUint16
+ case query.Type_INT24:
+ return typeInt32
+ case query.Type_UINT24: // no 24 bit type, using 32 instead
+ return typeUint32
+ case query.Type_INT32:
+ return typeInt32
+ case query.Type_UINT32:
+ return typeUint32
+ case query.Type_INT64:
+ return typeInt64
+ case query.Type_UINT64:
+ return typeUint64
+ case query.Type_FLOAT32:
+ return typeFloat32
+ case query.Type_FLOAT64:
+ return typeFloat64
+ case query.Type_TIMESTAMP, query.Type_DECIMAL, query.Type_VARCHAR, query.Type_TEXT,
+ query.Type_BLOB, query.Type_VARBINARY, query.Type_CHAR, query.Type_BINARY, query.Type_BIT,
+ query.Type_ENUM, query.Type_SET, query.Type_TUPLE, query.Type_GEOMETRY, query.Type_JSON,
+ query.Type_HEXNUM, query.Type_HEXVAL, query.Type_BITNUM:
+
+ return typeRawBytes
+ case query.Type_DATE, query.Type_TIME, query.Type_DATETIME:
+ return typeTime
+ default:
+ return typeUnknown
+ }
+}
diff --git a/go/vt/vitessdriver/rows_test.go b/go/vt/vitessdriver/rows_test.go
index fdfc478ad16..13584e70dd8 100644
--- a/go/vt/vitessdriver/rows_test.go
+++ b/go/vt/vitessdriver/rows_test.go
@@ -18,10 +18,12 @@ package vitessdriver
import (
"database/sql/driver"
+ "fmt"
"io"
"reflect"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/sqltypes"
@@ -135,3 +137,92 @@ func TestRows(t *testing.T) {
_ = ri.Close()
}
+
+// Test that the ColumnTypeScanType function returns the correct reflection type for each
+// sql type. The sql type in turn comes from a table column's type.
+func TestColumnTypeScanType(t *testing.T) {
+ var r = sqltypes.Result{
+ Fields: []*querypb.Field{
+ {
+ Name: "field1",
+ Type: sqltypes.Int8,
+ },
+ {
+ Name: "field2",
+ Type: sqltypes.Uint8,
+ },
+ {
+ Name: "field3",
+ Type: sqltypes.Int16,
+ },
+ {
+ Name: "field4",
+ Type: sqltypes.Uint16,
+ },
+ {
+ Name: "field5",
+ Type: sqltypes.Int24,
+ },
+ {
+ Name: "field6",
+ Type: sqltypes.Uint24,
+ },
+ {
+ Name: "field7",
+ Type: sqltypes.Int32,
+ },
+ {
+ Name: "field8",
+ Type: sqltypes.Uint32,
+ },
+ {
+ Name: "field9",
+ Type: sqltypes.Int64,
+ },
+ {
+ Name: "field10",
+ Type: sqltypes.Uint64,
+ },
+ {
+ Name: "field11",
+ Type: sqltypes.Float32,
+ },
+ {
+ Name: "field12",
+ Type: sqltypes.Float64,
+ },
+ {
+ Name: "field13",
+ Type: sqltypes.VarBinary,
+ },
+ {
+ Name: "field14",
+ Type: sqltypes.Datetime,
+ },
+ },
+ }
+
+ ri := newRows(&r, &converter{}).(driver.RowsColumnTypeScanType)
+ defer ri.Close()
+
+ wantTypes := []reflect.Type{
+ typeInt8,
+ typeUint8,
+ typeInt16,
+ typeUint16,
+ typeInt32,
+ typeUint32,
+ typeInt32,
+ typeUint32,
+ typeInt64,
+ typeUint64,
+ typeFloat32,
+ typeFloat64,
+ typeRawBytes,
+ typeTime,
+ }
+
+ for i := 0; i < len(wantTypes); i++ {
+ assert.Equal(t, ri.ColumnTypeScanType(i), wantTypes[i], fmt.Sprintf("unexpected type %v, wanted %v", ri.ColumnTypeScanType(i), wantTypes[i]))
+ }
+}
diff --git a/go/vt/vtadmin/README.md b/go/vt/vtadmin/README.md
index 75ef9cdc200..c49f04a8dd4 100644
--- a/go/vt/vtadmin/README.md
+++ b/go/vt/vtadmin/README.md
@@ -1,7 +1,6 @@
# VTAdmin
-VTAdmin is an experimental replacement for the vtctld2 web UI, that also allows
-users to manage multiple Vitess clusters from a single API and web UI.
+VTAdmin is web UI and API that allows users to manage multiple Vitess clusters at once.
For a more detailed writeup, refer to the [original RFC](https://github.com/vitessio/vitess/issues/7117).
diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go
index 2ebe638a457..59360cde337 100644
--- a/go/vt/vtadmin/api.go
+++ b/go/vt/vtadmin/api.go
@@ -341,6 +341,8 @@ func (api *API) Handler() http.Handler {
router.HandleFunc("/cells", httpAPI.Adapt(vtadminhttp.GetCellInfos)).Name("API.GetCellInfos")
router.HandleFunc("/cells_aliases", httpAPI.Adapt(vtadminhttp.GetCellsAliases)).Name("API.GetCellsAliases")
router.HandleFunc("/clusters", httpAPI.Adapt(vtadminhttp.GetClusters)).Name("API.GetClusters")
+ router.HandleFunc("/cluster/{cluster_id}/topology", httpAPI.Adapt(vtadminhttp.GetTopologyPath)).Name("API.GetTopologyPath")
+ router.HandleFunc("/cluster/{cluster_id}/validate", httpAPI.Adapt(vtadminhttp.Validate)).Name("API.Validate").Methods("PUT", "OPTIONS")
router.HandleFunc("/gates", httpAPI.Adapt(vtadminhttp.GetGates)).Name("API.GetGates")
router.HandleFunc("/keyspace/{cluster_id}", httpAPI.Adapt(vtadminhttp.CreateKeyspace)).Name("API.CreateKeyspace").Methods("POST")
router.HandleFunc("/keyspace/{cluster_id}/{name}", httpAPI.Adapt(vtadminhttp.DeleteKeyspace)).Name("API.DeleteKeyspace").Methods("DELETE")
@@ -358,6 +360,8 @@ func (api *API) Handler() http.Handler {
router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/emergency_failover", httpAPI.Adapt(vtadminhttp.EmergencyFailoverShard)).Name("API.EmergencyFailoverShard").Methods("POST")
router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/planned_failover", httpAPI.Adapt(vtadminhttp.PlannedFailoverShard)).Name("API.PlannedFailoverShard").Methods("POST")
router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/reload_schema_shard", httpAPI.Adapt(vtadminhttp.ReloadSchemaShard)).Name("API.ReloadSchemaShard").Methods("PUT", "OPTIONS")
+ router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/validate", httpAPI.Adapt(vtadminhttp.ValidateShard)).Name("API.ValidateShard").Methods("PUT", "OPTIONS")
+ router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/validate_version", httpAPI.Adapt(vtadminhttp.ValidateVersionShard)).Name("API.ValidateVersionShard").Methods("PUT", "OPTIONS")
router.HandleFunc("/shard_replication_positions", httpAPI.Adapt(vtadminhttp.GetShardReplicationPositions)).Name("API.GetShardReplicationPositions")
router.HandleFunc("/shards/{cluster_id}", httpAPI.Adapt(vtadminhttp.CreateShard)).Name("API.CreateShard").Methods("POST")
router.HandleFunc("/shards/{cluster_id}", httpAPI.Adapt(vtadminhttp.DeleteShards)).Name("API.DeleteShards").Methods("DELETE")
@@ -366,6 +370,7 @@ func (api *API) Handler() http.Handler {
router.HandleFunc("/tablets", httpAPI.Adapt(vtadminhttp.GetTablets)).Name("API.GetTablets")
router.HandleFunc("/tablet/{tablet}", httpAPI.Adapt(vtadminhttp.GetTablet)).Name("API.GetTablet").Methods("GET")
router.HandleFunc("/tablet/{tablet}", httpAPI.Adapt(vtadminhttp.DeleteTablet)).Name("API.DeleteTablet").Methods("DELETE", "OPTIONS")
+ router.HandleFunc("/tablet/{tablet}/full_status", httpAPI.Adapt(vtadminhttp.GetFullStatus)).Name("API.GetFullStatus").Methods("GET")
router.HandleFunc("/tablet/{tablet}/healthcheck", httpAPI.Adapt(vtadminhttp.RunHealthCheck)).Name("API.RunHealthCheck")
router.HandleFunc("/tablet/{tablet}/ping", httpAPI.Adapt(vtadminhttp.PingTablet)).Name("API.PingTablet")
router.HandleFunc("/tablet/{tablet}/refresh", httpAPI.Adapt(vtadminhttp.RefreshState)).Name("API.RefreshState").Methods("PUT", "OPTIONS")
@@ -771,6 +776,25 @@ func (api *API) GetClusters(ctx context.Context, req *vtadminpb.GetClustersReque
}, nil
}
+// GetFullStatus is part of the vtadminpb.VTAdminServer interface.
+func (api *API) GetFullStatus(ctx context.Context, req *vtadminpb.GetFullStatusRequest) (*vtctldatapb.GetFullStatusResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.GetFullStatus")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.TabletFullStatusResource, rbac.GetAction) {
+ return nil, nil
+ }
+
+ return c.Vtctld.GetFullStatus(ctx, &vtctldatapb.GetFullStatusRequest{
+ TabletAlias: req.Alias,
+ })
+}
+
// GetGates is part of the vtadminpb.VTAdminServer interface.
func (api *API) GetGates(ctx context.Context, req *vtadminpb.GetGatesRequest) (*vtadminpb.GetGatesResponse, error) {
span, ctx := trace.NewSpan(ctx, "API.GetGates")
@@ -1147,6 +1171,25 @@ func (api *API) GetTablets(ctx context.Context, req *vtadminpb.GetTabletsRequest
}, nil
}
+// GetTopologyPath is part of the vtadminpb.VTAdminServer interface.
+func (api *API) GetTopologyPath(ctx context.Context, req *vtadminpb.GetTopologyPathRequest) (*vtctldatapb.GetTopologyPathResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.GetTopologyPath")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ cluster.AnnotateSpan(c, span)
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.TopologyResource, rbac.GetAction) {
+ return nil, nil
+ }
+
+ return c.Vtctld.GetTopologyPath(ctx, &vtctldatapb.GetTopologyPathRequest{Path: req.Path})
+}
+
// GetVSchema is part of the vtadminpb.VTAdminServer interface.
func (api *API) GetVSchema(ctx context.Context, req *vtadminpb.GetVSchemaRequest) (*vtadminpb.VSchema, error) {
span, ctx := trace.NewSpan(ctx, "API.GetVSchema")
@@ -1359,7 +1402,7 @@ func (api *API) GetWorkflows(ctx context.Context, req *vtadminpb.GetWorkflowsReq
workflows, err := c.GetWorkflows(ctx, req.Keyspaces, cluster.GetWorkflowsOptions{
ActiveOnly: req.ActiveOnly,
- IgnoreKeyspaces: sets.NewString(req.IgnoreKeyspaces...),
+ IgnoreKeyspaces: sets.New[string](req.IgnoreKeyspaces...),
})
if err != nil {
rec.RecordError(err)
@@ -1714,6 +1757,31 @@ func (api *API) TabletExternallyPromoted(ctx context.Context, req *vtadminpb.Tab
return c.TabletExternallyPromoted(ctx, tablet)
}
+// Validate is part of the vtadminpb.VTAdminServer interface.
+func (api *API) Validate(ctx context.Context, req *vtadminpb.ValidateRequest) (*vtctldatapb.ValidateResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.Validate")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.ClusterResource, rbac.PutAction) {
+ return nil, nil
+ }
+
+ res, err := c.Vtctld.Validate(ctx, &vtctldatapb.ValidateRequest{
+ PingTablets: req.PingTablets,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
// ValidateKeyspace is part of the vtadminpb.VTAdminServer interface.
func (api *API) ValidateKeyspace(ctx context.Context, req *vtadminpb.ValidateKeyspaceRequest) (*vtctldatapb.ValidateKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "API.ValidateKeyspace")
@@ -1765,6 +1833,33 @@ func (api *API) ValidateSchemaKeyspace(ctx context.Context, req *vtadminpb.Valid
return res, nil
}
+// ValidateShard is part of the vtadminpb.VTAdminServer interface.
+func (api *API) ValidateShard(ctx context.Context, req *vtadminpb.ValidateShardRequest) (*vtctldatapb.ValidateShardResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.ValidateShard")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.ShardResource, rbac.PutAction) {
+ return nil, nil
+ }
+
+ res, err := c.Vtctld.ValidateShard(ctx, &vtctldatapb.ValidateShardRequest{
+ Keyspace: req.Keyspace,
+ Shard: req.Shard,
+ PingTablets: req.PingTablets,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
// ValidateVersionKeyspace is part of the vtadminpb.VTAdminServer interface.
func (api *API) ValidateVersionKeyspace(ctx context.Context, req *vtadminpb.ValidateVersionKeyspaceRequest) (*vtctldatapb.ValidateVersionKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "API.ValidateVersionKeyspace")
@@ -1790,8 +1885,37 @@ func (api *API) ValidateVersionKeyspace(ctx context.Context, req *vtadminpb.Vali
return res, nil
}
+// ValidateVersionShard is part of the vtadminpb.VTAdminServer interface.
+func (api *API) ValidateVersionShard(ctx context.Context, req *vtadminpb.ValidateVersionShardRequest) (*vtctldatapb.ValidateVersionShardResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.ValidateVersionShard")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.ShardResource, rbac.PutAction) {
+ return nil, nil
+ }
+
+ res, err := c.Vtctld.ValidateVersionShard(ctx, &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: req.Keyspace,
+ Shard: req.Shard,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
// VTExplain is part of the vtadminpb.VTAdminServer interface.
func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) (*vtadminpb.VTExplainResponse, error) {
+ // TODO (andrew): https://github.com/vitessio/vitess/issues/12161.
+ log.Warningf("VTAdminServer.VTExplain is deprecated; please use a vexplain query instead. For more details, see https://vitess.io/docs/user-guides/sql/vexplain/.")
+
span, ctx := trace.NewSpan(ctx, "API.VTExplain")
defer span.Finish()
diff --git a/go/vt/vtadmin/api_authz_test.go b/go/vt/vtadmin/api_authz_test.go
index 36edfee1819..45d3e443c6e 100644
--- a/go/vt/vtadmin/api_authz_test.go
+++ b/go/vt/vtadmin/api_authz_test.go
@@ -3209,7 +3209,8 @@ func testClusters(t testing.TB) []*cluster.Cluster {
Keyspace: "test",
Name: "-",
Shard: &topodatapb.Shard{
- KeyRange: &topodatapb.KeyRange{},
+ KeyRange: &topodatapb.KeyRange{},
+ IsPrimaryServing: true,
},
},
},
diff --git a/go/vt/vtadmin/api_test.go b/go/vt/vtadmin/api_test.go
index 04f758c68c0..b707f2036aa 100644
--- a/go/vt/vtadmin/api_test.go
+++ b/go/vt/vtadmin/api_test.go
@@ -34,7 +34,6 @@ import (
_flag "vitess.io/vitess/go/internal/flag"
"vitess.io/vitess/go/test/utils"
- "vitess.io/vitess/go/vt/grpccommon"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -567,7 +566,7 @@ func TestFindSchema(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, tt.expected, resp)
+ assert.Truef(t, proto.Equal(tt.expected, resp), "expected %v, got %v", tt.expected, resp)
})
}
@@ -815,7 +814,7 @@ func TestFindSchema(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, expected, schema)
+ assert.Truef(t, proto.Equal(expected, schema), "expected %v, got %v", expected, schema)
})
}
@@ -1091,7 +1090,7 @@ func TestGetKeyspace(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, tt.expected, ks)
+ assert.Truef(t, proto.Equal(tt.expected, ks), "expected %v, got %v", tt.expected, ks)
}, vtctlds...)
})
}
@@ -1110,14 +1109,14 @@ func TestGetKeyspaces(t *testing.T) {
{
name: "multiple clusters, multiple shards",
clusterKeyspaces: [][]*vtctldatapb.Keyspace{
- //cluster0
+ // cluster0
{
{
Name: "c0-ks0",
Keyspace: &topodatapb.Keyspace{},
},
},
- //cluster1
+ // cluster1
{
{
Name: "c1-ks0",
@@ -1126,7 +1125,7 @@ func TestGetKeyspaces(t *testing.T) {
},
},
clusterShards: [][]*vtctldatapb.Shard{
- //cluster0
+ // cluster0
{
{
Keyspace: "c0-ks0",
@@ -1137,7 +1136,7 @@ func TestGetKeyspaces(t *testing.T) {
Name: "80-",
},
},
- //cluster1
+ // cluster1
{
{
Keyspace: "c1-ks0",
@@ -1248,14 +1247,14 @@ func TestGetKeyspaces(t *testing.T) {
{
name: "filtered by cluster ID",
clusterKeyspaces: [][]*vtctldatapb.Keyspace{
- //cluster0
+ // cluster0
{
{
Name: "c0-ks0",
Keyspace: &topodatapb.Keyspace{},
},
},
- //cluster1
+ // cluster1
{
{
Name: "c1-ks0",
@@ -1575,7 +1574,7 @@ func TestGetSchema(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, tt.expected, resp)
+ assert.Truef(t, proto.Equal(tt.expected, resp), "expected %v, got %v", tt.expected, resp)
})
})
}
@@ -1742,7 +1741,7 @@ func TestGetSchema(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, expected, schema)
+ assert.Truef(t, proto.Equal(expected, schema), "expected %v, got %v", expected, schema)
})
}
@@ -2556,7 +2555,7 @@ func TestGetSchemas(t *testing.T) {
}
assert.NoError(t, err)
- assert.ElementsMatch(t, expected.Schemas, resp.Schemas)
+ assert.Truef(t, proto.Equal(expected, resp), "expected: %v, got: %v", expected, resp)
})
}
@@ -2717,7 +2716,7 @@ func TestGetSrvVSchema(t *testing.T) {
}
require.NoError(t, err)
- assert.Equal(t, tt.expected, resp)
+ assert.Truef(t, proto.Equal(tt.expected, resp), "expected %v, got %v", tt.expected, resp)
})
})
}
@@ -3609,7 +3608,7 @@ func TestGetVSchema(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, tt.expected, resp)
+ assert.Truef(t, proto.Equal(tt.expected, resp), "expected %v, got %v", tt.expected, resp)
})
}
}
@@ -4155,7 +4154,7 @@ func TestGetWorkflow(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, tt.expected, resp)
+ assert.Truef(t, proto.Equal(tt.expected, resp), "expected %v, got %v", tt.expected, resp)
})
}
}
@@ -5143,17 +5142,6 @@ func init() {
tmclient.RegisterTabletManagerClientFactory("vtadmin.test", func() tmclient.TabletManagerClient {
return nil
})
-
- // This prevents data-race failures in tests involving grpc client or server
- // creation. For example, vtctldclient.New() eventually ends up calling
- // grpccommon.EnableTracingOpt() which does a synchronized, one-time
- // mutation of the global grpc.EnableTracing. This variable is also read,
- // unguarded, by grpc.NewServer(), which is a function call that appears in
- // most, if not all, vtadmin.API tests.
- //
- // Calling this here ensures that one-time write happens before any test
- // attempts to read that value by way of grpc.NewServer().
- grpccommon.EnableTracingOpt()
}
//go:generate -command authztestgen go run ./testutil/authztestgen
diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go
index 9cdbf7c7db3..068416ee359 100644
--- a/go/vt/vtadmin/cluster/cluster.go
+++ b/go/vt/vtadmin/cluster/cluster.go
@@ -623,7 +623,7 @@ func (c *Cluster) findTablets(ctx context.Context, filter func(*vtadminpb.Tablet
// FindWorkflowsOptions is the set of options for FindWorkflows requests.
type FindWorkflowsOptions struct {
ActiveOnly bool
- IgnoreKeyspaces sets.String
+ IgnoreKeyspaces sets.Set[string]
Filter func(workflow *vtadminpb.Workflow) bool
}
@@ -658,7 +658,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi
}
if opts.IgnoreKeyspaces == nil {
- opts.IgnoreKeyspaces = sets.NewString()
+ opts.IgnoreKeyspaces = sets.New[string]()
}
if len(keyspaces) == 0 {
@@ -685,7 +685,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi
span.Finish()
} else if opts.IgnoreKeyspaces.Len() > 0 {
log.Warningf("Cluster.findWorkflows: IgnoreKeyspaces was set, but Keyspaces was not empty; ignoring IgnoreKeyspaces in favor of explicitly checking everything in Keyspaces: (%s)", strings.Join(keyspaces, ", "))
- opts.IgnoreKeyspaces = sets.NewString()
+ opts.IgnoreKeyspaces = sets.New[string]()
}
// Annotate the parent span with some additional information about the call.
@@ -693,7 +693,7 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi
span.Annotate("num_keyspaces", len(keyspaces))
span.Annotate("keyspaces", strings.Join(keyspaces, ","))
span.Annotate("num_ignore_keyspaces", opts.IgnoreKeyspaces.Len())
- span.Annotate("ignore_keyspaces", strings.Join(opts.IgnoreKeyspaces.List(), ","))
+ span.Annotate("ignore_keyspaces", strings.Join(sets.List(opts.IgnoreKeyspaces), ","))
}
clusterpb := c.ToProto()
@@ -799,7 +799,7 @@ func (c *Cluster) GetBackups(ctx context.Context, req *vtadminpb.GetBackupsReque
)
for ks, shardSet := range shardsByKeyspace {
- for _, shard := range shardSet.List() {
+ for _, shard := range sets.List(shardSet) {
wg.Add(1)
go func(keyspace, shard string) {
@@ -856,8 +856,8 @@ func (c *Cluster) GetBackups(ctx context.Context, req *vtadminpb.GetBackupsReque
return backups, nil
}
-func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspaceShards []string) (map[string]sets.String, error) {
- shardsByKeyspace := map[string]sets.String{}
+func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspaceShards []string) (map[string]sets.Set[string], error) {
+ shardsByKeyspace := map[string]sets.Set[string]{}
if len(keyspaces) == 0 && len(keyspaceShards) == 0 {
// Special case: if nothing was explicitly passed, get all shards in
@@ -868,7 +868,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace
}
for _, ks := range kss {
- shardsByKeyspace[ks.Keyspace.Name] = sets.NewString()
+ shardsByKeyspace[ks.Keyspace.Name] = sets.New[string]()
for _, shard := range ks.Shards {
shardsByKeyspace[ks.Keyspace.Name].Insert(shard.Name)
}
@@ -884,7 +884,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace
}
if _, ok := shardsByKeyspace[ks]; !ok {
- shardsByKeyspace[ks] = sets.NewString(shard)
+ shardsByKeyspace[ks] = sets.New[string](shard)
continue
}
@@ -897,7 +897,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace
// empty set to indicate we should take all shards in the GetKeyspace
// section below.
if _, ok := shardsByKeyspace[ks]; !ok {
- shardsByKeyspace[ks] = sets.NewString()
+ shardsByKeyspace[ks] = sets.New[string]()
}
}
@@ -912,7 +912,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace
for ksName, shardSet := range shardsByKeyspace {
wg.Add(1)
- go func(ksName string, shardSet sets.String) {
+ go func(ksName string, shardSet sets.Set[string]) {
defer wg.Done()
keyspace, err := c.GetKeyspace(ctx, ksName)
@@ -934,7 +934,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace
return
}
- fullShardSet := sets.NewString()
+ fullShardSet := sets.New[string]()
for _, shard := range keyspace.Shards {
fullShardSet.Insert(shard.Name)
}
@@ -949,7 +949,7 @@ func (c *Cluster) getShardSets(ctx context.Context, keyspaces []string, keyspace
overlap := shardSet.Intersection(fullShardSet)
if overlap.Len() != shardSet.Len() {
- log.Warningf("getShardSets(): keyspace %s is missing specified shards in cluster %s: %v", ksName, c.ID, shardSet.Difference(overlap).List())
+ log.Warningf("getShardSets(): keyspace %s is missing specified shards in cluster %s: %v", ksName, c.ID, sets.List(shardSet.Difference(overlap)))
}
m.Lock()
@@ -1684,7 +1684,7 @@ func (c *Cluster) GetShardReplicationPositions(ctx context.Context, req *vtadmin
)
for ks, shardSet := range shardsByKeyspace {
- for _, shard := range shardSet.List() {
+ for _, shard := range sets.List(shardSet) {
wg.Add(1)
go func(keyspace, shard string) {
@@ -1890,7 +1890,7 @@ func (c *Cluster) GetWorkflow(ctx context.Context, keyspace string, name string,
// requests.
type GetWorkflowsOptions struct {
ActiveOnly bool
- IgnoreKeyspaces sets.String
+ IgnoreKeyspaces sets.Set[string]
}
// GetWorkflows returns a list of Workflows in this cluster, across the given
@@ -2046,7 +2046,7 @@ func (c *Cluster) reloadKeyspaceSchemas(ctx context.Context, req *vtadminpb.Relo
return resp.Keyspaces, nil
}
- keyspaceNames := sets.NewString(req.Keyspaces...)
+ keyspaceNames := sets.New[string](req.Keyspaces...)
for _, ks := range resp.Keyspaces {
if keyspaceNames.Has(ks.Name) {
@@ -2184,7 +2184,7 @@ func (c *Cluster) reloadShardSchemas(ctx context.Context, req *vtadminpb.ReloadS
// reloadTabletSchemas reloads schemas in one or more tablets in the cluster.
func (c *Cluster) reloadTabletSchemas(ctx context.Context, req *vtadminpb.ReloadSchemasRequest) ([]*vtadminpb.ReloadSchemasResponse_TabletResult, error) {
- aliasSet := sets.NewString()
+ aliasSet := sets.New[string]()
for _, alias := range req.Tablets {
aliasSet.Insert(topoproto.TabletAliasString(alias))
}
diff --git a/go/vt/vtadmin/cluster/cluster_internal_test.go b/go/vt/vtadmin/cluster/cluster_internal_test.go
index 4080ca54b0b..66901b06682 100644
--- a/go/vt/vtadmin/cluster/cluster_internal_test.go
+++ b/go/vt/vtadmin/cluster/cluster_internal_test.go
@@ -453,41 +453,41 @@ func Test_getShardSets(t *testing.T) {
name string
keyspaces []string
keyspaceShards []string
- result map[string]sets.String
+ result map[string]sets.Set[string]
shouldErr bool
}{
{
name: "all keyspaces and shards",
keyspaces: nil,
keyspaceShards: nil,
- result: map[string]sets.String{
- "ks1": sets.NewString("-80", "80-"),
- "ks2": sets.NewString("-"),
+ result: map[string]sets.Set[string]{
+ "ks1": sets.New[string]("-80", "80-"),
+ "ks2": sets.New[string]("-"),
},
},
{
name: "keyspaceShards filter",
keyspaces: nil,
keyspaceShards: []string{"ks1/-80", "ks2/-"},
- result: map[string]sets.String{
- "ks1": sets.NewString("-80"),
- "ks2": sets.NewString("-"),
+ result: map[string]sets.Set[string]{
+ "ks1": sets.New[string]("-80"),
+ "ks2": sets.New[string]("-"),
},
},
{
name: "keyspace and shards filters",
keyspaces: []string{"ks1"},
keyspaceShards: []string{"ks1/80-"},
- result: map[string]sets.String{
- "ks1": sets.NewString("80-"),
+ result: map[string]sets.Set[string]{
+ "ks1": sets.New[string]("80-"),
},
},
{
name: "skipped non-existing shards and keyspaces",
keyspaces: nil,
keyspaceShards: []string{"ks1/-" /* does not exist */, "ks1/-80", "ks1/80-", "ks3/-" /* does not exist */},
- result: map[string]sets.String{
- "ks1": sets.NewString("-80", "80-"),
+ result: map[string]sets.Set[string]{
+ "ks1": sets.New[string]("-80", "80-"),
},
},
}
diff --git a/go/vt/vtadmin/cluster/cluster_test.go b/go/vt/vtadmin/cluster/cluster_test.go
index 2313de06d6b..e2317d8ab4b 100644
--- a/go/vt/vtadmin/cluster/cluster_test.go
+++ b/go/vt/vtadmin/cluster/cluster_test.go
@@ -977,7 +977,7 @@ func TestFindWorkflows(t *testing.T) {
},
keyspaces: []string{"ks2"},
opts: cluster.FindWorkflowsOptions{
- IgnoreKeyspaces: sets.NewString("ks2"),
+ IgnoreKeyspaces: sets.New[string]("ks2"),
},
expected: &vtadminpb.ClusterWorkflows{
Workflows: []*vtadminpb.Workflow{
@@ -1047,7 +1047,7 @@ func TestFindWorkflows(t *testing.T) {
},
keyspaces: nil,
opts: cluster.FindWorkflowsOptions{
- IgnoreKeyspaces: sets.NewString("ks2"),
+ IgnoreKeyspaces: sets.New[string]("ks2"),
},
expected: &vtadminpb.ClusterWorkflows{
Workflows: []*vtadminpb.Workflow{
diff --git a/go/vt/vtadmin/http/clusters.go b/go/vt/vtadmin/http/clusters.go
index ff02719679a..77c51d9e78d 100644
--- a/go/vt/vtadmin/http/clusters.go
+++ b/go/vt/vtadmin/http/clusters.go
@@ -18,8 +18,12 @@ package http
import (
"context"
+ "encoding/json"
+
+ "github.com/gorilla/mux"
vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin"
+ "vitess.io/vitess/go/vt/vtadmin/errors"
)
// GetClusters implements the http wrapper for /clusters
@@ -27,3 +31,41 @@ func GetClusters(ctx context.Context, r Request, api *API) *JSONResponse {
clusters, err := api.server.GetClusters(ctx, &vtadminpb.GetClustersRequest{})
return NewJSONResponse(clusters, err)
}
+
+// GetTopologyPath implements the http wrapper for /cluster/{cluster_id}/topology
+//
+// Query params:
+// - path: string
+func GetTopologyPath(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := r.Vars()
+ query := r.URL.Query()
+
+ result, err := api.server.GetTopologyPath(ctx, &vtadminpb.GetTopologyPathRequest{
+ ClusterId: vars["cluster_id"],
+ Path: query["path"][0],
+ })
+ return NewJSONResponse(result, err)
+}
+
+// Validate implements the http wrapper for /cluster/{cluster_id}/validate
+func Validate(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := mux.Vars(r.Request)
+ decoder := json.NewDecoder(r.Body)
+ defer r.Body.Close()
+
+ var result struct {
+ PingTablets bool `json:"pingTablets"`
+ }
+
+ if err := decoder.Decode(&result); err != nil {
+ return NewJSONResponse(nil, &errors.BadRequest{
+ Err: err,
+ })
+ }
+
+ resp, err := api.server.Validate(ctx, &vtadminpb.ValidateRequest{
+ ClusterId: vars["cluster_id"],
+ PingTablets: result.PingTablets,
+ })
+ return NewJSONResponse(resp, err)
+}
diff --git a/go/vt/vtadmin/http/shards.go b/go/vt/vtadmin/http/shards.go
index b0555f70f13..79a5f9fdb7d 100644
--- a/go/vt/vtadmin/http/shards.go
+++ b/go/vt/vtadmin/http/shards.go
@@ -70,7 +70,7 @@ func DeleteShards(ctx context.Context, r Request, api *API) *JSONResponse {
}
shardList := r.URL.Query()["keyspace_shard"]
- shardList = sets.NewString(shardList...).List()
+ shardList = sets.List(sets.New[string](shardList...))
shards := make([]*vtctldatapb.Shard, len(shardList))
for i, kss := range shardList {
ks, shard, err := topoproto.ParseKeyspaceShard(kss)
@@ -199,3 +199,54 @@ func ReloadSchemaShard(ctx context.Context, r Request, api *API) *JSONResponse {
})
return NewJSONResponse(result, err)
}
+
+// ValidateShard implements the http wrapper for
+// PUT /shard/{cluster_id}/{keyspace}/{shard}/validate
+//
+// Query params: none
+//
+// Body params:
+// - ping_tablets: bool
+func ValidateShard(ctx context.Context, r Request, api *API) *JSONResponse {
+ decoder := json.NewDecoder(r.Body)
+ defer r.Body.Close()
+
+ var params struct {
+ PingTablets bool `json:"ping_tablets"`
+ }
+
+ if err := decoder.Decode(¶ms); err != nil {
+ return NewJSONResponse(nil, &errors.BadRequest{
+ Err: err,
+ })
+ }
+
+ vars := r.Vars()
+
+ result, err := api.server.ValidateShard(ctx, &vtadminpb.ValidateShardRequest{
+ ClusterId: vars["cluster_id"],
+ Keyspace: vars["keyspace"],
+ Shard: vars["shard"],
+ PingTablets: params.PingTablets,
+ })
+
+ return NewJSONResponse(result, err)
+}
+
+// ValidateVersionShard implements the http wrapper for
+// PUT /shard/{cluster_id}/{keyspace}/{shard}/validate_version
+//
+// Query params: none
+//
+// Body params: none
+func ValidateVersionShard(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := r.Vars()
+
+ result, err := api.server.ValidateVersionShard(ctx, &vtadminpb.ValidateVersionShardRequest{
+ ClusterId: vars["cluster_id"],
+ Keyspace: vars["keyspace"],
+ Shard: vars["shard"],
+ })
+
+ return NewJSONResponse(result, err)
+}
diff --git a/go/vt/vtadmin/http/tablets.go b/go/vt/vtadmin/http/tablets.go
index 322092d8b97..b812fd1aebb 100644
--- a/go/vt/vtadmin/http/tablets.go
+++ b/go/vt/vtadmin/http/tablets.go
@@ -22,6 +22,22 @@ import (
vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin"
)
+// GetFullStatus implements the http wrapper for /tablets/{tablet}/full_status
+func GetFullStatus(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := r.Vars()
+
+ alias, err := vars.GetTabletAlias("tablet")
+ if err != nil {
+ return NewJSONResponse(nil, err)
+ }
+ status, err := api.server.GetFullStatus(ctx, &vtadminpb.GetFullStatusRequest{
+ ClusterId: r.URL.Query()["cluster"][0],
+ Alias: alias,
+ })
+
+ return NewJSONResponse(status, err)
+}
+
// GetTablets implements the http wrapper for /tablets[?cluster=[&cluster=]].
func GetTablets(ctx context.Context, r Request, api *API) *JSONResponse {
tablets, err := api.server.GetTablets(ctx, &vtadminpb.GetTabletsRequest{
diff --git a/go/vt/vtadmin/rbac/config.go b/go/vt/vtadmin/rbac/config.go
index 196b76bca04..f3c3cfd847b 100644
--- a/go/vt/vtadmin/rbac/config.go
+++ b/go/vt/vtadmin/rbac/config.go
@@ -89,22 +89,22 @@ func (c *Config) Reify() error {
for i, rule := range c.Rules {
resourceRules := byResource[rule.Resource]
- actions := sets.NewString(rule.Actions...)
+ actions := sets.New[string](rule.Actions...)
if actions.Has("*") && actions.Len() > 1 {
// error to have wildcard and something else
- rec.RecordError(fmt.Errorf("rule %d: actions list cannot include wildcard and other actions, have %v", i, actions.List()))
+ rec.RecordError(fmt.Errorf("rule %d: actions list cannot include wildcard and other actions, have %v", i, sets.List(actions)))
}
- subjects := sets.NewString(rule.Subjects...)
+ subjects := sets.New[string](rule.Subjects...)
if subjects.Has("*") && subjects.Len() > 1 {
// error to have wildcard and something else
- rec.RecordError(fmt.Errorf("rule %d: subjects list cannot include wildcard and other subjects, have %v", i, subjects.List()))
+ rec.RecordError(fmt.Errorf("rule %d: subjects list cannot include wildcard and other subjects, have %v", i, sets.List(subjects)))
}
- clusters := sets.NewString(rule.Clusters...)
+ clusters := sets.New[string](rule.Clusters...)
if clusters.Has("*") && clusters.Len() > 1 {
// error to have wildcard and something else
- rec.RecordError(fmt.Errorf("rule %d: clusters list cannot include wildcard and other clusters, have %v", i, clusters.List()))
+ rec.RecordError(fmt.Errorf("rule %d: clusters list cannot include wildcard and other clusters, have %v", i, sets.List(clusters)))
}
resourceRules = append(resourceRules, &Rule{
@@ -188,9 +188,9 @@ func DefaultConfig() *Config {
cfg := map[string][]*Rule{
"*": {
{
- clusters: sets.NewString(clusters...),
- actions: sets.NewString(actions...),
- subjects: sets.NewString(subjects...),
+ clusters: sets.New[string](clusters...),
+ actions: sets.New[string](actions...),
+ subjects: sets.New[string](subjects...),
},
},
}
diff --git a/go/vt/vtadmin/rbac/rbac.go b/go/vt/vtadmin/rbac/rbac.go
index 2aba889ea46..7b5b0e8c8e8 100644
--- a/go/vt/vtadmin/rbac/rbac.go
+++ b/go/vt/vtadmin/rbac/rbac.go
@@ -90,7 +90,8 @@ type Resource string
// Resource definitions.
const (
- ClusterResource Resource = "Cluster"
+ ClusterResource Resource = "Cluster"
+ TopologyResource Resource = "Topology"
/* generic topo resources */
@@ -115,4 +116,6 @@ const (
WorkflowResource Resource = "Workflow"
VTExplainResource Resource = "VTExplain"
+
+ TabletFullStatusResource Resource = "TabletFullStatus"
)
diff --git a/go/vt/vtadmin/rbac/rule.go b/go/vt/vtadmin/rbac/rule.go
index 7504dfe4200..c10890f5747 100644
--- a/go/vt/vtadmin/rbac/rule.go
+++ b/go/vt/vtadmin/rbac/rule.go
@@ -24,9 +24,9 @@ import (
// Rule is a single rule governing access to a particular resource.
type Rule struct {
- clusters sets.String
- actions sets.String
- subjects sets.String
+ clusters sets.Set[string]
+ actions sets.Set[string]
+ subjects sets.Set[string]
}
// Allows returns true if the actor is allowed to take the specified action in
diff --git a/go/vt/vtadmin/testutil/authztestgen/config.json b/go/vt/vtadmin/testutil/authztestgen/config.json
index 88470fe7e86..ac89d7f5557 100644
--- a/go/vt/vtadmin/testutil/authztestgen/config.json
+++ b/go/vt/vtadmin/testutil/authztestgen/config.json
@@ -23,7 +23,7 @@
{
"field": "FindAllShardsInKeyspaceResults",
"type": "map[string]struct{\nResponse *vtctldatapb.FindAllShardsInKeyspaceResponse\nError error}",
- "value": "\"test\": {\nResponse: &vtctldatapb.FindAllShardsInKeyspaceResponse{\nShards: map[string]*vtctldatapb.Shard{\n\"-\": {\nKeyspace: \"test\",\nName: \"-\",\nShard: &topodatapb.Shard{\nKeyRange: &topodatapb.KeyRange{},\n},\n},\n},\n},\n},"
+ "value": "\"test\": {\nResponse: &vtctldatapb.FindAllShardsInKeyspaceResponse{\nShards: map[string]*vtctldatapb.Shard{\n\"-\": {\nKeyspace: \"test\",\nName: \"-\",\nShard: &topodatapb.Shard{\nKeyRange: &topodatapb.KeyRange{},\nIsPrimaryServing: true,\n},\n},\n},\n},\n},"
},
{
"field": "GetBackupsResults",
diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go
index 6164aa793e8..33548e1e1d3 100644
--- a/go/vt/vtcombo/tablet_map.go
+++ b/go/vt/vtcombo/tablet_map.go
@@ -661,6 +661,12 @@ func (itc *internalTabletConn) Release(ctx context.Context, target *querypb.Targ
return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err))
}
+// GetSchema is part of the QueryService interface.
+func (itc *internalTabletConn) GetSchema(ctx context.Context, target *querypb.Target, tableType querypb.SchemaTableType, tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error {
+ err := itc.tablet.qsc.QueryService().GetSchema(ctx, target, tableType, tableNames, callback)
+ return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err))
+}
+
// Close is part of queryservice.QueryService
func (itc *internalTabletConn) Close(ctx context.Context) error {
return nil
@@ -903,7 +909,7 @@ func (itmc *internalTabletManagerClient) Backup(context.Context, *topodatapb.Tab
return nil, fmt.Errorf("not implemented in vtcombo")
}
-func (itmc *internalTabletManagerClient) RestoreFromBackup(context.Context, *topodatapb.Tablet, time.Time) (logutil.EventStream, error) {
+func (itmc *internalTabletManagerClient) RestoreFromBackup(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.RestoreFromBackupRequest) (logutil.EventStream, error) {
return nil, fmt.Errorf("not implemented in vtcombo")
}
diff --git a/go/vt/vtctl/backup.go b/go/vt/vtctl/backup.go
index 7d73c352efc..8087580125c 100644
--- a/go/vt/vtctl/backup.go
+++ b/go/vt/vtctl/backup.go
@@ -55,24 +55,24 @@ func init() {
params: " ",
help: "Removes a backup for the BackupStorage.",
})
-
addCommand("Tablets", command{
name: "Backup",
method: commandBackup,
- params: "[--concurrency=4] [--allow_primary=false] ",
- help: "Stops mysqld and uses the BackupStorage service to store a new backup. This function also remembers if the tablet was replicating so that it can restore the same state after the backup completes.",
+ params: "[--concurrency=4] [--allow_primary=false] [--incremental_from_pos=] ",
+ help: "Run a full or an incremental backup. Uses the BackupStorage service to store a new backup. With full backup, stops mysqld, takes the backup, starts mysqld and resumes replication. With incremental backup (indicated by '--incremental_from_pos', rotate and copy binary logs without disrupting the mysqld service).",
})
addCommand("Tablets", command{
name: "RestoreFromBackup",
method: commandRestoreFromBackup,
- params: "[--backup_timestamp=yyyy-MM-dd.HHmmss] ",
- help: "Stops mysqld and restores the data from the latest backup or if a timestamp is specified then the most recent backup at or before that time.",
+ params: "[--backup_timestamp=yyyy-MM-dd.HHmmss] [--restore_to_pos=] [--dry_run] ",
+ help: "Stops mysqld and restores the data from the latest backup or if a timestamp is specified then the most recent backup at or before that time. If '--restore_to_pos' is given, then a point in time restore based on one full backup followed by zero or more incremental backups. dry-run only validates restore steps without actually restoring data",
})
}
func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously")
allowPrimary := subFlags.Bool("allow_primary", false, "Allows backups to be taken on primary. Warning!! If you are using the builtin backup engine, this will shutdown your primary mysql for as long as it takes to create a backup.")
+ incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position")
if err := subFlags.Parse(args); err != nil {
return err
@@ -87,9 +87,10 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F
}
return wr.VtctldServer().Backup(&vtctldatapb.BackupRequest{
- TabletAlias: tabletAlias,
- Concurrency: uint64(*concurrency),
- AllowPrimary: *allowPrimary,
+ TabletAlias: tabletAlias,
+ Concurrency: uint64(*concurrency),
+ AllowPrimary: *allowPrimary,
+ IncrementalFromPos: *incrementalFromPos,
}, &backupEventStreamLogger{logger: wr.Logger(), ctx: ctx})
}
@@ -200,6 +201,8 @@ func (b *backupRestoreEventStreamLogger) Send(resp *vtctldatapb.RestoreFromBacku
func commandRestoreFromBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
backupTimestampStr := subFlags.String("backup_timestamp", "", "Use the backup taken at or before this timestamp rather than using the latest backup.")
+ restoreToPos := subFlags.String("restore_to_pos", "", "Run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups")
+ dryRun := subFlags.Bool("dry_run", false, "Only validate restore steps, do not actually restore data")
if err := subFlags.Parse(args); err != nil {
return err
}
@@ -225,7 +228,9 @@ func commandRestoreFromBackup(ctx context.Context, wr *wrangler.Wrangler, subFla
}
req := &vtctldatapb.RestoreFromBackupRequest{
- TabletAlias: tabletAlias,
+ TabletAlias: tabletAlias,
+ RestoreToPos: *restoreToPos,
+ DryRun: *dryRun,
}
if !backupTime.IsZero() {
diff --git a/go/vt/vtctl/endtoend/onlineddl_show_test.go b/go/vt/vtctl/endtoend/onlineddl_show_test.go
index 9d68e780d1e..0b982bc7545 100644
--- a/go/vt/vtctl/endtoend/onlineddl_show_test.go
+++ b/go/vt/vtctl/endtoend/onlineddl_show_test.go
@@ -2,16 +2,14 @@ package endtoend
import (
"context"
- "errors"
+ "strings"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/vtctl"
"vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil"
@@ -121,11 +119,18 @@ func onlineDDLTest(t *testing.T, args []string, expectedQuery string) {
logger := logutil.NewMemoryLogger()
wr := wrangler.New(logger, fakeTopo, &tmc)
- wr.VExecFunc = func(ctx context.Context, workflow, keyspace, query string, dryRun bool) (map[*topo.TabletInfo]*sqltypes.Result, error) {
- assert.Equal(t, query, expectedQuery)
- return nil, errors.New("I failed in the test")
- }
-
err := vtctl.RunCommand(ctx, wr, args)
- assert.ErrorContains(t, err, "I failed in the test")
+ assert.Error(t, err)
+ assert.NotEmpty(t, err.Error())
+ containsExpectedError := false
+ expectedErrors := []string{
+ "unable to get shard names for keyspace",
+ "no ExecuteFetchAsDba results on fake TabletManagerClient",
+ }
+ for _, expect := range expectedErrors {
+ if strings.Contains(err.Error(), expect) {
+ containsExpectedError = true
+ }
+ }
+ assert.Truef(t, containsExpectedError, "expecting error <%v> to contain either of: %v", err.Error(), expectedErrors)
}
diff --git a/go/vt/vtctl/grpcvtctldclient/client_gen.go b/go/vt/vtctl/grpcvtctldclient/client_gen.go
index 777a9ac44d7..6267d650c7a 100644
--- a/go/vt/vtctl/grpcvtctldclient/client_gen.go
+++ b/go/vt/vtctl/grpcvtctldclient/client_gen.go
@@ -47,15 +47,6 @@ func (client *gRPCVtctldClient) AddCellsAlias(ctx context.Context, in *vtctldata
return client.c.AddCellsAlias(ctx, in, opts...)
}
-// ApplyShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *gRPCVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
- if client.c == nil {
- return nil, status.Error(codes.Unavailable, connClosedMsg)
- }
-
- return client.c.ApplyShardRoutingRules(ctx, in, opts...)
-}
-
// ApplyRoutingRules is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) ApplyRoutingRules(ctx context.Context, in *vtctldatapb.ApplyRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyRoutingRulesResponse, error) {
if client.c == nil {
@@ -74,6 +65,15 @@ func (client *gRPCVtctldClient) ApplySchema(ctx context.Context, in *vtctldatapb
return client.c.ApplySchema(ctx, in, opts...)
}
+// ApplyShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.ApplyShardRoutingRules(ctx, in, opts...)
+}
+
// ApplyVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) ApplyVSchema(ctx context.Context, in *vtctldatapb.ApplyVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyVSchemaResponse, error) {
if client.c == nil {
@@ -308,15 +308,6 @@ func (client *gRPCVtctldClient) GetRoutingRules(ctx context.Context, in *vtctlda
return client.c.GetRoutingRules(ctx, in, opts...)
}
-// GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *gRPCVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
- if client.c == nil {
- return nil, status.Error(codes.Unavailable, connClosedMsg)
- }
-
- return client.c.GetShardRoutingRules(ctx, in, opts...)
-}
-
// GetSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaResponse, error) {
if client.c == nil {
@@ -335,6 +326,15 @@ func (client *gRPCVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.Ge
return client.c.GetShard(ctx, in, opts...)
}
+// GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.GetShardRoutingRules(ctx, in, opts...)
+}
+
// GetSrvKeyspaceNames is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) GetSrvKeyspaceNames(ctx context.Context, in *vtctldatapb.GetSrvKeyspaceNamesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSrvKeyspaceNamesResponse, error) {
if client.c == nil {
@@ -389,6 +389,15 @@ func (client *gRPCVtctldClient) GetTablets(ctx context.Context, in *vtctldatapb.
return client.c.GetTablets(ctx, in, opts...)
}
+// GetTopologyPath is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) GetTopologyPath(ctx context.Context, in *vtctldatapb.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldatapb.GetTopologyPathResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.GetTopologyPath(ctx, in, opts...)
+}
+
// GetVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) GetVSchema(ctx context.Context, in *vtctldatapb.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetVSchemaResponse, error) {
if client.c == nil {
@@ -704,6 +713,15 @@ func (client *gRPCVtctldClient) UpdateCellsAlias(ctx context.Context, in *vtctld
return client.c.UpdateCellsAlias(ctx, in, opts...)
}
+// UpdateThrottlerConfig is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) UpdateThrottlerConfig(ctx context.Context, in *vtctldatapb.UpdateThrottlerConfigRequest, opts ...grpc.CallOption) (*vtctldatapb.UpdateThrottlerConfigResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.UpdateThrottlerConfig(ctx, in, opts...)
+}
+
// Validate is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) Validate(ctx context.Context, in *vtctldatapb.ValidateRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateResponse, error) {
if client.c == nil {
@@ -757,3 +775,12 @@ func (client *gRPCVtctldClient) ValidateVersionKeyspace(ctx context.Context, in
return client.c.ValidateVersionKeyspace(ctx, in, opts...)
}
+
+// ValidateVersionShard is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) ValidateVersionShard(ctx context.Context, in *vtctldatapb.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateVersionShardResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.ValidateVersionShard(ctx, in, opts...)
+}
diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go
index 19a580c451c..fb1f1f5b40e 100644
--- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go
+++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go
@@ -21,6 +21,8 @@ import (
"fmt"
"testing"
+ "vitess.io/vitess/go/vt/mysqlctl"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -52,36 +54,29 @@ func TestInitShardPrimary(t *testing.T) {
tablet1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE RESET ALL REPLICATION",
- "CREATE DATABASE IF NOT EXISTS _vt",
- "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal",
- "ALTER TABLE _vt.reparent_journal CHANGE COLUMN master_alias primary_alias VARBINARY(32) NOT NULL",
- "CREATE DATABASE IF NOT EXISTS _vt",
- "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal",
- "ALTER TABLE _vt.reparent_journal CHANGE COLUMN master_alias primary_alias VARBINARY(32) NOT NULL",
+ mysqlctl.GenerateInitialBinlogEntry(),
"SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES",
}
tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
// These come from InitShardPrimary
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
tablet2.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet2.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort))
tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -119,19 +114,13 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) {
tablet1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE RESET ALL REPLICATION",
- "CREATE DATABASE IF NOT EXISTS _vt",
- "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal",
- "ALTER TABLE _vt.reparent_journal CHANGE COLUMN master_alias primary_alias VARBINARY(32) NOT NULL",
- "CREATE DATABASE IF NOT EXISTS _vt",
- "SUBCREATE TABLE IF NOT EXISTS _vt.reparent_journal",
- "ALTER TABLE _vt.reparent_journal CHANGE COLUMN master_alias primary_alias VARBINARY(32) NOT NULL",
+ mysqlctl.GenerateInitialBinlogEntry(),
"SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES",
}
tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -140,7 +129,6 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) {
tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go
index 7423efad50f..1885536f98f 100644
--- a/go/vt/vtctl/grpcvtctldserver/server.go
+++ b/go/vt/vtctl/grpcvtctldserver/server.go
@@ -97,6 +97,16 @@ func NewVtctldServer(ts *topo.Server) *VtctldServer {
}
}
+// NewTestVtctldServer returns a new VtctldServer for the given topo server
+// AND tmclient for use in tests. This should NOT be used in production.
+func NewTestVtctldServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *VtctldServer {
+ return &VtctldServer{
+ ts: ts,
+ tmc: tmc,
+ ws: workflow.NewServer(ts, tmc),
+ }
+}
+
func panicHandler(err *error) {
if x := recover(); x != nil {
*err = fmt.Errorf("uncaught panic: %v", x)
@@ -382,6 +392,7 @@ func (s *VtctldServer) Backup(req *vtctldatapb.BackupRequest, stream vtctlservic
span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias))
span.Annotate("allow_primary", req.AllowPrimary)
span.Annotate("concurrency", req.Concurrency)
+ span.Annotate("incremental_from_pos", req.IncrementalFromPos)
ti, err := s.ts.GetTablet(ctx, req.TabletAlias)
if err != nil {
@@ -456,7 +467,11 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v
func (s *VtctldServer) backupTablet(ctx context.Context, tablet *topodatapb.Tablet, req *vtctldatapb.BackupRequest, stream interface {
Send(resp *vtctldatapb.BackupResponse) error
}) error {
- r := &tabletmanagerdatapb.BackupRequest{Concurrency: int64(req.Concurrency), AllowPrimary: req.AllowPrimary}
+ r := &tabletmanagerdatapb.BackupRequest{
+ Concurrency: int64(req.Concurrency),
+ AllowPrimary: req.AllowPrimary,
+ IncrementalFromPos: req.IncrementalFromPos,
+ }
logStream, err := s.tmc.Backup(ctx, tablet, r)
if err != nil {
return err
@@ -992,7 +1007,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat
req.Shard,
reparentutil.EmergencyReparentOptions{
NewPrimaryAlias: req.NewPrimary,
- IgnoreReplicas: sets.NewString(ignoreReplicaAliases...),
+ IgnoreReplicas: sets.New[string](ignoreReplicaAliases...),
WaitReplicasTimeout: waitReplicasTimeout,
PreventCrossCellPromotion: req.PreventCrossCellPromotion,
},
@@ -1552,6 +1567,72 @@ func (s *VtctldServer) GetSrvKeyspaces(ctx context.Context, req *vtctldatapb.Get
}, nil
}
+// UpdateThrottlerConfig updates throttler config for all cells
+func (s *VtctldServer) UpdateThrottlerConfig(ctx context.Context, req *vtctldatapb.UpdateThrottlerConfigRequest) (resp *vtctldatapb.UpdateThrottlerConfigResponse, err error) {
+ span, ctx := trace.NewSpan(ctx, "VtctldServer.UpdateThrottlerConfig")
+ defer span.Finish()
+
+ defer panicHandler(&err)
+
+ if req.Enable && req.Disable {
+ return nil, fmt.Errorf("--enable and --disable are mutually exclusive")
+ }
+ if req.CheckAsCheckSelf && req.CheckAsCheckShard {
+ return nil, fmt.Errorf("--check-as-check-self and --check-as-check-shard are mutually exclusive")
+ }
+
+ update := func(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig {
+ if throttlerConfig == nil {
+ throttlerConfig = &topodatapb.ThrottlerConfig{}
+ }
+ if req.CustomQuerySet {
+ // custom query provided
+ throttlerConfig.CustomQuery = req.CustomQuery
+ throttlerConfig.Threshold = req.Threshold // allowed to be zero/negative because who knows what kind of custom query this is
+ } else {
+ // no custom query, throttler works by querying replication lag. We only allow positive values
+ if req.Threshold > 0 {
+ throttlerConfig.Threshold = req.Threshold
+ }
+ }
+ if req.Enable {
+ throttlerConfig.Enabled = true
+ }
+ if req.Disable {
+ throttlerConfig.Enabled = false
+ }
+ if req.CheckAsCheckSelf {
+ throttlerConfig.CheckAsCheckSelf = true
+ }
+ if req.CheckAsCheckShard {
+ throttlerConfig.CheckAsCheckSelf = false
+ }
+ return throttlerConfig
+ }
+
+ ctx, unlock, lockErr := s.ts.LockKeyspace(ctx, req.Keyspace, "UpdateThrottlerConfig")
+ if lockErr != nil {
+ return nil, lockErr
+ }
+ defer unlock(&err)
+
+ ki, err := s.ts.GetKeyspace(ctx, req.Keyspace)
+ if err != nil {
+ return nil, err
+ }
+
+ ki.ThrottlerConfig = update(ki.ThrottlerConfig)
+
+ err = s.ts.UpdateKeyspace(ctx, ki)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = s.ts.UpdateSrvKeyspaceThrottlerConfig(ctx, req.Keyspace, []string{}, update)
+
+ return &vtctldatapb.UpdateThrottlerConfigResponse{}, err
+}
+
// GetSrvVSchema is part of the vtctlservicepb.VtctldServer interface.
func (s *VtctldServer) GetSrvVSchema(ctx context.Context, req *vtctldatapb.GetSrvVSchemaRequest) (resp *vtctldatapb.GetSrvVSchemaResponse, err error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetSrvVSchema")
@@ -1587,10 +1668,10 @@ func (s *VtctldServer) GetSrvVSchemas(ctx context.Context, req *vtctldatapb.GetS
// Omit any cell names in the request that don't map to existing cells
if len(req.Cells) > 0 {
- s1 := sets.NewString(allCells...)
- s2 := sets.NewString(req.Cells...)
+ s1 := sets.New[string](allCells...)
+ s2 := sets.New[string](req.Cells...)
- cells = s1.Intersection(s2).List()
+ cells = sets.List(s1.Intersection(s2))
}
span.Annotate("cells", strings.Join(cells, ","))
@@ -1813,6 +1894,39 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
}, nil
}
+// GetTopologyPath is part of the vtctlservicepb.VtctldServer interface.
+// It returns the cell located at the provided path in the topology server.
+func (s *VtctldServer) GetTopologyPath(ctx context.Context, req *vtctldatapb.GetTopologyPathRequest) (*vtctldatapb.GetTopologyPathResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "VtctldServer.GetTopology")
+ defer span.Finish()
+
+ // handle toplevel display: global, then one line per cell.
+ if req.Path == "/" {
+ cells, err := s.ts.GetKnownCells(ctx)
+ if err != nil {
+ return nil, err
+ }
+ resp := vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Path: req.Path,
+ // the toplevel display has no name, just children
+ Children: append([]string{topo.GlobalCell}, cells...),
+ },
+ }
+ return &resp, nil
+ }
+
+ // otherwise, delegate to getTopologyCell to parse the path and return the cell there
+ cell, err := s.getTopologyCell(ctx, req.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &vtctldatapb.GetTopologyPathResponse{
+ Cell: cell,
+ }, nil
+}
+
// GetVersion returns the version of a tablet from its debug vars
func (s *VtctldServer) GetVersion(ctx context.Context, req *vtctldatapb.GetVersionRequest) (resp *vtctldatapb.GetVersionResponse, err error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetVersion")
@@ -1826,7 +1940,7 @@ func (s *VtctldServer) GetVersion(ctx context.Context, req *vtctldatapb.GetVersi
return nil, err
}
- version, err := getVersionFromTablet(tablet.Addr())
+ version, err := GetVersionFunc()(tablet.Addr())
if err != nil {
return nil, err
}
@@ -2624,7 +2738,12 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque
span.Annotate("keyspace", ti.Keyspace)
span.Annotate("shard", ti.Shard)
- logStream, err := s.tmc.RestoreFromBackup(ctx, ti.Tablet, protoutil.TimeFromProto(req.BackupTime))
+ r := &tabletmanagerdatapb.RestoreFromBackupRequest{
+ BackupTime: req.BackupTime,
+ RestoreToPos: req.RestoreToPos,
+ DryRun: req.DryRun,
+ }
+ logStream, err := s.tmc.RestoreFromBackup(ctx, ti.Tablet, r)
if err != nil {
return err
}
@@ -2651,6 +2770,10 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque
if mysqlctl.DisableActiveReparents {
return nil
}
+ if req.RestoreToPos != "" && !req.DryRun {
+ // point in time recovery. Do not restore replication
+ return nil
+ }
// Otherwise, we find the correct primary tablet and set the
// replication source on the freshly-restored tablet, since the
@@ -3511,7 +3634,7 @@ func (s *VtctldServer) Validate(ctx context.Context, req *vtctldatapb.ValidateRe
span, ctx := trace.NewSpan(ctx, "VtctldServer.validateAllTablets")
defer span.Finish()
- cellSet := sets.NewString()
+ cellSet := sets.New[string]()
for _, keyspace := range keyspaces {
getShardNamesCtx, getShardNamesCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
shards, err := s.ts.GetShardNames(getShardNamesCtx, keyspace)
@@ -3542,7 +3665,7 @@ func (s *VtctldServer) Validate(ctx context.Context, req *vtctldatapb.ValidateRe
}
}
- for _, cell := range cellSet.List() {
+ for _, cell := range sets.List(cellSet) {
getTabletsByCellCtx, getTabletsByCellCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
aliases, err := s.ts.GetTabletAliasesByCell(getTabletsByCellCtx, cell)
getTabletsByCellCancel() // don't defer in a loop
@@ -4093,6 +4216,61 @@ func (s *VtctldServer) ValidateVersionKeyspace(ctx context.Context, req *vtctlda
return resp, err
}
+// ValidateVersionShard validates all versions are the same in all
+// tablets in a shard
+func (s *VtctldServer) ValidateVersionShard(ctx context.Context, req *vtctldatapb.ValidateVersionShardRequest) (resp *vtctldatapb.ValidateVersionShardResponse, err error) {
+ span, ctx := trace.NewSpan(ctx, "VtctldServer.ValidateVersionShard")
+ defer span.Finish()
+
+ defer panicHandler(&err)
+
+ shard, err := s.ts.GetShard(ctx, req.Keyspace, req.Shard)
+ if err != nil {
+ err = fmt.Errorf("GetShard(%s) failed: %v", req.Shard, err)
+ return nil, err
+ }
+
+ if !shard.HasPrimary() {
+ err = fmt.Errorf("no primary in shard %v/%v", req.Keyspace, req.Shard)
+ return nil, err
+ }
+
+ log.Infof("Gathering version for primary %v", topoproto.TabletAliasString(shard.PrimaryAlias))
+ primaryVersion, err := s.GetVersion(ctx, &vtctldatapb.GetVersionRequest{
+ TabletAlias: shard.PrimaryAlias,
+ })
+ if err != nil {
+ err = fmt.Errorf("GetVersion(%s) failed: %v", topoproto.TabletAliasString(shard.PrimaryAlias), err)
+ return nil, err
+ }
+
+ aliases, err := s.ts.FindAllTabletAliasesInShard(ctx, req.Keyspace, req.Shard)
+ if err != nil {
+ err = fmt.Errorf("FindAllTabletAliasesInShard(%s, %s) failed: %v", req.Keyspace, req.Shard, err)
+ return nil, err
+ }
+
+ er := concurrency.AllErrorRecorder{}
+ wg := sync.WaitGroup{}
+ for _, alias := range aliases {
+ if topoproto.TabletAliasEqual(alias, shard.PrimaryAlias) {
+ continue
+ }
+
+ wg.Add(1)
+ go s.diffVersion(ctx, primaryVersion.Version, shard.PrimaryAlias, alias, &wg, &er)
+ }
+
+ wg.Wait()
+
+ response := vtctldatapb.ValidateVersionShardResponse{}
+ if er.HasErrors() {
+ response.Results = append(response.Results, er.ErrorStrings()...)
+ }
+
+ return &response, nil
+}
+
// ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences
func (s *VtctldServer) ValidateVSchema(ctx context.Context, req *vtctldatapb.ValidateVSchemaRequest) (resp *vtctldatapb.ValidateVSchemaResponse, err error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.ValidateVSchema")
@@ -4183,6 +4361,54 @@ func StartServer(s *grpc.Server, ts *topo.Server) {
vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts))
}
+// getTopologyCell is a helper method that returns a topology cell given its path.
+func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*vtctldatapb.TopologyCell, error) {
+ // extract cell and relative path
+ parts := strings.Split(cellPath, "/")
+ if parts[0] != "" || len(parts) < 2 {
+ err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid path: %s", cellPath)
+ return nil, err
+ }
+ cell := parts[1]
+ relativePath := cellPath[len(cell)+1:]
+ topoCell := vtctldatapb.TopologyCell{Name: parts[len(parts)-1], Path: cellPath}
+
+ conn, err := s.ts.ConnForCell(ctx, cell)
+ if err != nil {
+ err := vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "error fetching connection to cell %s: %v", cell, err)
+ return nil, err
+ }
+
+ data, _, dataErr := conn.Get(ctx, relativePath)
+
+ if dataErr == nil {
+ result, err := topo.DecodeContent(relativePath, data, false)
+ if err != nil {
+ err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "error decoding file content for cell %s: %v", cellPath, err)
+ return nil, err
+ }
+ topoCell.Data = result
+ // since there is data at this cell, it cannot be a directory cell
+ // so we can early return the topocell
+ return &topoCell, nil
+ }
+
+ children, childrenErr := conn.ListDir(ctx, relativePath, false /*full*/)
+
+ if childrenErr != nil && dataErr != nil {
+ err := vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cell %s with path %s has no file contents and no children: %v", cell, cellPath, err)
+ return nil, err
+ }
+
+ topoCell.Children = make([]string, len(children))
+
+ for i, c := range children {
+ topoCell.Children[i] = c.Name
+ }
+
+ return &topoCell, nil
+}
+
// Helper function to get version of a tablet from its debug vars
var getVersionFromTabletDebugVars = func(tabletAddr string) (string, error) {
resp, err := http.Get("http://" + tabletAddr + "/debug/vars")
@@ -4210,4 +4436,34 @@ var getVersionFromTabletDebugVars = func(tabletAddr string) (string, error) {
return version, nil
}
+var versionFuncMu sync.Mutex
var getVersionFromTablet = getVersionFromTabletDebugVars
+
+func SetVersionFunc(versionFunc func(string) (string, error)) {
+ versionFuncMu.Lock()
+ defer versionFuncMu.Unlock()
+ getVersionFromTablet = versionFunc
+}
+
+func GetVersionFunc() func(string) (string, error) {
+ versionFuncMu.Lock()
+ defer versionFuncMu.Unlock()
+ return getVersionFromTablet
+}
+
+// helper method to asynchronously get and diff a version
+func (s *VtctldServer) diffVersion(ctx context.Context, primaryVersion string, primaryAlias *topodatapb.TabletAlias, alias *topodatapb.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) {
+ defer wg.Done()
+ log.Infof("Gathering version for %v", topoproto.TabletAliasString(alias))
+ replicaVersion, err := s.GetVersion(ctx, &vtctldatapb.GetVersionRequest{
+ TabletAlias: alias,
+ })
+ if err != nil {
+ er.RecordError(fmt.Errorf("unable to get version for tablet %v: %v", alias, err))
+ return
+ }
+
+ if primaryVersion != replicaVersion.Version {
+ er.RecordError(fmt.Errorf("primary %v version %v is different than replica %v version %v", topoproto.TabletAliasString(primaryAlias), primaryVersion, topoproto.TabletAliasString(alias), replicaVersion))
+ }
+}
diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go
index d9b10b77b91..52b2176c962 100644
--- a/go/vt/vtctl/grpcvtctldserver/server_test.go
+++ b/go/vt/vtctl/grpcvtctldserver/server_test.go
@@ -21,10 +21,13 @@ import (
"errors"
"fmt"
"io"
+ "os"
"sort"
"testing"
"time"
+ _flag "vitess.io/vitess/go/internal/flag"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
@@ -5957,6 +5960,94 @@ func TestGetTablets(t *testing.T) {
}
}
+func TestGetTopologyPath(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ ts := memorytopo.NewServer("cell1", "cell2", "cell3")
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer {
+ return NewVtctldServer(ts)
+ })
+
+ err := ts.CreateKeyspace(ctx, "keyspace1", &topodatapb.Keyspace{})
+ require.NoError(t, err)
+
+ testutil.AddTablets(ctx, t, ts, nil, &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{Cell: "cell1", Uid: 100},
+ Hostname: "localhost",
+ Keyspace: "keyspace1",
+ MysqlHostname: "localhost",
+ MysqlPort: 17100,
+ })
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ path string
+ shouldErr bool
+ expected *vtctldatapb.GetTopologyPathResponse
+ }{
+ {
+ name: "root path",
+ path: "/",
+ expected: &vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Path: "/",
+ Children: []string{"global", "cell1", "cell2", "cell3"},
+ },
+ },
+ },
+ {
+ name: "invalid path",
+ path: "",
+ shouldErr: true,
+ },
+ {
+ name: "global path",
+ path: "/global",
+ expected: &vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Name: "global",
+ Path: "/global",
+ Children: []string{"cells", "keyspaces"},
+ },
+ },
+ },
+ {
+ name: "terminal data path",
+ path: "/cell1/tablets/cell1-0000000100/Tablet",
+ expected: &vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Name: "Tablet",
+ Path: "/cell1/tablets/cell1-0000000100/Tablet",
+ Data: "alias:{cell:\"cell1\" uid:100} hostname:\"localhost\" keyspace:\"keyspace1\" mysql_hostname:\"localhost\" mysql_port:17100",
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ resp, err := vtctld.GetTopologyPath(ctx, &vtctldatapb.GetTopologyPathRequest{
+ Path: tt.path,
+ })
+
+ if tt.shouldErr {
+ assert.Error(t, err)
+ return
+ }
+
+ assert.NoError(t, err)
+ utils.MustMatch(t, tt.expected, resp)
+ })
+ }
+}
+
func TestGetVSchema(t *testing.T) {
t.Parallel()
@@ -11139,7 +11230,7 @@ func TestValidateVersionKeyspace(t *testing.T) {
"primary:0": "version1",
"replica:0": "version1",
}
- getVersionFromTablet = testutil.MockGetVersionFromTablet(addrVersionMap)
+ SetVersionFunc(testutil.MockGetVersionFromTablet(addrVersionMap))
},
shouldErr: false,
},
@@ -11159,7 +11250,7 @@ func TestValidateVersionKeyspace(t *testing.T) {
"primary:0": "version1",
"replica:0": "version2",
}
- getVersionFromTablet = testutil.MockGetVersionFromTablet(addrVersionMap)
+ SetVersionFunc(testutil.MockGetVersionFromTablet(addrVersionMap))
},
shouldErr: false,
},
@@ -11180,6 +11271,119 @@ func TestValidateVersionKeyspace(t *testing.T) {
}
}
+func TestValidateVersionShard(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ ts := memorytopo.NewServer("zone1", "zone2")
+ tmc := testutil.TabletManagerClient{
+ GetSchemaResults: map[string]struct {
+ Schema *tabletmanagerdatapb.SchemaDefinition
+ Error error
+ }{},
+ }
+ testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{
+ Name: "ks",
+ Keyspace: &topodatapb.Keyspace{
+ KeyspaceType: topodatapb.KeyspaceType_NORMAL,
+ },
+ })
+
+ tablets := []*topodatapb.Tablet{
+ {
+ Keyspace: "ks",
+ Shard: "-",
+ Type: topodatapb.TabletType_PRIMARY,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Hostname: "primary",
+ },
+ {
+ Keyspace: "ks",
+ Shard: "-",
+ Type: topodatapb.TabletType_REPLICA,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Hostname: "replica",
+ },
+ }
+ testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{
+ AlsoSetShardPrimary: true,
+ ForceSetShardPrimary: true,
+ SkipShardCreation: false,
+ }, tablets...)
+
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer {
+ return NewVtctldServer(ts)
+ })
+
+ tests := []*struct {
+ name string
+ req *vtctldatapb.ValidateVersionShardRequest
+ expected *vtctldatapb.ValidateVersionShardResponse
+ setup func()
+ shouldErr bool
+ }{
+ {
+ name: "valid versions",
+ req: &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: "ks",
+ Shard: "-",
+ },
+ expected: &vtctldatapb.ValidateVersionShardResponse{
+ Results: []string{},
+ },
+ setup: func() {
+ addrVersionMap := map[string]string{
+ "primary:0": "version1",
+ "replica:0": "version1",
+ }
+ SetVersionFunc(testutil.MockGetVersionFromTablet(addrVersionMap))
+ },
+ shouldErr: false,
+ },
+ {
+ name: "different versions",
+ req: &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: "ks",
+ Shard: "-",
+ },
+ expected: &vtctldatapb.ValidateVersionShardResponse{
+ Results: []string{"primary zone1-0000000100 version version1 is different than replica zone1-0000000101 version version:\"version2\""},
+ },
+ setup: func() {
+ addrVersionMap := map[string]string{
+ "primary:0": "version1",
+ "replica:0": "version2",
+ }
+ SetVersionFunc(testutil.MockGetVersionFromTablet(addrVersionMap))
+ },
+ shouldErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ curT := tt
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ curT.setup()
+ resp, err := vtctld.ValidateVersionShard(ctx, curT.req)
+ if curT.shouldErr {
+ assert.Error(t, err)
+ return
+ }
+
+ assert.NoError(t, err)
+ utils.MustMatch(t, curT.expected, resp)
+ })
+ }
+}
+
func TestValidateShard(t *testing.T) {
t.Parallel()
@@ -11724,3 +11928,7 @@ func TestValidateShard(t *testing.T) {
})
}
}
+func TestMain(m *testing.M) {
+ _flag.ParseFlagsForTest()
+ os.Exit(m.Run())
+}
diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
index 5ed8a30190c..020eed4bd81 100644
--- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
+++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
@@ -900,7 +900,7 @@ func (stream *backupRestoreStreamAdapter) Send(msg *logutilpb.Event) error {
}
// RestoreFromBackup is part of the tmclient.TabletManagerClient interface.
-func (fake *TabletManagerClient) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time) (logutil.EventStream, error) {
+func (fake *TabletManagerClient) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.RestoreFromBackupRequest) (logutil.EventStream, error) {
key := topoproto.TabletAliasString(tablet.Alias)
testdata, ok := fake.RestoreFromBackupResults[key]
if !ok {
diff --git a/go/vt/vtctl/localvtctldclient/client_gen.go b/go/vt/vtctl/localvtctldclient/client_gen.go
index 45b8629f858..21d743de148 100644
--- a/go/vt/vtctl/localvtctldclient/client_gen.go
+++ b/go/vt/vtctl/localvtctldclient/client_gen.go
@@ -44,16 +44,16 @@ func (client *localVtctldClient) ApplyRoutingRules(ctx context.Context, in *vtct
return client.s.ApplyRoutingRules(ctx, in)
}
-// ApplyRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *localVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
- return client.s.ApplyShardRoutingRules(ctx, in)
-}
-
// ApplySchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) ApplySchema(ctx context.Context, in *vtctldatapb.ApplySchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplySchemaResponse, error) {
return client.s.ApplySchema(ctx, in)
}
+// ApplyShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
+ return client.s.ApplyShardRoutingRules(ctx, in)
+}
+
// ApplyVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) ApplyVSchema(ctx context.Context, in *vtctldatapb.ApplyVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyVSchemaResponse, error) {
return client.s.ApplyVSchema(ctx, in)
@@ -276,11 +276,6 @@ func (client *localVtctldClient) GetRoutingRules(ctx context.Context, in *vtctld
return client.s.GetRoutingRules(ctx, in)
}
-// GetRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *localVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
- return client.s.GetShardRoutingRules(ctx, in)
-}
-
// GetSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaResponse, error) {
return client.s.GetSchema(ctx, in)
@@ -291,6 +286,11 @@ func (client *localVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.G
return client.s.GetShard(ctx, in)
}
+// GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
+ return client.s.GetShardRoutingRules(ctx, in)
+}
+
// GetSrvKeyspaceNames is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) GetSrvKeyspaceNames(ctx context.Context, in *vtctldatapb.GetSrvKeyspaceNamesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSrvKeyspaceNamesResponse, error) {
return client.s.GetSrvKeyspaceNames(ctx, in)
@@ -321,6 +321,11 @@ func (client *localVtctldClient) GetTablets(ctx context.Context, in *vtctldatapb
return client.s.GetTablets(ctx, in)
}
+// GetTopologyPath is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) GetTopologyPath(ctx context.Context, in *vtctldatapb.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldatapb.GetTopologyPathResponse, error) {
+ return client.s.GetTopologyPath(ctx, in)
+}
+
// GetVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) GetVSchema(ctx context.Context, in *vtctldatapb.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetVSchemaResponse, error) {
return client.s.GetVSchema(ctx, in)
@@ -542,6 +547,11 @@ func (client *localVtctldClient) UpdateCellsAlias(ctx context.Context, in *vtctl
return client.s.UpdateCellsAlias(ctx, in)
}
+// UpdateThrottlerConfig is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) UpdateThrottlerConfig(ctx context.Context, in *vtctldatapb.UpdateThrottlerConfigRequest, opts ...grpc.CallOption) (*vtctldatapb.UpdateThrottlerConfigResponse, error) {
+ return client.s.UpdateThrottlerConfig(ctx, in)
+}
+
// Validate is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) Validate(ctx context.Context, in *vtctldatapb.ValidateRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateResponse, error) {
return client.s.Validate(ctx, in)
@@ -571,3 +581,8 @@ func (client *localVtctldClient) ValidateVSchema(ctx context.Context, in *vtctld
func (client *localVtctldClient) ValidateVersionKeyspace(ctx context.Context, in *vtctldatapb.ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateVersionKeyspaceResponse, error) {
return client.s.ValidateVersionKeyspace(ctx, in)
}
+
+// ValidateVersionShard is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) ValidateVersionShard(ctx context.Context, in *vtctldatapb.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateVersionShardResponse, error) {
+ return client.s.ValidateVersionShard(ctx, in)
+}
diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go
index b2f030e0756..43844eb9388 100644
--- a/go/vt/vtctl/reparent.go
+++ b/go/vt/vtctl/reparent.go
@@ -38,10 +38,12 @@ func init() {
help: "Reparent a tablet to the current primary in the shard. This only works if the current replication position matches the last known reparent action.",
})
addCommand("Shards", command{
- name: "InitShardPrimary",
- method: commandInitShardPrimary,
- params: "[--force] [--wait_replicas_timeout=] ",
- help: "Sets the initial primary for a shard. Will make all other tablets in the shard replicas of the provided tablet. WARNING: this could cause data loss on an already replicating shard. PlannedReparentShard or EmergencyReparentShard should be used instead.",
+ name: "InitShardPrimary",
+ method: commandInitShardPrimary,
+ params: "[--force] [--wait_replicas_timeout=] ",
+ help: "Sets the initial primary for a shard. Will make all other tablets in the shard replicas of the provided tablet. WARNING: this could cause data loss on an already replicating shard. PlannedReparentShard or EmergencyReparentShard should be used instead.",
+ deprecated: true,
+ deprecatedBy: "PlannedReparentShard",
})
addCommand("Shards", command{
name: "PlannedReparentShard",
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go
index ba846ebc147..b13d56a2f5e 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go
@@ -56,7 +56,7 @@ type EmergencyReparenter struct {
// for callers to mutate and reuse options structs for multiple calls.
type EmergencyReparentOptions struct {
NewPrimaryAlias *topodatapb.TabletAlias
- IgnoreReplicas sets.String
+ IgnoreReplicas sets.Set[string]
WaitReplicasTimeout time.Duration
PreventCrossCellPromotion bool
@@ -192,7 +192,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve
}
// Stop replication on all the tablets and build their status map
- stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, opts.WaitReplicasTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, erp.logger)
+ stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, topo.RemoteOperationTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, erp.logger)
if err != nil {
return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err)
}
@@ -609,6 +609,12 @@ func (erp *EmergencyReparenter) reparentReplicas(
// we're going to be explicit that this is doubly unexpected.
return nil, vterrors.Wrapf(rec.Error(), "received more errors (= %d) than replicas (= %d), which should be impossible: %v", errCount, numReplicas, rec.Error())
case errCount == numReplicas:
+ if len(tabletMap) <= 2 {
+ // If there are at most 2 tablets in the tablet map, we shouldn't be failing the promotion if the replica fails to SetReplicationSource.
+ // The failing replica is probably the old primary that is down, so it is okay if it fails. We still log a warning message in the logs.
+ erp.logger.Warningf("Failed to set the MySQL replication source during ERS but because there is only one other tablet we assume it is the one that had failed and will progress with the reparent. Error: %v", rec.Error())
+ return nil, nil
+ }
return nil, vterrors.Wrapf(rec.Error(), "%d replica(s) failed: %v", numReplicas, rec.Error())
default:
return replicasStartedReplication, nil
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
index 19d119a6ba2..b7d8045f136 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
@@ -1964,7 +1964,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) {
}{
{
name: "success",
- emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")},
+ emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")},
tmc: &testutil.TabletManagerClient{
PopulateReparentJournalResults: map[string]error{
"zone1-0000000100": nil,
@@ -2341,7 +2341,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) {
},
{
name: "success in initialization",
- emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")},
+ emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")},
tmc: &testutil.TabletManagerClient{
PopulateReparentJournalResults: map[string]error{
"zone1-0000000100": nil,
@@ -3165,7 +3165,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) {
}{
{
name: "success",
- emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")},
+ emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")},
tmc: &testutil.TabletManagerClient{
PopulateReparentJournalResults: map[string]error{
"zone1-0000000100": nil,
@@ -3489,6 +3489,49 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) {
shard: "-",
ts: memorytopo.NewServer("zone1"),
shouldErr: false,
+ }, {
+ name: "single replica failing to SetReplicationSource does not fail the promotion",
+ emergencyReparentOps: EmergencyReparentOptions{},
+ tmc: &testutil.TabletManagerClient{
+ PopulateReparentJournalResults: map[string]error{
+ "zone1-0000000100": nil,
+ },
+ PrimaryPositionResults: map[string]struct {
+ Position string
+ Error error
+ }{
+ "zone1-0000000100": {
+ Error: nil,
+ },
+ },
+ SetReplicationSourceResults: map[string]error{
+ "zone1-0000000101": assert.AnError,
+ },
+ },
+ newPrimaryTabletAlias: "zone1-0000000100",
+ tabletMap: map[string]*topo.TabletInfo{
+ "zone1-0000000100": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ },
+ },
+ "zone1-0000000101": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ },
+ },
+ },
+ statusMap: map[string]*replicationdatapb.StopReplicationStatus{},
+ keyspace: "testkeyspace",
+ shard: "-",
+ ts: memorytopo.NewServer("zone1"),
+ shouldErr: false,
},
}
@@ -3561,7 +3604,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) {
}{
{
name: "success",
- emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404")},
+ emergencyReparentOps: EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404")},
tmc: &testutil.TabletManagerClient{
PopulateReparentJournalResults: map[string]error{
"zone1-0000000100": nil,
@@ -4205,7 +4248,7 @@ func TestParentContextCancelled(t *testing.T) {
durability, err := GetDurabilityPolicy("none")
require.NoError(t, err)
// Setup ERS options with a very high wait replicas timeout
- emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: sets.NewString("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability}
+ emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability}
// Make the replica tablet return its results after 3 seconds
tmc := &testutil.TabletManagerClient{
PrimaryPositionResults: map[string]struct {
diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go
index fc0e1c80a06..8633afa13d0 100644
--- a/go/vt/vtctl/reparentutil/planned_reparenter.go
+++ b/go/vt/vtctl/reparentutil/planned_reparenter.go
@@ -213,7 +213,7 @@ func (pr *PlannedReparenter) performGracefulPromotion(
primaryElect *topodatapb.Tablet,
tabletMap map[string]*topo.TabletInfo,
opts PlannedReparentOptions,
-) (string, error) {
+) error {
primaryElectAliasStr := topoproto.TabletAliasString(primaryElect.Alias)
ev.OldPrimary = proto.Clone(currentPrimary.Tablet).(*topodatapb.Tablet)
@@ -231,7 +231,7 @@ func (pr *PlannedReparenter) performGracefulPromotion(
snapshotPos, err := pr.tmc.PrimaryPosition(snapshotCtx, currentPrimary.Tablet)
if err != nil {
- return "", vterrors.Wrapf(err, "cannot get replication position on current primary %v; current primary must be healthy to perform PlannedReparent", currentPrimary.AliasString())
+ return vterrors.Wrapf(err, "cannot get replication position on current primary %v; current primary must be healthy to perform PlannedReparent", currentPrimary.AliasString())
}
// Next, we wait for the primary-elect to catch up to that snapshot point.
@@ -246,12 +246,12 @@ func (pr *PlannedReparenter) performGracefulPromotion(
defer setSourceCancel()
if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, IsReplicaSemiSync(opts.durability, currentPrimary.Tablet, primaryElect)); err != nil {
- return "", vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr)
+ return vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr)
}
// Verify we still have the topology lock before doing the demotion.
if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil {
- return "", vterrors.Wrap(err, "lost topology lock; aborting")
+ return vterrors.Wrap(err, "lost topology lock; aborting")
}
// Next up, demote the current primary and get its replication position.
@@ -265,7 +265,7 @@ func (pr *PlannedReparenter) performGracefulPromotion(
primaryStatus, err := pr.tmc.DemotePrimary(demoteCtx, currentPrimary.Tablet)
if err != nil {
- return "", vterrors.Wrapf(err, "failed to DemotePrimary on current primary %v: %v", currentPrimary.AliasString(), err)
+ return vterrors.Wrapf(err, "failed to DemotePrimary on current primary %v: %v", currentPrimary.AliasString(), err)
}
// Wait for the primary-elect to catch up to the position we demoted the
@@ -298,26 +298,10 @@ func (pr *PlannedReparenter) performGracefulPromotion(
finalWaitErr = vterrors.Wrapf(finalWaitErr, "encountered error while performing UndoDemotePrimary(%v): %v", currentPrimary.AliasString(), undoErr)
}
- return "", finalWaitErr
- }
-
- // Primary-elect is caught up to the current primary. We can do the
- // promotion now.
- promoteCtx, promoteCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout)
- defer promoteCancel()
-
- rp, err := pr.tmc.PromoteReplica(promoteCtx, primaryElect, SemiSyncAckers(opts.durability, primaryElect) > 0)
- if err != nil {
- return "", vterrors.Wrapf(err, "primary-elect tablet %v failed to be promoted to primary; please try again", primaryElectAliasStr)
- }
-
- if ctx.Err() == context.DeadlineExceeded {
- // PromoteReplica succeeded, but we ran out of time. PRS needs to be
- // re-run to complete fully.
- return "", vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "PLannedReparent timed out after successfully promoting primary-elect %v; please re-run to fix up the replicas", primaryElectAliasStr)
+ return finalWaitErr
}
- return rp, nil
+ return nil
}
func (pr *PlannedReparenter) performInitialPromotion(
@@ -383,7 +367,7 @@ func (pr *PlannedReparenter) performPotentialPromotion(
primaryElect *topodatapb.Tablet,
tabletMap map[string]*topo.TabletInfo,
opts PlannedReparentOptions,
-) (string, error) {
+) error {
primaryElectAliasStr := topoproto.TabletAliasString(primaryElect.Alias)
pr.logger.Infof("no clear winner found for current primary term; checking if it's safe to recover by electing %v", primaryElectAliasStr)
@@ -457,7 +441,7 @@ func (pr *PlannedReparenter) performPotentialPromotion(
close(positions)
if rec.HasErrors() {
- return "", vterrors.Wrap(rec.Error(), "failed to demote all tablets")
+ return vterrors.Wrap(rec.Error(), "failed to demote all tablets")
}
// Construct a mapping of alias to tablet position.
@@ -478,7 +462,7 @@ func (pr *PlannedReparenter) performPotentialPromotion(
// if the candidate primary is behind that tablet.
tp, ok := tabletPosMap[primaryElectAliasStr]
if !ok {
- return "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v not found in tablet map", primaryElectAliasStr)
+ return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v not found in tablet map", primaryElectAliasStr)
}
primaryElectPos := tp.pos
@@ -487,7 +471,7 @@ func (pr *PlannedReparenter) performPotentialPromotion(
// The primary-elect pos has to be at least as advanced as every tablet
// in the shard.
if !primaryElectPos.AtLeast(tp.pos) {
- return "", vterrors.Errorf(
+ return vterrors.Errorf(
vtrpc.Code_FAILED_PRECONDITION,
"tablet %v (position: %v) contains transactions not found in primary-elect %v (position: %v)",
tp.alias, tp.pos, primaryElectAliasStr, primaryElectPos,
@@ -497,19 +481,9 @@ func (pr *PlannedReparenter) performPotentialPromotion(
// Check that we still have the topology lock.
if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil {
- return "", vterrors.Wrap(err, "lost topology lock; aborting")
- }
-
- // Promote the candidate primary to type:PRIMARY.
- promoteCtx, promoteCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
- defer promoteCancel()
-
- rp, err := pr.tmc.PromoteReplica(promoteCtx, primaryElect, SemiSyncAckers(opts.durability, primaryElect) > 0)
- if err != nil {
- return "", vterrors.Wrapf(err, "failed to promote %v to primary", primaryElectAliasStr)
+ return vterrors.Wrap(err, "lost topology lock; aborting")
}
-
- return rp, nil
+ return nil
}
func (pr *PlannedReparenter) reparentShardLocked(
@@ -553,6 +527,11 @@ func (pr *PlannedReparenter) reparentShardLocked(
currentPrimary := FindCurrentPrimary(tabletMap, pr.logger)
reparentJournalPos := ""
+ // promoteReplicaRequired is a boolean that is used to store whether we need to call
+ // `PromoteReplica` when we reparent the tablets. This is required to be done when we are doing
+ // a potential or a graceful promotion.
+ // InitialPromotion calls `InitPrimary` and for partial promotion, the tablet is already a primary.
+ promoteReplicaRequired := false
// needsRefresh is used to keep track of whether we need to refresh the state
// of the new primary tablet. The only case that we need to reload the state
// is when we are initializing the new primary. The reason is that the first
@@ -601,7 +580,9 @@ func (pr *PlannedReparenter) reparentShardLocked(
case currentPrimary == nil && ev.ShardInfo.PrimaryAlias != nil:
// Case (2): no clear current primary. Try to find a safe promotion
// candidate, and promote to it.
- reparentJournalPos, err = pr.performPotentialPromotion(ctx, keyspace, shard, ev.NewPrimary, tabletMap, opts)
+ err = pr.performPotentialPromotion(ctx, keyspace, shard, ev.NewPrimary, tabletMap, opts)
+ // We need to call `PromoteReplica` when we reparent the tablets.
+ promoteReplicaRequired = true
case topoproto.TabletAliasEqual(currentPrimary.Alias, opts.NewPrimaryAlias):
// Case (3): desired new primary is the current primary. Attempt to fix
// up replicas to recover from a previous partial promotion.
@@ -609,7 +590,9 @@ func (pr *PlannedReparenter) reparentShardLocked(
default:
// Case (4): desired primary and current primary differ. Do a graceful
// demotion-then-promotion.
- reparentJournalPos, err = pr.performGracefulPromotion(ctx, ev, keyspace, shard, currentPrimary, ev.NewPrimary, tabletMap, opts)
+ err = pr.performGracefulPromotion(ctx, ev, keyspace, shard, currentPrimary, ev.NewPrimary, tabletMap, opts)
+ // We need to call `PromoteReplica` when we reparent the tablets.
+ promoteReplicaRequired = true
}
if err != nil {
@@ -620,7 +603,7 @@ func (pr *PlannedReparenter) reparentShardLocked(
return vterrors.Wrap(err, "lost topology lock, aborting")
}
- if err := pr.reparentTablets(ctx, ev, reparentJournalPos, tabletMap, opts); err != nil {
+ if err := pr.reparentTablets(ctx, ev, reparentJournalPos, promoteReplicaRequired, tabletMap, opts); err != nil {
return err
}
@@ -637,6 +620,7 @@ func (pr *PlannedReparenter) reparentTablets(
ctx context.Context,
ev *events.Reparent,
reparentJournalPosition string,
+ promoteReplicaRequired bool,
tabletMap map[string]*topo.TabletInfo,
opts PlannedReparentOptions,
) error {
@@ -645,7 +629,7 @@ func (pr *PlannedReparenter) reparentTablets(
replCtx, replCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout)
defer replCancel()
- // Go thorugh all the tablets.
+ // Go through all the tablets.
// - New primary: populate the reparent journal.
// - Everybody else: reparent to the new primary; wait for the reparent
// journal row.
@@ -660,7 +644,7 @@ func (pr *PlannedReparenter) reparentTablets(
// Point all replicas at the new primary and check that they receive the
// reparent journal entry, proving that they are replicating from the new
- // primary. We do this concurrently with adding the journal entry (after
+ // primary. We do this concurrently with adding the journal entry (after
// this loop), because if semi-sync is enabled, the update to the journal
// table will block until at least one replica is successfully attached to
// the new primary.
@@ -688,6 +672,20 @@ func (pr *PlannedReparenter) reparentTablets(
}(alias, tabletInfo.Tablet)
}
+ // If `PromoteReplica` call is required, we should call it and use the position that it returns.
+ if promoteReplicaRequired {
+ // Promote the candidate primary to type:PRIMARY.
+ primaryPosition, err := pr.tmc.PromoteReplica(replCtx, ev.NewPrimary, SemiSyncAckers(opts.durability, ev.NewPrimary) > 0)
+ if err != nil {
+ pr.logger.Warningf("primary %v failed to PromoteReplica; cancelling replica reparent attempts", primaryElectAliasStr)
+ replCancel()
+ replicasWg.Wait()
+
+ return vterrors.Wrapf(err, "failed PromoteReplica(primary=%v, ts=%v): %v", primaryElectAliasStr, reparentJournalTimestamp, err)
+ }
+ reparentJournalPosition = primaryPosition
+ }
+
// Add a reparent journal entry on the new primary. If semi-sync is enabled,
// this blocks until at least one replica is reparented (above) and
// successfully replicating from the new primary.
diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go
index 5c79caeadb7..20815db3dfc 100644
--- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go
+++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go
@@ -18,6 +18,7 @@ package reparentutil
import (
"context"
+ "errors"
"fmt"
"strings"
"testing"
@@ -963,13 +964,12 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
tabletMap map[string]*topo.TabletInfo
opts PlannedReparentOptions
- expectedPos string
expectedEvent *events.Reparent
shouldErr bool
// Optional function to run some additional post-test assertions. Will
// be run in the main test body before the common assertions are run,
// regardless of the value of tt.shouldErr for that test case.
- extraAssertions func(t *testing.T, pos string, err error)
+ extraAssertions func(t *testing.T, err error)
}{
{
name: "successful promotion",
@@ -998,15 +998,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
},
},
- PromoteReplicaResults: map[string]struct {
- Result string
- Error error
- }{
- "zone1-0000000200": {
- Result: "successful reparent journal position",
- Error: nil,
- },
- },
SetReplicationSourceResults: map[string]error{
"zone1-0000000200": nil,
},
@@ -1033,10 +1024,9 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
Uid: 200,
},
},
- tabletMap: map[string]*topo.TabletInfo{},
- opts: PlannedReparentOptions{},
- expectedPos: "successful reparent journal position",
- shouldErr: false,
+ tabletMap: map[string]*topo.TabletInfo{},
+ opts: PlannedReparentOptions{},
+ shouldErr: false,
},
{
name: "cannot get snapshot of current primary",
@@ -1376,20 +1366,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
},
},
- PromoteReplicaResults: map[string]struct {
- Result string
- Error error
- }{
- // This being present means that if we don't encounter a
- // a case where either WaitForPosition errors, or the parent
- // context times out, then we will fail the test, since it
- // will cause the overall function under test to return no
- // error.
- "zone1-0000000200": {
- Result: "success!",
- Error: nil,
- },
- },
SetReplicationSourceResults: map[string]error{
"zone1-0000000200": nil,
},
@@ -1483,7 +1459,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
tabletMap: map[string]*topo.TabletInfo{},
opts: PlannedReparentOptions{},
shouldErr: true,
- extraAssertions: func(t *testing.T, pos string, err error) {
+ extraAssertions: func(t *testing.T, err error) {
assert.Contains(t, err.Error(), "UndoDemotePrimary", "expected error to include information about failed demotion rollback")
},
},
@@ -1546,144 +1522,10 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
tabletMap: map[string]*topo.TabletInfo{},
opts: PlannedReparentOptions{},
shouldErr: true,
- extraAssertions: func(t *testing.T, pos string, err error) {
+ extraAssertions: func(t *testing.T, err error) {
assert.NotContains(t, err.Error(), "UndoDemotePrimary", "expected error to not include information about failed demotion rollback")
},
},
- {
- name: "primary-elect fails to promote",
- ts: memorytopo.NewServer("zone1"),
- tmc: &testutil.TabletManagerClient{
- DemotePrimaryResults: map[string]struct {
- Status *replicationdatapb.PrimaryStatus
- Error error
- }{
- "zone1-0000000100": {
- Status: &replicationdatapb.PrimaryStatus{
- // value of Position doesn't strictly matter for
- // this test case, as long as it matches the inner
- // key of the WaitForPositionResults map for the
- // primary-elect.
- Position: "position1",
- },
- Error: nil,
- },
- },
- PrimaryPositionResults: map[string]struct {
- Position string
- Error error
- }{
- "zone1-0000000100": {
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
- },
- },
- PromoteReplicaResults: map[string]struct {
- Result string
- Error error
- }{
- "zone1-0000000200": {
- Error: assert.AnError,
- },
- },
- SetReplicationSourceResults: map[string]error{
- "zone1-0000000200": nil,
- },
- WaitForPositionResults: map[string]map[string]error{
- "zone1-0000000200": {
- "position1": nil,
- },
- },
- },
- ev: &events.Reparent{},
- keyspace: "testkeyspace",
- shard: "-",
- currentPrimary: &topo.TabletInfo{
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 100,
- },
- },
- },
- primaryElect: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 200,
- },
- },
- tabletMap: map[string]*topo.TabletInfo{},
- opts: PlannedReparentOptions{},
- shouldErr: true,
- },
- {
- name: "promotion succeeds but parent context times out",
- ts: memorytopo.NewServer("zone1"),
- tmc: &testutil.TabletManagerClient{
- DemotePrimaryResults: map[string]struct {
- Status *replicationdatapb.PrimaryStatus
- Error error
- }{
- "zone1-0000000100": {
- Status: &replicationdatapb.PrimaryStatus{
- // value of Position doesn't strictly matter for
- // this test case, as long as it matches the inner
- // key of the WaitForPositionResults map for the
- // primary-elect.
- Position: "position1",
- },
- Error: nil,
- },
- },
- PrimaryPositionResults: map[string]struct {
- Position string
- Error error
- }{
- "zone1-0000000100": {
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
- },
- },
- PromoteReplicaPostDelays: map[string]time.Duration{
- "zone1-0000000200": time.Millisecond * 100, // 10x the parent context timeout
- },
- PromoteReplicaResults: map[string]struct {
- Result string
- Error error
- }{
- "zone1-0000000200": {
- Error: nil,
- },
- },
- SetReplicationSourceResults: map[string]error{
- "zone1-0000000200": nil,
- },
- WaitForPositionResults: map[string]map[string]error{
- "zone1-0000000200": {
- "position1": nil,
- },
- },
- },
- ctxTimeout: time.Millisecond * 10,
- ev: &events.Reparent{},
- keyspace: "testkeyspace",
- shard: "-",
- currentPrimary: &topo.TabletInfo{
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 100,
- },
- },
- },
- primaryElect: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 200,
- },
- },
- tabletMap: map[string]*topo.TabletInfo{},
- opts: PlannedReparentOptions{},
- shouldErr: true,
- },
}
ctx := context.Background()
@@ -1727,7 +1569,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
require.NoError(t, err)
tt.opts.durability = durability
- pos, err := pr.performGracefulPromotion(
+ err = pr.performGracefulPromotion(
ctx,
tt.ev,
tt.keyspace,
@@ -1739,7 +1581,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
)
if tt.extraAssertions != nil {
- tt.extraAssertions(t, pos, err)
+ tt.extraAssertions(t, err)
}
if tt.shouldErr {
@@ -1749,7 +1591,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, tt.expectedPos, pos)
})
}
}
@@ -2066,8 +1907,7 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) {
primaryElect *topodatapb.Tablet
tabletMap map[string]*topo.TabletInfo
- expectedPos string
- shouldErr bool
+ shouldErr bool
}{
{
name: "success",
@@ -2096,15 +1936,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) {
Error: nil,
},
},
- PromoteReplicaResults: map[string]struct {
- Result string
- Error error
- }{
- "zone1-0000000100": {
- Result: "reparent journal position",
- Error: nil,
- },
- },
},
unlockTopo: false,
keyspace: "testkeyspace",
@@ -2141,8 +1972,7 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) {
},
},
},
- expectedPos: "reparent journal position",
- shouldErr: false,
+ shouldErr: false,
},
{
name: "failed to DemotePrimary on a tablet",
@@ -2403,158 +2233,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) {
},
shouldErr: true,
},
- {
- name: "failed to promote primary-elect",
- ts: memorytopo.NewServer("zone1"),
- tmc: &testutil.TabletManagerClient{
- DemotePrimaryResults: map[string]struct {
- Status *replicationdatapb.PrimaryStatus
- Error error
- }{
- "zone1-0000000100": {
- Status: &replicationdatapb.PrimaryStatus{
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
- },
- Error: nil,
- },
- "zone1-0000000101": {
- Status: &replicationdatapb.PrimaryStatus{
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
- },
- Error: nil,
- },
- "zone1-0000000102": {
- Status: &replicationdatapb.PrimaryStatus{
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
- },
- Error: nil,
- },
- },
- PromoteReplicaResults: map[string]struct {
- Result string
- Error error
- }{
- "zone1-0000000100": {
- Result: "",
- Error: assert.AnError,
- },
- },
- },
- unlockTopo: false,
- keyspace: "testkeyspace",
- shard: "-",
- primaryElect: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 100,
- },
- },
- tabletMap: map[string]*topo.TabletInfo{
- "zone1-0000000100": {
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 100,
- },
- },
- },
- "zone1-0000000101": {
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 101,
- },
- },
- },
- "zone1-0000000102": {
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 102,
- },
- },
- },
- },
- shouldErr: true,
- },
- {
- name: "timed out while promoting primary-elect",
- ts: memorytopo.NewServer("zone1"),
- tmc: &testutil.TabletManagerClient{
- DemotePrimaryResults: map[string]struct {
- Status *replicationdatapb.PrimaryStatus
- Error error
- }{
- "zone1-0000000100": {
- Status: &replicationdatapb.PrimaryStatus{
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
- },
- Error: nil,
- },
- "zone1-0000000101": {
- Status: &replicationdatapb.PrimaryStatus{
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10",
- },
- Error: nil,
- },
- "zone1-0000000102": {
- Status: &replicationdatapb.PrimaryStatus{
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
- },
- Error: nil,
- },
- },
- PromoteReplicaDelays: map[string]time.Duration{
- "zone1-0000000100": time.Millisecond * 100,
- },
- PromoteReplicaResults: map[string]struct {
- Result string
- Error error
- }{
- "zone1-0000000100": {
- Result: "reparent journal position",
- Error: nil,
- },
- },
- },
- timeout: time.Millisecond * 50,
- unlockTopo: false,
- keyspace: "testkeyspace",
- shard: "-",
- primaryElect: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 100,
- },
- },
- tabletMap: map[string]*topo.TabletInfo{
- "zone1-0000000100": {
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 100,
- },
- },
- },
- "zone1-0000000101": {
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 101,
- },
- },
- },
- "zone1-0000000102": {
- Tablet: &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "zone1",
- Uid: 102,
- },
- },
- },
- },
- shouldErr: true,
- },
}
ctx := context.Background()
@@ -2595,7 +2273,7 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) {
durability, err := GetDurabilityPolicy("none")
require.NoError(t, err)
- rp, err := pr.performPotentialPromotion(ctx, tt.keyspace, tt.shard, tt.primaryElect, tt.tabletMap, PlannedReparentOptions{durability: durability})
+ err = pr.performPotentialPromotion(ctx, tt.keyspace, tt.shard, tt.primaryElect, tt.tabletMap, PlannedReparentOptions{durability: durability})
if tt.shouldErr {
assert.Error(t, err)
@@ -2603,7 +2281,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) {
}
assert.NoError(t, err)
- assert.Equal(t, tt.expectedPos, rp)
})
}
}
@@ -3335,10 +3012,12 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) {
durability string
ev *events.Reparent
reparentJournalPosition string
+ promoteReplicaRequired bool
tabletMap map[string]*topo.TabletInfo
opts PlannedReparentOptions
shouldErr bool
+ wantErr string
}{
{
name: "success - durability = none",
@@ -3473,6 +3152,158 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) {
},
},
shouldErr: false,
+ }, {
+ name: "success - promote replica required",
+ durability: "semi_sync",
+ promoteReplicaRequired: true,
+ tmc: &testutil.TabletManagerClient{
+ PromoteReplicaResults: map[string]struct {
+ Result string
+ Error error
+ }{
+ "zone1-0000000100": {
+ Result: "successful reparent journal position",
+ Error: nil,
+ },
+ },
+ PopulateReparentJournalResults: map[string]error{
+ "zone1-0000000100": nil,
+ },
+ SetReplicationSourceResults: map[string]error{
+ "zone1-0000000200": nil,
+ "zone1-0000000201": nil,
+ "zone1-0000000202": nil,
+ },
+ SetReplicationSourceSemiSync: map[string]bool{
+ "zone1-0000000200": true,
+ "zone1-0000000201": true,
+ "zone1-0000000202": false,
+ },
+ },
+ ev: &events.Reparent{
+ NewPrimary: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ tabletMap: map[string]*topo.TabletInfo{
+ "zone1-0000000100": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ "zone1-0000000200": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 200,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ "zone1-0000000201": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 201,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ "zone1-0000000202": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 202,
+ },
+ Type: topodatapb.TabletType_RDONLY,
+ },
+ },
+ },
+ shouldErr: false,
+ }, {
+ name: "Promote replica failed",
+ durability: "semi_sync",
+ promoteReplicaRequired: true,
+ tmc: &testutil.TabletManagerClient{
+ PromoteReplicaResults: map[string]struct {
+ Result string
+ Error error
+ }{
+ "zone1-0000000100": {
+ Error: errors.New("failed promote replica"),
+ },
+ },
+ PopulateReparentJournalResults: map[string]error{
+ "zone1-0000000100": nil,
+ },
+ SetReplicationSourceResults: map[string]error{
+ "zone1-0000000200": nil,
+ "zone1-0000000201": nil,
+ "zone1-0000000202": nil,
+ },
+ SetReplicationSourceSemiSync: map[string]bool{
+ "zone1-0000000200": true,
+ "zone1-0000000201": true,
+ "zone1-0000000202": false,
+ },
+ },
+ ev: &events.Reparent{
+ NewPrimary: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ tabletMap: map[string]*topo.TabletInfo{
+ "zone1-0000000100": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ "zone1-0000000200": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 200,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ "zone1-0000000201": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 201,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ "zone1-0000000202": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 202,
+ },
+ Type: topodatapb.TabletType_RDONLY,
+ },
+ },
+ },
+ shouldErr: true,
+ wantErr: "failed PromoteReplica(primary=zone1-0000000100,",
},
{
name: "SetReplicationSource failed on replica",
@@ -3534,6 +3365,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) {
},
},
shouldErr: true,
+ wantErr: "retry failed replicas: tablet zone1-0000000201 failed to SetReplicationSource(zone1-0000000100): assert.AnError general error for testing",
},
{
name: "SetReplicationSource timed out on replica",
@@ -3601,6 +3433,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) {
WaitReplicasTimeout: time.Millisecond * 10,
},
shouldErr: true,
+ wantErr: "retry failed replicas: tablet zone1-0000000201 failed to SetReplicationSource(zone1-0000000100): context deadline exceeded",
},
{
name: "PopulateReparentJournal failed out on new primary",
@@ -3662,6 +3495,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) {
},
},
shouldErr: true,
+ wantErr: "failed PopulateReparentJournal(primary=zone1-0000000100",
},
{
name: "PopulateReparentJournal timed out on new primary",
@@ -3729,6 +3563,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) {
WaitReplicasTimeout: time.Millisecond * 10,
},
shouldErr: true,
+ wantErr: "failed PopulateReparentJournal(primary=zone1-0000000100",
},
}
@@ -3749,10 +3584,12 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) {
durability, err := GetDurabilityPolicy(durabilityPolicy)
require.NoError(t, err)
tt.opts.durability = durability
- err = pr.reparentTablets(ctx, tt.ev, tt.reparentJournalPosition, tt.tabletMap, tt.opts)
+ err = pr.reparentTablets(ctx, tt.ev, tt.reparentJournalPosition, tt.promoteReplicaRequired, tt.tabletMap, tt.opts)
if tt.shouldErr {
assert.Error(t, err)
-
+ if tt.wantErr != "" {
+ require.ErrorContains(t, err, tt.wantErr)
+ }
return
}
diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go
index 8c905038bd5..dcda5d02cbf 100644
--- a/go/vt/vtctl/reparentutil/replication.go
+++ b/go/vt/vtctl/reparentutil/replication.go
@@ -28,6 +28,8 @@ import (
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
+ replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -35,9 +37,6 @@ import (
"vitess.io/vitess/go/vt/topotools/events"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tmclient"
-
- replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
// FindValidEmergencyReparentCandidates will find candidates for an emergency
@@ -215,8 +214,8 @@ func stopReplicationAndBuildStatusMaps(
tmc tmclient.TabletManagerClient,
ev *events.Reparent,
tabletMap map[string]*topo.TabletInfo,
- waitReplicasTimeout time.Duration,
- ignoredTablets sets.String,
+ stopReplicationTimeout time.Duration,
+ ignoredTablets sets.Set[string],
tabletToWaitFor *topodatapb.TabletAlias,
durability Durabler,
logger logutil.Logger,
@@ -234,7 +233,7 @@ func stopReplicationAndBuildStatusMaps(
}
)
- groupCtx, groupCancel := context.WithTimeout(ctx, waitReplicasTimeout)
+ groupCtx, groupCancel := context.WithTimeout(ctx, stopReplicationTimeout)
defer groupCancel()
fillStatus := func(alias string, tabletInfo *topo.TabletInfo, mustWaitForTablet bool) {
@@ -312,8 +311,9 @@ func stopReplicationAndBuildStatusMaps(
errgroup := concurrency.ErrorGroup{
NumGoroutines: len(tabletMap) - ignoredTablets.Len(),
NumRequiredSuccesses: len(tabletMap) - ignoredTablets.Len() - 1,
- NumAllowedErrors: 1,
- NumErrorsToWaitFor: numErrorsToWaitFor,
+ NumAllowedErrors: len(tabletMap), // We set the number of allowed errors to a very high value, because we don't want to exit early
+ // even in case of multiple failures. We rely on the revoke function below to determine if we have more failures than we can tolerate
+ NumErrorsToWaitFor: numErrorsToWaitFor,
}
errRecorder := errgroup.Wait(groupCancel, errChan)
diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go
index 42b01cac770..bdeb029b7a2 100644
--- a/go/vt/vtctl/reparentutil/replication_test.go
+++ b/go/vt/vtctl/reparentutil/replication_test.go
@@ -18,27 +18,33 @@ package reparentutil
import (
"context"
+ "os"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "vitess.io/vitess/go/vt/vterrors"
-
- "github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets"
+ _flag "vitess.io/vitess/go/internal/flag"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/topotools/events"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tmclient"
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
+func TestMain(m *testing.M) {
+ _flag.ParseFlagsForTest()
+ os.Exit(m.Run())
+}
+
func TestFindValidEmergencyReparentCandidates(t *testing.T) {
t.Parallel()
@@ -83,6 +89,19 @@ func TestFindValidEmergencyReparentCandidates(t *testing.T) {
},
expected: []string{"r1", "r2", "p1"},
shouldErr: false,
+ }, {
+ name: "success for single tablet",
+ statusMap: map[string]*replicationdatapb.StopReplicationStatus{
+ "r1": {
+ After: &replicationdatapb.Status{
+ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562",
+ RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:1",
+ },
+ },
+ },
+ primaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
+ expected: []string{"r1"},
+ shouldErr: false,
},
{
name: "mixed replication modes",
@@ -278,8 +297,8 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
durability string
tmc *stopReplicationAndBuildStatusMapsTestTMClient
tabletMap map[string]*topo.TabletInfo
- waitReplicasTimeout time.Duration
- ignoredTablets sets.String
+ stopReplicasTimeout time.Duration
+ ignoredTablets sets.Set[string]
tabletToWaitFor *topodatapb.TabletAlias
expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus
expectedPrimaryStatusMap map[string]*replicationdatapb.PrimaryStatus
@@ -328,7 +347,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000100": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -421,7 +440,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000100": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -514,7 +533,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000100": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -583,7 +602,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString("zone1-0000000100"),
+ ignoredTablets: sets.New[string]("zone1-0000000100"),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000101": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -651,7 +670,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000101": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -725,7 +744,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000101": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -789,14 +808,14 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: nil,
expectedPrimaryStatusMap: nil,
expectedTabletsReachable: nil,
shouldErr: true, // we get multiple errors, so we fail
},
{
- name: "waitReplicasTimeout exceeded",
+ name: "stopReplicasTimeout exceeded",
durability: "none",
tmc: &stopReplicationAndBuildStatusMapsTestTMClient{
stopReplicationAndGetStatusDelays: map[string]time.Duration{
@@ -840,8 +859,8 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- waitReplicasTimeout: time.Millisecond * 5,
- ignoredTablets: sets.NewString(),
+ stopReplicasTimeout: time.Millisecond * 5,
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000101": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -897,7 +916,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000101": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -950,7 +969,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: nil,
expectedPrimaryStatusMap: nil,
expectedTabletsReachable: nil,
@@ -994,7 +1013,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: nil,
expectedPrimaryStatusMap: nil,
expectedTabletsReachable: nil,
@@ -1064,7 +1083,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
Cell: "zone1",
Uid: 102,
},
- ignoredTablets: sets.NewString(),
+ ignoredTablets: sets.New[string](),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000100": {
Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)},
@@ -1098,7 +1117,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
Uid: 102,
},
}},
- waitReplicasTimeout: time.Minute,
+ stopReplicasTimeout: time.Minute,
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
shouldErr: false,
},
@@ -1110,7 +1129,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
durability, err := GetDurabilityPolicy(tt.durability)
require.NoError(t, err)
- res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.waitReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, logger)
+ res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, logger)
if tt.shouldErr {
assert.Error(t, err)
return
diff --git a/go/vt/vtctl/schematools/copy.go b/go/vt/vtctl/schematools/copy.go
deleted file mode 100644
index 6b99e63b79e..00000000000
--- a/go/vt/vtctl/schematools/copy.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package schematools
-
-import (
- "bytes"
- "context"
- "fmt"
-
- "vitess.io/vitess/go/sqltypes"
- "vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/topoproto"
- "vitess.io/vitess/go/vt/vttablet/tmclient"
-
- tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
-)
-
-// CopyShardMetadata copies the contents of the _vt.shard_metadata table from
-// the source tablet to the destination tablet.
-//
-// NOTE: This function assumes that the destination tablet is a primary with
-// binary logging enabled, in order to propagate the INSERT statements to any
-// replicas in the destination shard.
-func CopyShardMetadata(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, source *topodatapb.TabletAlias, dest *topodatapb.TabletAlias) error {
- sourceTablet, err := ts.GetTablet(ctx, source)
- if err != nil {
- return fmt.Errorf("GetTablet(%v) failed: %w", topoproto.TabletAliasString(source), err)
- }
-
- destTablet, err := ts.GetTablet(ctx, dest)
- if err != nil {
- return fmt.Errorf("GetTablet(%v) failed: %w", topoproto.TabletAliasString(dest), err)
- }
-
- sql := "SELECT 1 FROM information_schema.tables WHERE table_schema = '_vt' AND table_name = 'shard_metadata'"
- presenceResult, err := tmc.ExecuteFetchAsDba(ctx, sourceTablet.Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
- Query: []byte(sql),
- MaxRows: 1,
- })
- if err != nil {
- return fmt.Errorf("ExecuteFetchAsDba(%v, false, %v, 1, false, false) failed: %v", topoproto.TabletAliasString(source), sql, err)
- }
- if len(presenceResult.Rows) == 0 {
- log.Infof("_vt.shard_metadata doesn't exist on the source tablet %v, skipping its copy.", topoproto.TabletAliasString(source))
- return nil
- }
-
- // (TODO|@ajm188,@deepthi): 100 may be too low here for row limit
- sql = "SELECT db_name, name, value FROM _vt.shard_metadata"
- p3qr, err := tmc.ExecuteFetchAsDba(ctx, sourceTablet.Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
- Query: []byte(sql),
- MaxRows: 100,
- })
- if err != nil {
- return fmt.Errorf("ExecuteFetchAsDba(%v, false, %v, 100, false, false) failed: %v", topoproto.TabletAliasString(source), sql, err)
- }
-
- qr := sqltypes.Proto3ToResult(p3qr)
- queryBuf := bytes.NewBuffer(nil)
- for _, row := range qr.Rows {
- dbName := row[0]
- name := row[1]
- value := row[2]
- queryBuf.WriteString("INSERT INTO _vt.shard_metadata (db_name, name, value) VALUES (")
- dbName.EncodeSQL(queryBuf)
- queryBuf.WriteByte(',')
- name.EncodeSQL(queryBuf)
- queryBuf.WriteByte(',')
- value.EncodeSQL(queryBuf)
- queryBuf.WriteString(") ON DUPLICATE KEY UPDATE value = ")
- value.EncodeSQL(queryBuf)
-
- _, err := tmc.ExecuteFetchAsDba(ctx, destTablet.Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
- Query: queryBuf.Bytes(),
- MaxRows: 0,
- })
- if err != nil {
- return fmt.Errorf("ExecuteFetchAsDba(%v, false, %v, 0, false, false) failed: %v", topoproto.TabletAliasString(dest), queryBuf.String(), err)
- }
-
- queryBuf.Reset()
- }
-
- return nil
-}
diff --git a/go/vt/vtctl/topo.go b/go/vt/vtctl/topo.go
index 308d1f305f9..ed2d91e9cf2 100644
--- a/go/vt/vtctl/topo.go
+++ b/go/vt/vtctl/topo.go
@@ -21,18 +21,11 @@ import (
"encoding/json"
"fmt"
"os"
- "path"
"github.com/spf13/pflag"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/proto"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/wrangler"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vschemapb "vitess.io/vitess/go/vt/proto/vschema"
)
// This file contains the topo command group for vtctl.
@@ -57,59 +50,6 @@ func init() {
})
}
-// DecodeContent uses the filename to imply a type, and proto-decodes
-// the right object, then echoes it as a string.
-func DecodeContent(filename string, data []byte, json bool) (string, error) {
- name := path.Base(filename)
- dir := path.Dir(filename)
- var p proto.Message
- switch name {
- case topo.CellInfoFile:
- p = new(topodatapb.CellInfo)
- case topo.KeyspaceFile:
- p = new(topodatapb.Keyspace)
- case topo.ShardFile:
- p = new(topodatapb.Shard)
- case topo.VSchemaFile:
- p = new(vschemapb.Keyspace)
- case topo.ShardReplicationFile:
- p = new(topodatapb.ShardReplication)
- case topo.TabletFile:
- p = new(topodatapb.Tablet)
- case topo.SrvVSchemaFile:
- p = new(vschemapb.SrvVSchema)
- case topo.SrvKeyspaceFile:
- p = new(topodatapb.SrvKeyspace)
- case topo.RoutingRulesFile:
- p = new(vschemapb.RoutingRules)
- default:
- switch dir {
- case "/" + topo.GetExternalVitessClusterDir():
- p = new(topodatapb.ExternalVitessCluster)
- default:
- }
- if p == nil {
- if json {
- return "", fmt.Errorf("unknown topo protobuf type for %v", name)
- }
- return string(data), nil
- }
- }
-
- if err := proto.Unmarshal(data, p); err != nil {
- return string(data), err
- }
-
- var marshalled []byte
- var err error
- if json {
- marshalled, err = protojson.Marshal(p)
- } else {
- marshalled, err = prototext.Marshal(p)
- }
- return string(marshalled), err
-}
-
func commandTopoCat(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
cell := subFlags.String("cell", topo.GlobalCell, "topology cell to cat the file from. Defaults to global cell.")
long := subFlags.Bool("long", false, "long listing.")
@@ -214,7 +154,7 @@ func (d ProtoTopologyDecoder) decode(ctx context.Context, topoPaths []string, co
wr.Logger().Printf("path=%v version=%v\n", topoPath, version)
}
- decoded, err := DecodeContent(topoPath, data, false)
+ decoded, err := topo.DecodeContent(topoPath, data, false)
if err != nil {
wr.Logger().Warningf("TopoCat: cannot proto decode %v: %v", topoPath, err)
decoded = string(data)
@@ -269,7 +209,7 @@ func (d JSONTopologyDecoder) decode(ctx context.Context, topoPaths []string, con
continue
}
- decoded, err := DecodeContent(topoPath, data, true)
+ decoded, err := topo.DecodeContent(topoPath, data, true)
if err != nil {
hasError = true
wr.Logger().Printf("TopoCat: cannot proto decode %v: %v", topoPath, err)
diff --git a/go/vt/vtctl/vdiff2.go b/go/vt/vtctl/vdiff2.go
index e97b56edb68..c51fb15a95e 100644
--- a/go/vt/vtctl/vdiff2.go
+++ b/go/vt/vtctl/vdiff2.go
@@ -45,7 +45,7 @@ import (
)
func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- _ = subFlags.Bool("v2", false, "Use VDiff2")
+ _ = subFlags.Bool("v1", false, "Use legacy VDiff v1")
timeout := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on primary migrations. The migration will be cancelled on a timeout.")
maxRows := subFlags.Int64("limit", math.MaxInt64, "Max rows to stop comparing after")
@@ -76,7 +76,7 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F
var action vdiff.VDiffAction
var actionArg string
- usage := fmt.Errorf("usage: VDiff -- --v2 . %s [%s|]", strings.Join(*(*[]string)(unsafe.Pointer(&vdiff.Actions)), "|"), strings.Join(vdiff.ActionArgs, "|"))
+ usage := fmt.Errorf("usage: VDiff -- . %s [%s|]", strings.Join(*(*[]string)(unsafe.Pointer(&vdiff.Actions)), "|"), strings.Join(vdiff.ActionArgs, "|"))
switch subFlags.NArg() {
case 1: // for backward compatibility with vdiff1
action = vdiff.CreateAction
@@ -119,7 +119,7 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F
MaxExtraRowsToCompare: *maxExtraRowsToCompare,
},
ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{
- OnlyPKS: *onlyPks,
+ OnlyPks: *onlyPks,
DebugQuery: *debugQuery,
Format: format,
},
@@ -142,13 +142,13 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F
default:
vdiffUUID, err = uuid.Parse(actionArg)
if err != nil {
- return fmt.Errorf("can only show a specific vdiff, please provide a valid UUID; view all with: VDiff -- --v2 %s.%s show all", keyspace, workflowName)
+ return fmt.Errorf("can only show a specific vdiff, please provide a valid UUID; view all with: VDiff -- %s.%s show all", keyspace, workflowName)
}
}
case vdiff.StopAction, vdiff.ResumeAction:
vdiffUUID, err = uuid.Parse(actionArg)
if err != nil {
- return fmt.Errorf("can only %s a specific vdiff, please provide a valid UUID; view all with: VDiff -- --v2 %s.%s show all", action, keyspace, workflowName)
+ return fmt.Errorf("can only %s a specific vdiff, please provide a valid UUID; view all with: VDiff -- %s.%s show all", action, keyspace, workflowName)
}
case vdiff.DeleteAction:
switch actionArg {
@@ -156,7 +156,7 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F
default:
vdiffUUID, err = uuid.Parse(actionArg)
if err != nil {
- return fmt.Errorf("can only delete a specific vdiff, please provide a valid UUID; view all with: VDiff -- --v2 %s.%s show all", keyspace, workflowName)
+ return fmt.Errorf("can only delete a specific vdiff, please provide a valid UUID; view all with: VDiff -- %s.%s show all", keyspace, workflowName)
}
}
default:
@@ -577,7 +577,7 @@ func buildVDiff2SingleSummary(wr *wrangler.Wrangler, keyspace, workflow, uuid st
// on every shard.
if shardStateCounts[vdiff.StoppedState] > 0 {
summary.State = vdiff.StoppedState
- } else if tableStateCounts[vdiff.ErrorState] > 0 {
+ } else if shardStateCounts[vdiff.ErrorState] > 0 || tableStateCounts[vdiff.ErrorState] > 0 {
summary.State = vdiff.ErrorState
} else if tableStateCounts[vdiff.StartedState] > 0 {
summary.State = vdiff.StartedState
diff --git a/go/vt/vtctl/vdiff2_test.go b/go/vt/vtctl/vdiff2_test.go
index db4827bd73f..368f21eb93b 100644
--- a/go/vt/vtctl/vdiff2_test.go
+++ b/go/vt/vtctl/vdiff2_test.go
@@ -1,15 +1,386 @@
package vtctl
import (
+ "context"
+ "fmt"
"math"
"testing"
"time"
+ "github.com/google/uuid"
"github.com/stretchr/testify/require"
+ "gotest.tools/assert"
+ "vitess.io/vitess/go/sqltypes"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
)
+var (
+ fields = sqltypes.MakeTestFields(
+ "vdiff_state|last_error|table_name|uuid|table_state|table_rows|started_at|rows_compared|completed_at|has_mismatch|report",
+ "varbinary|varbinary|varbinary|varchar|varbinary|int64|timestamp|int64|timestamp|int64|json",
+ )
+ options = &tabletmanagerdatapb.VDiffOptions{
+ PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{
+ TabletTypes: "primary",
+ },
+ CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{
+ Tables: "t1",
+ },
+ ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{
+ Format: "json",
+ },
+ }
+)
+
+func TestVDiff2Unsharded(t *testing.T) {
+ env := newTestVDiffEnv(t, []string{"0"}, []string{"0"}, "", nil)
+ defer env.close()
+
+ UUID := uuid.New().String()
+ req := &tabletmanagerdatapb.VDiffRequest{
+ Keyspace: "target",
+ Workflow: env.workflow,
+ Action: string(vdiff.ShowAction),
+ ActionArg: UUID,
+ VdiffUuid: UUID,
+ Options: options,
+ }
+ starttime := time.Now().UTC().Format(vdiff.TimestampFormat)
+ comptime := time.Now().Add(1 * time.Second).UTC().Format(vdiff.TimestampFormat)
+ goodReportfmt := `{
+ "Workflow": "vdiffTest",
+ "Keyspace": "target",
+ "State": "completed",
+ "UUID": "%s",
+ "RowsCompared": %d,
+ "HasMismatch": %t,
+ "Shards": "0",
+ "StartedAt": "%s",
+ "CompletedAt": "%s"
+}
+
+`
+
+ badReportfmt := `{
+ "Workflow": "vdiffTest",
+ "Keyspace": "target",
+ "State": "completed",
+ "UUID": "%s",
+ "RowsCompared": %d,
+ "HasMismatch": %t,
+ "Shards": "0",
+ "StartedAt": "%s",
+ "CompletedAt": "%s",
+ "TableSummary": {
+ "t1": {
+ "TableName": "t1",
+ "State": "completed",
+ "RowsCompared": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ }
+ },
+ "Reports": {
+ "t1": {
+ "0": {
+ "TableName": "t1",
+ "ProcessedRows": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d,
+ %s
+ }
+ }
+ }
+}
+
+`
+
+ testcases := []struct {
+ id string
+ result *sqltypes.Result
+ report string
+ }{{
+ id: "1",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+
+ `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0}`),
+ report: fmt.Sprintf(goodReportfmt,
+ UUID, 3, false, starttime, comptime,
+ ),
+ }, {
+ id: "2",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 1, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 2, "ExtraRowsTargetSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ UUID, 3, true, starttime, comptime, 3, 1, 0, 0, 2, 3, 1, 0, 0, 2,
+ `"ExtraRowsTargetSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "3",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 1, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 2, `+
+ `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ UUID, 3, true, starttime, comptime, 3, 1, 0, 2, 0, 3, 1, 0, 2, 0,
+ `"ExtraRowsSourceSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "4",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 1, `+
+ `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ UUID, 3, true, starttime, comptime, 3, 2, 0, 1, 0, 3, 2, 0, 1, 0,
+ `"ExtraRowsSourceSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "5",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 1, `+
+ `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ UUID, 3, true, starttime, comptime, 3, 2, 0, 1, 0, 3, 2, 0, 1, 0,
+ `"ExtraRowsSourceSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "6",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2", "c2": "3"}}, `+
+ `"Target": {"Row": {"c1": "2", "c2": "4"}}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0,
+ `"MismatchedRowsSample": [
+ {
+ "Source": {
+ "Row": {
+ "c1": "2",
+ "c2": "3"
+ }
+ },
+ "Target": {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ }
+ ]`),
+ }, {
+ id: "7", // --only_pks
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2"}}, `+
+ `"Target": {"Row": {"c1": "2"}}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0,
+ `"MismatchedRowsSample": [
+ {
+ "Source": {
+ "Row": {
+ "c1": "2"
+ }
+ },
+ "Target": {
+ "Row": {
+ "c1": "2"
+ }
+ }
+ }
+ ]`),
+ }, {
+ id: "8", // --debug_query
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2", "c2": "3"}, "Query": "select c1, c2 from t1 where c1=2;"}, `+
+ `"Target": {"Row": {"c1": "2", "c2": "4"}, "Query": "select c1, c2 from t1 where c1=2;"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0,
+ `"MismatchedRowsSample": [
+ {
+ "Source": {
+ "Row": {
+ "c1": "2",
+ "c2": "3"
+ },
+ "Query": "select c1, c2 from t1 where c1=2;"
+ },
+ "Target": {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ },
+ "Query": "select c1, c2 from t1 where c1=2;"
+ }
+ }
+ ]`),
+ }}
+
+ for _, tcase := range testcases {
+ t.Run(tcase.id, func(t *testing.T) {
+ res := &tabletmanagerdatapb.VDiffResponse{
+ Id: 1,
+ Output: sqltypes.ResultToProto3(tcase.result),
+ }
+ env.tmc.setVDResults(env.tablets[200].tablet, req, res)
+ output, err := env.wr.VDiff2(context.Background(), "target", env.workflow, vdiff.ShowAction, UUID, UUID, options)
+ require.NoError(t, err)
+ vds, err := displayVDiff2ShowSingleSummary(env.wr, options.ReportOptions.Format, "target", env.workflow, UUID, output, false)
+ require.NoError(t, err)
+ require.Equal(t, vdiff.CompletedState, vds)
+ logstr := env.cmdlog.String()
+ assert.Equal(t, tcase.report, logstr)
+ env.cmdlog.Clear()
+ })
+ }
+}
+
+func TestVDiff2Sharded(t *testing.T) {
+ env := newTestVDiffEnv(t, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{
+ "-80": "MySQL56/0e45e704-7cb9-11ed-a1eb-0242ac120002:1-890",
+ "80-": "MySQL56/1497ddb0-7cb9-11ed-a1eb-0242ac120002:1-891",
+ })
+ defer env.close()
+
+ UUID := uuid.New().String()
+ req := &tabletmanagerdatapb.VDiffRequest{
+ Keyspace: "target",
+ Workflow: env.workflow,
+ Action: string(vdiff.ShowAction),
+ ActionArg: UUID,
+ VdiffUuid: UUID,
+ Options: options,
+ }
+ starttime := time.Now().UTC().Format(vdiff.TimestampFormat)
+ comptime := time.Now().Add(1 * time.Second).UTC().Format(vdiff.TimestampFormat)
+ verbosefmt := `{
+ "Workflow": "vdiffTest",
+ "Keyspace": "target",
+ "State": "completed",
+ "UUID": "%s",
+ "RowsCompared": %d,
+ "HasMismatch": %t,
+ "Shards": "-80,80-",
+ "StartedAt": "%s",
+ "CompletedAt": "%s",
+ "TableSummary": {
+ "t1": {
+ "TableName": "t1",
+ "State": "completed",
+ "RowsCompared": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ }
+ },
+ "Reports": {
+ "t1": {
+ "-80": {
+ "TableName": "t1",
+ "ProcessedRows": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ },
+ "80-": {
+ "TableName": "t1",
+ "ProcessedRows": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ }
+ }
+ }
+}
+
+`
+
+ testcases := []struct {
+ id string
+ shard1Res *sqltypes.Result
+ shard2Res *sqltypes.Result
+ report string
+ }{{
+ id: "1",
+ shard1Res: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+
+ `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0}`),
+ shard2Res: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+
+ `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0}`),
+ report: fmt.Sprintf(verbosefmt,
+ UUID, 6, false, starttime, comptime, 6, 6, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3, 0, 0, 0,
+ ),
+ }}
+
+ for _, tcase := range testcases {
+ t.Run(tcase.id, func(t *testing.T) {
+ shard1Res := &tabletmanagerdatapb.VDiffResponse{
+ Id: 1,
+ Output: sqltypes.ResultToProto3(tcase.shard1Res),
+ }
+ shard2Res := &tabletmanagerdatapb.VDiffResponse{
+ Id: 1,
+ Output: sqltypes.ResultToProto3(tcase.shard2Res),
+ }
+ env.tmc.setVDResults(env.tablets[200].tablet, req, shard1Res)
+ env.tmc.setVDResults(env.tablets[210].tablet, req, shard2Res)
+ output, err := env.wr.VDiff2(context.Background(), "target", env.workflow, vdiff.ShowAction, UUID, UUID, options)
+ require.NoError(t, err)
+ vds, err := displayVDiff2ShowSingleSummary(env.wr, options.ReportOptions.Format, "target", env.workflow, UUID, output, true /* verbose */)
+ require.NoError(t, err)
+ require.Equal(t, vdiff.CompletedState, vds)
+ logstr := env.cmdlog.String()
+ assert.Equal(t, tcase.report, logstr)
+ env.cmdlog.Clear()
+ })
+ }
+}
+
func TestGetStructNames(t *testing.T) {
type s struct {
A string
diff --git a/go/vt/vtctl/vdiff_env_test.go b/go/vt/vtctl/vdiff_env_test.go
new file mode 100644
index 00000000000..5fb854284ae
--- /dev/null
+++ b/go/vt/vtctl/vdiff_env_test.go
@@ -0,0 +1,299 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtctl
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "sync"
+ "testing"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/grpcclient"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/vttablet/queryservice"
+ "vitess.io/vitess/go/vt/vttablet/queryservice/fakes"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+ "vitess.io/vitess/go/vt/vttablet/tabletconntest"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+ "vitess.io/vitess/go/vt/wrangler"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
+
+const (
+ // vdiffStopPosition is the default stop position for the target vreplication.
+ // It can be overridden with the positons argument to newTestVDiffEnv.
+ vdiffStopPosition = "MySQL56/d834e6b8-7cbf-11ed-a1eb-0242ac120002:1-892"
+ // vdiffSourceGtid should be the position reported by the source side VStreamResults.
+ // It's expected to be higher the vdiffStopPosition.
+ vdiffSourceGtid = "MySQL56/d834e6b8-7cbf-11ed-a1eb-0242ac120002:1-893"
+ // vdiffTargetPrimaryPosition is the primary position of the target after
+ // vreplication has been synchronized.
+ vdiffTargetPrimaryPosition = "MySQL56/e34d6fb6-7cbf-11ed-a1eb-0242ac120002:1-892"
+)
+
+type testVDiffEnv struct {
+ wr *wrangler.Wrangler
+ workflow string
+ topoServ *topo.Server
+ cell string
+ tabletType topodatapb.TabletType
+ tmc *testVDiffTMClient
+ cmdlog *logutil.MemoryLogger
+
+ mu sync.Mutex
+ tablets map[int]*testVDiffTablet
+}
+
+//----------------------------------------------
+// testVDiffEnv
+
+func newTestVDiffEnv(t testing.TB, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv {
+ env := &testVDiffEnv{
+ workflow: "vdiffTest",
+ tablets: make(map[int]*testVDiffTablet),
+ topoServ: memorytopo.NewServer("cell"),
+ cell: "cell",
+ tabletType: topodatapb.TabletType_REPLICA,
+ tmc: newTestVDiffTMClient(),
+ cmdlog: logutil.NewMemoryLogger(),
+ }
+ env.wr = wrangler.NewTestWrangler(env.cmdlog, env.topoServ, env.tmc)
+
+ // Generate a unique dialer name.
+ dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000))
+ tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ if qs, ok := env.tablets[int(tablet.Alias.Uid)]; ok {
+ return qs, nil
+ }
+ return nil, fmt.Errorf("tablet %d not found", tablet.Alias.Uid)
+ })
+ tabletconntest.SetProtocol("go.vt.vtctl.vdiff_env_test", dialerName)
+
+ tabletID := 100
+ for _, shard := range sourceShards {
+ _ = env.addTablet(tabletID, "source", shard, topodatapb.TabletType_PRIMARY)
+ env.tmc.waitpos[tabletID+1] = vdiffStopPosition
+
+ tabletID += 10
+ }
+ tabletID = 200
+ for _, shard := range targetShards {
+ primary := env.addTablet(tabletID, "target", shard, topodatapb.TabletType_PRIMARY)
+
+ var rows []string
+ var posRows []string
+ for j, sourceShard := range sourceShards {
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: "source",
+ Shard: sourceShard,
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: query,
+ }},
+ },
+ }
+ rows = append(rows, fmt.Sprintf("%d|%v|||", j+1, bls))
+ position := vdiffStopPosition
+ if pos := positions[sourceShard+shard]; pos != "" {
+ position = pos
+ }
+ posRows = append(posRows, fmt.Sprintf("%v|%s", bls, position))
+
+ // vdiff.syncTargets. This actually happens after stopTargets.
+ // But this is one statement per stream.
+ env.tmc.setVRResults(
+ primary.tablet,
+ fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for vdiff' where id=%d", vdiffSourceGtid, j+1),
+ &sqltypes.Result{},
+ )
+ }
+ // migrater buildMigrationTargets
+ env.tmc.setVRResults(
+ primary.tablet,
+ "select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow='vdiffTest' and db_name='vt_target'",
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
+ rows...,
+ ),
+ )
+
+ // vdiff.stopTargets
+ env.tmc.setVRResults(primary.tablet, "update _vt.vreplication set state='Stopped', message='for vdiff' where db_name='vt_target' and workflow='vdiffTest'", &sqltypes.Result{})
+ env.tmc.setVRResults(
+ primary.tablet,
+ "select source, pos from _vt.vreplication where db_name='vt_target' and workflow='vdiffTest'",
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "source|pos",
+ "varchar|varchar"),
+ posRows...,
+ ),
+ )
+
+ // vdiff.syncTargets (continued)
+ env.tmc.vrpos[tabletID] = vdiffSourceGtid
+ env.tmc.pos[tabletID] = vdiffTargetPrimaryPosition
+
+ // vdiff.startQueryStreams
+ env.tmc.waitpos[tabletID+1] = vdiffTargetPrimaryPosition
+
+ // vdiff.restartTargets
+ env.tmc.setVRResults(primary.tablet, "update _vt.vreplication set state='Running', message='', stop_pos='' where db_name='vt_target' and workflow='vdiffTest'", &sqltypes.Result{})
+
+ tabletID += 10
+ }
+ env.cmdlog.Clear()
+ return env
+}
+
+func (env *testVDiffEnv) close() {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ for _, t := range env.tablets {
+ env.topoServ.DeleteTablet(context.Background(), t.tablet.Alias)
+ }
+ env.tablets = nil
+ env.cmdlog.Clear()
+ env.topoServ.Close()
+ env.wr = nil
+}
+
+func (env *testVDiffEnv) addTablet(id int, keyspace, shard string, tabletType topodatapb.TabletType) *testVDiffTablet {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: env.cell,
+ Uid: uint32(id),
+ },
+ Keyspace: keyspace,
+ Shard: shard,
+ KeyRange: &topodatapb.KeyRange{},
+ Type: tabletType,
+ PortMap: map[string]int32{
+ "test": int32(id),
+ },
+ }
+ env.tablets[id] = newTestVDiffTablet(tablet)
+ if err := env.topoServ.InitTablet(context.Background(), tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil {
+ panic(err)
+ }
+ if tabletType == topodatapb.TabletType_PRIMARY {
+ _, err := env.topoServ.UpdateShardFields(context.Background(), keyspace, shard, func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = tablet.Alias
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ }
+ return env.tablets[id]
+}
+
+//----------------------------------------------
+// testVDiffTablet
+
+type testVDiffTablet struct {
+ queryservice.QueryService
+ tablet *topodatapb.Tablet
+}
+
+func newTestVDiffTablet(tablet *topodatapb.Tablet) *testVDiffTablet {
+ return &testVDiffTablet{
+ QueryService: fakes.ErrorQueryService,
+ tablet: tablet,
+ }
+}
+
+func (tvt *testVDiffTablet) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error {
+ return callback(&querypb.StreamHealthResponse{
+ Serving: true,
+ Target: &querypb.Target{
+ Keyspace: tvt.tablet.Keyspace,
+ Shard: tvt.tablet.Shard,
+ TabletType: tvt.tablet.Type,
+ },
+ RealtimeStats: &querypb.RealtimeStats{},
+ })
+}
+
+//----------------------------------------------
+// testVDiffTMCclient
+
+type testVDiffTMClient struct {
+ tmclient.TabletManagerClient
+ vrQueries map[int]map[string]*querypb.QueryResult
+ vdRequests map[int]map[string]*tabletmanagerdatapb.VDiffResponse
+ waitpos map[int]string
+ vrpos map[int]string
+ pos map[int]string
+}
+
+func newTestVDiffTMClient() *testVDiffTMClient {
+ return &testVDiffTMClient{
+ vrQueries: make(map[int]map[string]*querypb.QueryResult),
+ vdRequests: make(map[int]map[string]*tabletmanagerdatapb.VDiffResponse),
+ waitpos: make(map[int]string),
+ vrpos: make(map[int]string),
+ pos: make(map[int]string),
+ }
+}
+
+func (tmc *testVDiffTMClient) setVRResults(tablet *topodatapb.Tablet, query string, result *sqltypes.Result) {
+ queries, ok := tmc.vrQueries[int(tablet.Alias.Uid)]
+ if !ok {
+ queries = make(map[string]*querypb.QueryResult)
+ tmc.vrQueries[int(tablet.Alias.Uid)] = queries
+ }
+ queries[query] = sqltypes.ResultToProto3(result)
+}
+
+func (tmc *testVDiffTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) {
+ result, ok := tmc.vrQueries[int(tablet.Alias.Uid)][query]
+ if !ok {
+ return nil, fmt.Errorf("query %q not found for tablet %d", query, tablet.Alias.Uid)
+ }
+ return result, nil
+}
+
+func (tmc *testVDiffTMClient) setVDResults(tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest, res *tabletmanagerdatapb.VDiffResponse) {
+ reqs, ok := tmc.vdRequests[int(tablet.Alias.Uid)]
+ if !ok {
+ reqs = make(map[string]*tabletmanagerdatapb.VDiffResponse)
+ tmc.vdRequests[int(tablet.Alias.Uid)] = reqs
+ }
+ reqs[req.VdiffUuid] = res
+}
+
+func (tmc *testVDiffTMClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) {
+ resp, ok := tmc.vdRequests[int(tablet.Alias.Uid)][req.VdiffUuid]
+ if !ok {
+ return nil, fmt.Errorf("request %+v not found for tablet %d", req, tablet.Alias.Uid)
+ }
+ return resp, nil
+}
diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go
index 143740267a5..93628b4d0f5 100644
--- a/go/vt/vtctl/vtctl.go
+++ b/go/vt/vtctl/vtctl.go
@@ -83,7 +83,6 @@ import (
"context"
"encoding/json"
"errors"
- "flag"
"fmt"
"io"
"math"
@@ -104,11 +103,21 @@ import (
"vitess.io/vitess/go/protoutil"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/textutil"
+ "vitess.io/vitess/go/vt/discovery"
hk "vitess.io/vitess/go/vt/hook"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/proto/vttime"
"vitess.io/vitess/go/vt/schema"
+ "vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -116,27 +125,41 @@ import (
"vitess.io/vitess/go/vt/vtctl/workflow"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/wrangler"
-
- tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vschemapb "vitess.io/vitess/go/vt/proto/vschema"
- vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/proto/vttime"
)
var (
// ErrUnknownCommand is returned for an unknown command
ErrUnknownCommand = errors.New("unknown command")
-)
-// Flags are exported for use in go/vt/vtctld.
-var (
- HealthCheckTopologyRefresh = flag.Duration("vtctl_healthcheck_topology_refresh", 30*time.Second, "refresh interval for re-reading the topology")
- HealthcheckRetryDelay = flag.Duration("vtctl_healthcheck_retry_delay", 5*time.Second, "delay before retrying a failed healthcheck")
- HealthCheckTimeout = flag.Duration("vtctl_healthcheck_timeout", time.Minute, "the health check timeout period")
+ // Flag variables.
+ healthCheckRetryDelay = 5 * time.Second
+ healthCheckTimeout = time.Minute
)
+func init() {
+ servenv.OnParseFor("vtctl", registerFlags)
+ servenv.OnParseFor("vtctld", registerFlags)
+}
+
+func registerFlags(fs *pflag.FlagSet) {
+ // TODO: https://github.com/vitessio/vitess/issues/11973
+ // Then remove this function and associated code (NewHealthCheck, servenv
+ // OnParseFor hooks, etc) entirely.
+ fs.Duration("vtctl_healthcheck_topology_refresh", 30*time.Second, "refresh interval for re-reading the topology")
+ fs.MarkDeprecated("vtctl_healthcheck_topology_refresh", "")
+
+ fs.DurationVar(&healthCheckRetryDelay, "vtctl_healthcheck_retry_delay", healthCheckRetryDelay, "delay before retrying a failed healthcheck")
+ fs.MarkDeprecated("vtctl_healthcheck_retry_delay", "This is used only by the legacy vtctld UI that is already deprecated and will be removed in the next release.")
+ fs.DurationVar(&healthCheckTimeout, "vtctl_healthcheck_timeout", healthCheckTimeout, "the health check timeout period")
+ fs.MarkDeprecated("vtctl_healthcheck_timeout", "This is used only by the legacy vtctld UI that is already deprecated and will be removed in the next release.")
+}
+
+// NewHealthCheck returns a healthcheck implementation based on the vtctl flags.
+// It is exported for use in go/vt/vtctld.
+func NewHealthCheck(ctx context.Context, ts *topo.Server, local string, cellsToWatch []string) discovery.HealthCheck {
+ return discovery.NewHealthCheck(ctx, healthCheckRetryDelay, healthCheckTimeout, ts, local, strings.Join(cellsToWatch, ","))
+}
+
type command struct {
name string
method func(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error
@@ -277,10 +300,12 @@ var commands = []commandGroup{
help: "Runs the given SQL command as a DBA on the remote tablet.",
},
{
- name: "VReplicationExec",
- method: commandVReplicationExec,
- params: "[--json] ",
- help: "Runs the given VReplication command on the remote tablet.",
+ name: "VReplicationExec",
+ method: commandVReplicationExec,
+ params: "[--json] ",
+ help: "Runs the given VReplication command on the remote tablet.",
+ deprecated: true,
+ deprecatedBy: "Workflow -- ",
},
},
},
@@ -436,27 +461,21 @@ var commands = []commandGroup{
{
name: "Reshard",
method: commandReshard,
- params: "[--source_shards=] [--target_shards=] [--cells=] [--tablet_types=] [--skip_schema_copy] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ",
- help: "Start a Resharding process. Example: Reshard --cells='zone1,alias1' --tablet_types='PRIMARY,REPLICA,RDONLY' ks.workflow001 -- '0' '-80,80-'",
+ params: "[--source_shards=] [--target_shards=] [--cells=] [--tablet_types=] [--on-ddl=] [--defer-secondary-keys] [--skip_schema_copy] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ",
+ help: "Start a Resharding process.",
},
{
name: "MoveTables",
method: commandMoveTables,
- params: "[--source=] [--tables=] [--cells=] [--tablet_types=] [--all] [--exclude=] [--auto_start] [--stop_after_copy] [--source_shards=] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ",
+ params: "[--source=] [--tables=] [--cells=] [--tablet_types=] [--all] [--exclude=] [--auto_start] [--stop_after_copy] [--defer-secondary-keys] [--on-ddl=] [--source_shards=] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ",
help: `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`,
},
{
name: "Migrate",
method: commandMigrate,
- params: "[--cells=] [--tablet_types=] --workflow= ",
+ params: "[--cells=] [--tablet_types=] [--defer-secondary-keys] --workflow= ",
help: `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`,
},
- {
- name: "DropSources",
- method: commandDropSources,
- params: "[--dry_run] [--rename_tables] ",
- help: "After a MoveTables or Resharding workflow cleanup unused artifacts like source tables, source shards and denylists",
- },
{
name: "CreateLookupVindex",
method: commandCreateLookupVindex,
@@ -478,21 +497,9 @@ var commands = []commandGroup{
{
name: "VDiff",
method: commandVDiff,
- params: "[--source_cell=] [--target_cell=] [--tablet_types=in_order:RDONLY,REPLICA,PRIMARY] [--filtered_replication_wait_time=30s] [--max_extra_rows_to_compare=1000] ",
+ params: "[--source_cell=] [--target_cell=] [--tablet_types=in_order:RDONLY,REPLICA,PRIMARY] [--limit=] [--tables=] [--format=json] [--auto-retry] [--verbose] [--max_extra_rows_to_compare=1000] [--filtered_replication_wait_time=30s] [--debug_query] [--only_pks] [--wait] [--wait-update-interval=1m] [] []",
help: "Perform a diff of all tables in the workflow",
},
- {
- name: "SwitchReads",
- method: commandSwitchReads,
- params: "[--cells=c1,c2,...] [--reverse] [--tablet_types=REPLICA,RDONLY] [--dry_run] ",
- help: "Switch read traffic for the specified workflow.",
- },
- {
- name: "SwitchWrites",
- method: commandSwitchWrites,
- params: "[--timeout=30s] [--reverse] [--reverse_replication=true] [--dry_run] ",
- help: "Switch write traffic for the specified workflow.",
- },
{
name: "FindAllShardsInKeyspace",
method: commandFindAllShardsInKeyspace,
@@ -708,6 +715,12 @@ var commands = []commandGroup{
params: " ",
help: "Outputs a JSON structure that contains information about the SrvKeyspace.",
},
+ {
+ name: "UpdateThrottlerConfig",
+ method: commandUpdateThrottlerConfig,
+ params: "[--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] ",
+ help: "Update the table throttler configuration for all cells and tablets of a given keyspace",
+ },
{
name: "GetSrvVSchema",
method: commandGetSrvVSchema,
@@ -732,16 +745,6 @@ var commands = []commandGroup{
},
},
},
- {
- "Workflow", []command{
- {
- name: "VExec",
- method: commandVExec,
- params: " --dry-run",
- help: "Runs query on all tablets in workflow. Example: VExec merchant.morders \"update _vt.vreplication set Status='Running'\"",
- },
- },
- },
{
"Workflow", []command{
{
@@ -1338,6 +1341,8 @@ func commandExecuteFetchAsDba(ctx context.Context, wr *wrangler.Wrangler, subFla
}
func commandVReplicationExec(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
+ wr.Logger().Printf("\nWARNING: VReplicationExec is deprecated and will be removed in a future release. Please use 'Workflow -- ' instead.\n\n")
+
json := subFlags.Bool("json", false, "Output JSON instead of human-readable table")
if err := subFlags.Parse(args); err != nil {
@@ -2048,88 +2053,12 @@ func commandValidateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlag
return wr.ValidateKeyspace(ctx, keyspace, *pingTablets)
}
-func useV1(args []string) bool {
- for _, arg := range args {
- if arg == "-v1" || arg == "--v1" {
- return true
- }
- }
- return false
-}
-
func commandReshard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if !useV1(args) {
- log.Infof("*** Using Reshard v2 flow ***")
- return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.ReshardWorkflow)
- }
- wr.Logger().Printf("*** The Reshard v1 flow is deprecated, consider using v2 commands instead, see https://vitess.io/docs/reference/vreplication/v2/ ***\n")
-
- cells := subFlags.String("cells", "", "Cell(s) or CellAlias(es) (comma-separated) to replicate from.")
- tabletTypes := subFlags.String("tablet_types", "", "Source tablet types to replicate from.")
- skipSchemaCopy := subFlags.Bool("skip_schema_copy", false, "Skip copying of schema to targets")
-
- autoStart := subFlags.Bool("auto_start", true, "If false, streams will start in the Stopped state and will need to be explicitly started")
- stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed")
- _ = subFlags.Bool("v1", true, "")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 3 {
- return fmt.Errorf("three arguments are required: , source_shards, target_shards")
- }
- keyspace, workflow, err := splitKeyspaceWorkflow(subFlags.Arg(0))
- if err != nil {
- return err
- }
- source := strings.Split(subFlags.Arg(1), ",")
- target := strings.Split(subFlags.Arg(2), ",")
- return wr.Reshard(ctx, keyspace, workflow, source, target, *skipSchemaCopy, *cells,
- *tabletTypes, *autoStart, *stopAfterCopy)
+ return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.ReshardWorkflow)
}
func commandMoveTables(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if !useV1(args) {
- log.Infof("*** Using MoveTables v2 flow ***")
- return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MoveTablesWorkflow)
- }
- wr.Logger().Printf("*** The MoveTables v1 flow is deprecated, consider using v2 commands instead, see https://vitess.io/docs/reference/vreplication/v2/ ***\n")
-
- workflow := subFlags.String("workflow", "", "Workflow name. Can be any descriptive string. Will be used to later migrate traffic via SwitchReads/SwitchWrites.")
- cells := subFlags.String("cells", "", "Cell(s) or CellAlias(es) (comma-separated) to replicate from.")
- tabletTypes := subFlags.String("tablet_types", "", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). Defaults to --vreplication_tablet_type parameter value for the tablet, which has the default value of in_order:REPLICA,PRIMARY.")
- allTables := subFlags.Bool("all", false, "Move all tables from the source keyspace")
- excludes := subFlags.String("exclude", "", "Tables to exclude (comma-separated) if --all is specified")
-
- autoStart := subFlags.Bool("auto_start", true, "If false, streams will start in the Stopped state and will need to be explicitly started")
- stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed")
- dropForeignKeys := subFlags.Bool("drop_foreign_keys", false, "If true, tables in the target keyspace will be created without foreign keys.")
- _ = subFlags.Bool("v1", true, "")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if *workflow == "" {
- return fmt.Errorf("a workflow name must be specified")
- }
- if !*allTables && len(*excludes) > 0 {
- return fmt.Errorf("you can only specify tables to exclude if all tables are to be moved (with --all)")
- }
- if *allTables {
- if subFlags.NArg() != 2 {
- return fmt.Errorf("two arguments are required: source_keyspace, target_keyspace")
- }
- } else {
- if subFlags.NArg() != 3 {
- return fmt.Errorf("three arguments are required: source_keyspace, target_keyspace, tableSpecs")
- }
- }
-
- source := subFlags.Arg(0)
- target := subFlags.Arg(1)
- tableSpecs := subFlags.Arg(2)
- return wr.MoveTables(ctx, *workflow, source, target, tableSpecs, *cells, *tabletTypes, *allTables,
- *excludes, *autoStart, *stopAfterCopy, "", *dropForeignKeys, "", nil)
+ return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MoveTablesWorkflow)
}
// VReplicationWorkflowAction defines subcommands passed to vtctl for movetables or reshard
@@ -2171,7 +2100,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
cells := subFlags.String("cells", "", "Cell(s) or CellAlias(es) (comma-separated) to replicate from.")
tabletTypes := subFlags.String("tablet_types", "in_order:REPLICA,PRIMARY", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). Defaults to --vreplication_tablet_type parameter value for the tablet, which has the default value of in_order:REPLICA,PRIMARY. Note: SwitchTraffic overrides this default and uses in_order:RDONLY,REPLICA,PRIMARY to switch all traffic by default.")
- dryRun := subFlags.Bool("dry_run", false, "Does a dry run of SwitchReads and only reports the actions to be taken. --dry_run is only supported for SwitchTraffic, ReverseTraffic and Complete.")
+ dryRun := subFlags.Bool("dry_run", false, "Does a dry run of SwitchTraffic and only reports the actions to be taken. --dry_run is only supported for SwitchTraffic, ReverseTraffic and Complete.")
timeout := subFlags.Duration("timeout", defaultWaitTime, "Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout. --timeout is only supported for SwitchTraffic and ReverseTraffic.")
reverseReplication := subFlags.Bool("reverse_replication", true, "Also reverse the replication (default true). --reverse_replication is only supported for SwitchTraffic.")
keepData := subFlags.Bool("keep_data", false, "Do not drop tables or shards (if true, only vreplication artifacts are cleaned up). --keep_data is only supported for Complete and Cancel.")
@@ -2181,6 +2110,9 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
dropForeignKeys := subFlags.Bool("drop_foreign_keys", false, "If true, tables in the target keyspace will be created without foreign keys.")
maxReplicationLagAllowed := subFlags.Duration("max_replication_lag_allowed", defaultMaxReplicationLagAllowed, "Allow traffic to be switched only if vreplication lag is below this (in seconds)")
+ onDDL := "IGNORE"
+ subFlags.StringVar(&onDDL, "on-ddl", onDDL, "What to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.")
+
// MoveTables and Migrate params
tables := subFlags.String("tables", "", "MoveTables only. A table spec or a list of tables. Either table_specs or --all needs to be specified.")
allTables := subFlags.Bool("all", false, "MoveTables only. Move all tables from the source keyspace. Either table_specs or --all needs to be specified.")
@@ -2197,15 +2129,13 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
// MoveTables and Reshard params
sourceShards := subFlags.String("source_shards", "", "Source shards")
*sourceShards = strings.TrimSpace(*sourceShards)
+ deferNonPKeys := subFlags.Bool("defer-secondary-keys", false, "Defer secondary index creation for a table until after it has been copied.")
// Reshard params
targetShards := subFlags.String("target_shards", "", "Reshard only. Target shards")
*targetShards = strings.TrimSpace(*targetShards)
skipSchemaCopy := subFlags.Bool("skip_schema_copy", false, "Reshard only. Skip copying of schema to target shards")
- _ = subFlags.Bool("v1", false, "Enables usage of v1 command structure. (default false). Must be added to run the command with --workflow")
- _ = subFlags.Bool("v2", true, "")
-
if err := subFlags.Parse(args); err != nil {
return err
}
@@ -2213,6 +2143,12 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
if subFlags.NArg() != 2 {
return fmt.Errorf("two arguments are needed: action, keyspace.workflow")
}
+
+ onDDL = strings.ToUpper(onDDL)
+ if _, ok := binlogdatapb.OnDDLAction_value[onDDL]; !ok {
+ return fmt.Errorf("invalid value for on-ddl: %v", onDDL)
+ }
+
action := subFlags.Arg(0)
ksWorkflow := subFlags.Arg(1)
target, workflowName, err := splitKeyspaceWorkflow(ksWorkflow)
@@ -2239,22 +2175,28 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
if err != nil {
return err
}
- s += fmt.Sprintf("Following vreplication streams are running for workflow %s.%s:\n\n", target, workflowName)
+ s += fmt.Sprintf("The following vreplication streams exist for workflow %s.%s:\n\n", target, workflowName)
for ksShard := range res.ShardStatuses {
statuses := res.ShardStatuses[ksShard].PrimaryReplicationStatuses
for _, st := range statuses {
- now := time.Now().Nanosecond()
msg := ""
- updateLag := int64(now) - st.TimeUpdated
- if updateLag > 0*1e9 {
- msg += " Vstream may not be running."
- }
- txLag := int64(now) - st.TransactionTimestamp
- msg += fmt.Sprintf(" VStream Lag: %ds.", txLag/1e9)
- if st.TransactionTimestamp > 0 { // if no events occur after copy phase, TransactionTimeStamp can be 0
- msg += fmt.Sprintf(" Tx time: %s.", time.Unix(st.TransactionTimestamp, 0).Format(time.ANSIC))
+ if st.State == "Error" {
+ msg += fmt.Sprintf(": %s.", st.Message)
+ } else if st.Pos == "" {
+ msg += ". VStream has not started."
+ } else {
+ now := time.Now().Nanosecond()
+ updateLag := int64(now) - st.TimeUpdated
+ if updateLag > 0*1e9 {
+ msg += ". VStream may not be running"
+ }
+ txLag := int64(now) - st.TransactionTimestamp
+ msg += fmt.Sprintf(". VStream Lag: %ds.", txLag/1e9)
+ if st.TransactionTimestamp > 0 { // if no events occur after copy phase, TransactionTimeStamp can be 0
+ msg += fmt.Sprintf(" Tx time: %s.", time.Unix(st.TransactionTimestamp, 0).Format(time.ANSIC))
+ }
}
- s += fmt.Sprintf("id=%d on %s: Status: %s.%s\n", st.ID, ksShard, st.State, msg)
+ s += fmt.Sprintf("id=%d on %s: Status: %s%s\n", st.ID, ksShard, st.State, msg)
}
}
wr.Logger().Printf("\n%s\n", s)
@@ -2334,11 +2276,13 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
default:
return fmt.Errorf("unknown workflow type passed: %v", workflowType)
}
+ vrwp.OnDDL = onDDL
+ vrwp.DeferSecondaryKeys = *deferNonPKeys
vrwp.Cells = *cells
vrwp.TabletTypes = *tabletTypes
case vReplicationWorkflowActionSwitchTraffic, vReplicationWorkflowActionReverseTraffic:
vrwp.Cells = *cells
- if userPassedFlag(subFlags, "tablet_types") {
+ if subFlags.Changed("tablet_types") {
vrwp.TabletTypes = *tabletTypes
} else {
// When no tablet types are specified we are supposed to switch all traffic so
@@ -2350,6 +2294,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
vrwp.MaxAllowedTransactionLagSeconds = int64(math.Ceil(maxReplicationLagAllowed.Seconds()))
case vReplicationWorkflowActionCancel:
vrwp.KeepData = *keepData
+ vrwp.KeepRoutingRules = *keepRoutingRules
case vReplicationWorkflowActionComplete:
switch workflowType {
case wrangler.MoveTablesWorkflow:
@@ -2377,9 +2322,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
if err != nil {
return err
}
- if copyProgress == nil {
- wr.Logger().Printf("\nCopy Completed.\n")
- } else {
+ if copyProgress != nil {
wr.Logger().Printf("\nCopy Progress (approx):\n")
var tables []string
for table := range *copyProgress {
@@ -2404,7 +2347,6 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
wr.Logger().Printf("\n%s\n", s)
}
return printDetails()
-
}
if *dryRun {
@@ -2448,7 +2390,6 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
for {
select {
case <-ctx.Done():
- errCh <- fmt.Errorf("workflow did not start within %s", (*timeout).String())
return
case <-ticker.C:
totalStreams, startedStreams, workflowErrors, err := wf.GetStreamCount()
@@ -2477,9 +2418,13 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl
return nil
}
wr.Logger().Printf("%d%% ... ", 100*progress.started/progress.total)
+ case <-timedCtx.Done():
+ wr.Logger().Printf("\nThe workflow did not start within %s. The workflow may simply be slow to start or there may be an issue.\n",
+ (*timeout).String())
+ wr.Logger().Printf("Check the status using the 'Workflow %s show' client command for details.\n", ksWorkflow)
+ return fmt.Errorf("timed out waiting for workflow to start")
case err := <-errCh:
wr.Logger().Error(err)
- cancelTimedCtx()
return err
case wfErrs := <-wfErrCh:
wr.Logger().Printf("Found problems with the streams created for this workflow:\n")
@@ -2565,9 +2510,9 @@ func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf
return wr.Materialize(ctx, ms)
}
-func useVDiffV2(args []string) bool {
+func useVDiffV1(args []string) bool {
for _, arg := range args {
- if arg == "-v2" || arg == "--v2" {
+ if arg == "-v1" || arg == "--v1" {
return true
}
}
@@ -2575,11 +2520,10 @@ func useVDiffV2(args []string) bool {
}
func commandVDiff(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if useVDiffV2(args) {
- log.Infof("*** Using (experimental) VDiff2 ***")
+ if !useVDiffV1(args) {
return commandVDiff2(ctx, wr, subFlags, args)
}
- _ = subFlags.Bool("v2", false, "Use VDiff2")
+ _ = subFlags.Bool("v1", false, "Use legacy VDiff v1")
sourceCell := subFlags.String("source_cell", "", "The source cell to compare from; default is any available cell")
targetCell := subFlags.String("target_cell", "", "The target cell to compare with; default is any available cell")
@@ -2632,123 +2576,6 @@ func splitKeyspaceWorkflow(in string) (keyspace, workflow string, err error) {
return splits[0], splits[1], nil
}
-func commandDropSources(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- wr.Logger().Printf("*** DropSources is deprecated. Consider using v2 commands instead, see https://vitess.io/docs/reference/vreplication/v2/ ***\n")
- dryRun := subFlags.Bool("dry_run", false, "Does a dry run of commandDropSources and only reports the actions to be taken")
- renameTables := subFlags.Bool("rename_tables", false, "Rename tables instead of dropping them")
- keepData := subFlags.Bool("keep_data", false, "Do not drop tables or shards (if true, only vreplication artifacts are cleaned up)")
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 1 {
- return fmt.Errorf(" is required")
- }
- keyspace, workflowName, err := splitKeyspaceWorkflow(subFlags.Arg(0))
- if err != nil {
- return err
- }
-
- removalType := workflow.DropTable
- if *renameTables {
- removalType = workflow.RenameTable
- }
-
- _, _, _ = dryRun, keyspace, workflowName
- dryRunResults, err := wr.DropSources(ctx, keyspace, workflowName, removalType, *keepData, false, false, *dryRun)
- if err != nil {
- return err
- }
- if *dryRun {
- wr.Logger().Printf("Dry Run results for commandDropSources run at %s\nParameters: %s\n\n", time.Now().Format(time.RFC822), strings.Join(args, " "))
- wr.Logger().Printf("%s\n", strings.Join(*dryRunResults, "\n"))
- }
- return nil
-}
-
-func commandSwitchReads(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- wr.Logger().Printf("*** SwitchReads is deprecated. Consider using v2 commands instead, see https://vitess.io/docs/reference/vreplication/v2/ ***\n")
-
- reverse := subFlags.Bool("reverse", false, "Moves the served tablet type backward instead of forward.")
- cellsStr := subFlags.String("cells", "", "Specifies a comma-separated list of cells to update")
- tabletTypes := subFlags.String("tablet_types", "rdonly,replica", "Tablet types to switch one or both of rdonly/replica")
- dryRun := subFlags.Bool("dry_run", false, "Does a dry run of SwitchReads and only reports the actions to be taken")
- if err := subFlags.Parse(args); err != nil {
- return err
- }
-
- tabletTypesArr := strings.Split(*tabletTypes, ",")
- var servedTypes []topodatapb.TabletType
- for _, tabletType := range tabletTypesArr {
- servedType, err := parseTabletType(tabletType, []topodatapb.TabletType{topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY})
- if err != nil {
- return err
- }
- servedTypes = append(servedTypes, servedType)
- }
- var cells []string
- if *cellsStr != "" {
- cells = strings.Split(*cellsStr, ",")
- }
- direction := workflow.DirectionForward
- if *reverse {
- direction = workflow.DirectionBackward
- }
- if subFlags.NArg() != 1 {
- return fmt.Errorf(" is required")
- }
- keyspace, workflowName, err := splitKeyspaceWorkflow(subFlags.Arg(0))
- if err != nil {
- return err
- }
- dryRunResults, err := wr.SwitchReads(ctx, keyspace, workflowName, servedTypes, cells, direction, *dryRun)
- if err != nil {
- return err
- }
- if *dryRun {
- wr.Logger().Printf("Dry Run results for SwitchReads run at %s\nParameters: %s\n\n", time.Now().Format(time.RFC822), strings.Join(args, " "))
- wr.Logger().Printf("%s\n", strings.Join(*dryRunResults, "\n"))
- }
- return nil
-}
-
-func commandSwitchWrites(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- wr.Logger().Printf("*** SwitchWrites is deprecated. Consider using v2 commands instead, see https://vitess.io/docs/reference/vreplication/v2/ ***\n")
-
- timeout := subFlags.Duration("timeout", 30*time.Second, "Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout.")
- filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "DEPRECATED Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout.")
- reverseReplication := subFlags.Bool("reverse_replication", true, "Also reverse the replication")
- cancel := subFlags.Bool("cancel", false, "Cancel the failed migration and serve from source")
- reverse := subFlags.Bool("reverse", false, "Reverse a previous SwitchWrites serve from source")
- dryRun := subFlags.Bool("dry_run", false, "Does a dry run of SwitchWrites and only reports the actions to be taken")
- if err := subFlags.Parse(args); err != nil {
- return err
- }
-
- if subFlags.NArg() != 1 {
- return fmt.Errorf(" is required")
- }
- keyspace, workflow, err := splitKeyspaceWorkflow(subFlags.Arg(0))
- if err != nil {
- return err
- }
- if filteredReplicationWaitTime != timeout {
- timeout = filteredReplicationWaitTime
- }
-
- journalID, dryRunResults, err := wr.SwitchWrites(ctx, keyspace, workflow, *timeout, *cancel, *reverse, *reverseReplication, *dryRun)
- if err != nil {
- return err
- }
- if *dryRun {
- wr.Logger().Printf("Dry Run results for SwitchWrites run at %s\nParameters: %s\n\n", time.Now().Format(time.RFC822), strings.Join(args, " "))
- wr.Logger().Printf("%s\n", strings.Join(*dryRunResults, "\n"))
- } else {
- wr.Logger().Infof("Migration Journal ID: %v", journalID)
- }
-
- return nil
-}
-
func commandFindAllShardsInKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
if err := subFlags.Parse(args); err != nil {
return err
@@ -3131,8 +2958,8 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla
arg = subFlags.Args()[2]
}
- query := ""
- uuid := ""
+ applySchemaQuery := ""
+ executeFetchQuery := ""
var bindErr error
switch command {
case "show":
@@ -3152,15 +2979,17 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla
condition, bindErr = sqlparser.ParseAndBind("migration_status=%a", sqltypes.StringBindVariable(arg))
default:
if schema.IsOnlineDDLUUID(arg) {
- uuid = arg
condition, bindErr = sqlparser.ParseAndBind("migration_uuid=%a", sqltypes.StringBindVariable(arg))
} else {
condition, bindErr = sqlparser.ParseAndBind("migration_context=%a", sqltypes.StringBindVariable(arg))
}
}
- order := " order by `id` ASC"
- if *orderBy == "descending" {
- order = " order by `id` DESC"
+ order := " order by `id` "
+ switch *orderBy {
+ case "desc", "descending":
+ order = order + "DESC"
+ default:
+ order = order + "ASC"
}
skipLimit := ""
@@ -3168,46 +2997,79 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla
skipLimit = fmt.Sprintf("LIMIT %v,%v", *skip, *limit)
}
- query = fmt.Sprintf(`select
+ executeFetchQuery = fmt.Sprintf(`select
*
from _vt.schema_migrations where %s %s %s`, condition, order, skipLimit)
case "retry":
if arg == "" {
return fmt.Errorf("UUID required")
}
- uuid = arg
- query, bindErr = sqlparser.ParseAndBind(`update _vt.schema_migrations set migration_status='retry' where migration_uuid=%a`, sqltypes.StringBindVariable(arg))
+ applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a retry`, sqltypes.StringBindVariable(arg))
case "complete":
if arg == "" {
return fmt.Errorf("UUID required")
}
- uuid = arg
- query, bindErr = sqlparser.ParseAndBind(`update _vt.schema_migrations set migration_status='complete' where migration_uuid=%a`, sqltypes.StringBindVariable(arg))
+ applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a complete`, sqltypes.StringBindVariable(arg))
case "cancel":
if arg == "" {
return fmt.Errorf("UUID required")
}
- uuid = arg
- query, bindErr = sqlparser.ParseAndBind(`update _vt.schema_migrations set migration_status='cancel' where migration_uuid=%a`, sqltypes.StringBindVariable(arg))
+ applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a cancel`, sqltypes.StringBindVariable(arg))
case "cancel-all":
if arg != "" {
return fmt.Errorf("UUID not allowed in %s", command)
}
- query = `update _vt.schema_migrations set migration_status='cancel-all'`
+ applySchemaQuery = `alter vitess_migration cancel all`
default:
return fmt.Errorf("Unknown OnlineDDL command: %s", command)
}
if bindErr != nil {
return fmt.Errorf("Error generating OnlineDDL query: %+v", bindErr)
}
- qr, err := wr.VExecResult(ctx, uuid, keyspace, query, false)
- if err != nil {
- return err
- }
- if *json {
- return printJSON(wr.Logger(), qr)
+
+ if applySchemaQuery != "" {
+ log.Info("Calling ApplySchema on VtctldServer")
+
+ resp, err := wr.VtctldServer().ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{
+ Keyspace: keyspace,
+ Sql: []string{applySchemaQuery},
+ SkipPreflight: true,
+ WaitReplicasTimeout: protoutil.DurationToProto(wrangler.DefaultWaitReplicasTimeout),
+ })
+ if err != nil {
+ return err
+ }
+ loggerWriter{wr.Logger()}.Printf("resp: %v\n", resp)
+ } else {
+ // This is a SELECT. We run this on all PRIMARY tablets of this keyspace, and return the combined result
+ resp, err := wr.VtctldServer().GetTablets(ctx, &vtctldatapb.GetTabletsRequest{
+ Cells: nil,
+ Strict: false,
+ Keyspace: keyspace,
+ TabletType: topodatapb.TabletType_PRIMARY,
+ })
+ if err != nil {
+ return err
+ }
+
+ tabletResults := map[string]*sqltypes.Result{}
+ for _, tablet := range resp.Tablets {
+ tabletAlias := topoproto.TabletAliasString(tablet.Alias)
+
+ qrproto, err := wr.ExecuteFetchAsDba(ctx, tablet.Alias, executeFetchQuery, 10000, false, false)
+ if err != nil {
+ return err
+ }
+ tabletResults[tabletAlias] = sqltypes.Proto3ToResult(qrproto)
+ }
+ // combine results. This loses sorting if there's more then 1 tablet
+ combinedResults := queryResultForTabletResults(tabletResults)
+ if *json {
+ printJSON(wr.Logger(), combinedResults)
+ } else {
+ printQueryResult(loggerWriter{wr.Logger()}, combinedResults)
+ }
}
- printQueryResult(loggerWriter{wr.Logger()}, qr)
return nil
}
@@ -3644,6 +3506,69 @@ func commandGetSrvKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags
return printJSON(wr.Logger(), cellKs)
}
+func commandUpdateThrottlerConfig(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) (err error) {
+ enable := subFlags.Bool("enable", false, "Enable the throttler")
+ disable := subFlags.Bool("disable", false, "Disable the throttler")
+ threshold := subFlags.Float64("threshold", 0, "threshold for the either default check (replication lag seconds) or custom check")
+ customQuery := subFlags.String("custom-query", "", "custom throttler check query")
+ checkAsCheckSelf := subFlags.Bool("check-as-check-self", false, "/throttler/check requests behave as is /throttler/check-self was called")
+ checkAsCheckShard := subFlags.Bool("check-as-check-shard", false, "use standard behavior for /throttler/check requests")
+
+ if err := subFlags.Parse(args); err != nil {
+ return err
+ }
+ customQuerySet := subFlags.Changed("custom-query")
+ if subFlags.NArg() != 1 {
+ return fmt.Errorf("the arguments are required for the SetThrottlerConfig command")
+ }
+ if *enable && *disable {
+ return fmt.Errorf("--enable and --disable are mutually exclusive")
+ }
+ if *checkAsCheckSelf && *checkAsCheckShard {
+ return fmt.Errorf("--check-as-check-self and --check-as-check-shard are mutually exclusive")
+ }
+
+ keyspace := subFlags.Arg(0)
+
+ update := func(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig {
+ if throttlerConfig == nil {
+ throttlerConfig = &topodatapb.ThrottlerConfig{}
+ }
+ if customQuerySet {
+ // custom query provided
+ throttlerConfig.CustomQuery = *customQuery
+ throttlerConfig.Threshold = *threshold // allowed to be zero/negative because who knows what kind of custom query this is
+ } else {
+ // no custom query, throttler works by querying replication lag. We only allow positive values
+ if *threshold > 0 {
+ throttlerConfig.Threshold = *threshold
+ }
+ }
+ if *enable {
+ throttlerConfig.Enabled = true
+ }
+ if *disable {
+ throttlerConfig.Enabled = false
+ }
+ if *checkAsCheckSelf {
+ throttlerConfig.CheckAsCheckSelf = true
+ }
+ if *checkAsCheckShard {
+ throttlerConfig.CheckAsCheckSelf = false
+ }
+ return throttlerConfig
+ }
+
+ ctx, unlock, lockErr := wr.TopoServer().LockKeyspace(ctx, keyspace, "UpdateThrottlerConfig")
+ if lockErr != nil {
+ return lockErr
+ }
+ defer unlock(&err)
+
+ _, err = wr.TopoServer().UpdateSrvKeyspaceThrottlerConfig(ctx, keyspace, []string{}, update)
+ return err
+}
+
func commandGetSrvVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
if err := subFlags.Parse(args); err != nil {
return err
@@ -3710,45 +3635,6 @@ func commandHelp(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fla
return nil
}
-func commandVExec(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- deprecationMessage := `VExec command will be deprecated in version v12. For Online DDL control, use "vtctl OnlineDDL" commands or SQL syntax`
- log.Warningf(deprecationMessage)
-
- json := subFlags.Bool("json", false, "Output JSON instead of human-readable table")
- dryRun := subFlags.Bool("dry_run", false, "Does a dry run of VExec and only reports the final query and list of tablets on which it will be applied")
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 2 {
- return fmt.Errorf("usage: VExec --dry-run keyspace.workflow \"\"")
- }
- keyspace, workflow, err := splitKeyspaceWorkflow(subFlags.Arg(0))
- if err != nil {
- return err
- }
- _, err = wr.TopoServer().GetKeyspace(ctx, keyspace)
- if err != nil {
- wr.Logger().Errorf("keyspace %s not found", keyspace)
- }
- query := subFlags.Arg(1)
-
- qr, err := wr.VExecResult(ctx, workflow, keyspace, query, *dryRun)
- if err != nil {
- return err
- }
- if *dryRun {
- return nil
- }
- if qr == nil {
- wr.Logger().Printf("no result returned\n")
- }
- if *json {
- return printJSON(wr.Logger(), qr)
- }
- printQueryResult(loggerWriter{wr.Logger()}, qr)
- return nil
-}
-
func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
dryRun := subFlags.Bool("dry_run", false, "Does a dry run of Workflow and only reports the final query and list of tablets on which the operation will be applied")
if err := subFlags.Parse(args); err != nil {
@@ -4061,14 +3947,25 @@ func PrintAllCommands(logger logutil.Logger) {
}
}
-// userPassedFlag returns true if the flag name given was provided
-// as a command-line argument by the user.
-func userPassedFlag(flags *pflag.FlagSet, name string) bool {
- passed := false
- flags.Visit(func(f *pflag.Flag) {
- if f.Name == name {
- passed = true
+// queryResultForTabletResults aggregates given results into a combined result set
+func queryResultForTabletResults(results map[string]*sqltypes.Result) *sqltypes.Result {
+ var qr = &sqltypes.Result{}
+ defaultFields := []*querypb.Field{{
+ Name: "Tablet",
+ Type: sqltypes.VarBinary,
+ }}
+ var row2 []sqltypes.Value
+ for tabletAlias, result := range results {
+ if qr.Fields == nil {
+ qr.Fields = append(qr.Fields, defaultFields...)
+ qr.Fields = append(qr.Fields, result.Fields...)
}
- })
- return passed
+ for _, row := range result.Rows {
+ row2 = nil
+ row2 = append(row2, sqltypes.NewVarBinary(tabletAlias))
+ row2 = append(row2, row...)
+ qr.Rows = append(qr.Rows, row2)
+ }
+ }
+ return qr
}
diff --git a/go/vt/vtctl/vtctl_env_test.go b/go/vt/vtctl/vtctl_env_test.go
new file mode 100644
index 00000000000..570088b9d13
--- /dev/null
+++ b/go/vt/vtctl/vtctl_env_test.go
@@ -0,0 +1,219 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtctl
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/grpcclient"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/vttablet/queryservice"
+ "vitess.io/vitess/go/vt/vttablet/queryservice/fakes"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+ "vitess.io/vitess/go/vt/vttablet/tabletconntest"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+ "vitess.io/vitess/go/vt/wrangler"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+)
+
+type testVTCtlEnv struct {
+ wr *wrangler.Wrangler
+ topoServ *topo.Server
+ cell string
+ tabletType topodatapb.TabletType
+ tmc *testVTCtlTMClient
+ cmdlog *logutil.MemoryLogger
+
+ mu sync.Mutex
+ tablets map[int]*testVTCtlTablet
+}
+
+// vtctlEnv has to be a global for RegisterDialer to work.
+var vtctlEnv *testVTCtlEnv
+
+func init() {
+ tabletconn.RegisterDialer("VTCtlTest", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) {
+ vtctlEnv.mu.Lock()
+ defer vtctlEnv.mu.Unlock()
+ if qs, ok := vtctlEnv.tablets[int(tablet.Alias.Uid)]; ok {
+ return qs, nil
+ }
+ return nil, fmt.Errorf("tablet %d not found", tablet.Alias.Uid)
+ })
+}
+
+//----------------------------------------------
+// testVTCtlEnv
+
+func newTestVTCtlEnv() *testVTCtlEnv {
+ tabletconntest.SetProtocol("go.vt.vtctl.vtctl_env_test", "VTCtlTest")
+ cellName := "cell1"
+ env := &testVTCtlEnv{
+ tablets: make(map[int]*testVTCtlTablet),
+ topoServ: memorytopo.NewServer(cellName),
+ cell: cellName,
+ tabletType: topodatapb.TabletType_REPLICA,
+ tmc: newTestVTCtlTMClient(),
+ cmdlog: logutil.NewMemoryLogger(),
+ }
+ env.wr = wrangler.NewTestWrangler(env.cmdlog, env.topoServ, env.tmc)
+ return env
+}
+
+func (env *testVTCtlEnv) close() {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ for _, t := range env.tablets {
+ env.topoServ.DeleteTablet(context.Background(), t.tablet.Alias)
+ }
+ env.tablets = nil
+ env.cmdlog.Clear()
+ env.tmc.clearResults()
+ env.topoServ.Close()
+ env.wr = nil
+}
+
+func (env *testVTCtlEnv) addTablet(id int, keyspace, shard string, keyRange *topodatapb.KeyRange, tabletType topodatapb.TabletType) *testVTCtlTablet {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ ctx := context.Background()
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: env.cell,
+ Uid: uint32(id),
+ },
+ Keyspace: keyspace,
+ Shard: shard,
+ KeyRange: keyRange,
+ Type: tabletType,
+ PortMap: map[string]int32{
+ "test": int32(id),
+ },
+ }
+ env.tablets[id] = newTestVTCtlTablet(tablet)
+ if err := env.topoServ.InitTablet(ctx, tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil {
+ panic(err)
+ }
+ if tabletType == topodatapb.TabletType_PRIMARY {
+ _, err := env.topoServ.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = tablet.Alias
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ emptySrvVSchema := &vschemapb.SrvVSchema{
+ RoutingRules: &vschemapb.RoutingRules{},
+ ShardRoutingRules: &vschemapb.ShardRoutingRules{},
+ }
+ if err = env.topoServ.UpdateSrvVSchema(ctx, env.cell, emptySrvVSchema); err != nil {
+ panic(err)
+ }
+ }
+ return env.tablets[id]
+}
+
+//----------------------------------------------
+// testVTCtlTablet
+
+type testVTCtlTablet struct {
+ queryservice.QueryService
+ tablet *topodatapb.Tablet
+}
+
+func newTestVTCtlTablet(tablet *topodatapb.Tablet) *testVTCtlTablet {
+ return &testVTCtlTablet{
+ QueryService: fakes.ErrorQueryService,
+ tablet: tablet,
+ }
+}
+
+func (tvt *testVTCtlTablet) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error {
+ return callback(&querypb.StreamHealthResponse{
+ Serving: true,
+ Target: &querypb.Target{
+ Keyspace: tvt.tablet.Keyspace,
+ Shard: tvt.tablet.Shard,
+ TabletType: tvt.tablet.Type,
+ },
+ RealtimeStats: &querypb.RealtimeStats{},
+ })
+}
+
+//----------------------------------------------
+// testVTCtlTMClient
+
+type testVTCtlTMClient struct {
+ tmclient.TabletManagerClient
+ vrQueries map[int]map[string]*querypb.QueryResult
+ dbaQueries map[int]map[string]*querypb.QueryResult
+}
+
+func newTestVTCtlTMClient() *testVTCtlTMClient {
+ return &testVTCtlTMClient{
+ vrQueries: make(map[int]map[string]*querypb.QueryResult),
+ dbaQueries: make(map[int]map[string]*querypb.QueryResult),
+ }
+}
+
+func (tmc *testVTCtlTMClient) setVRResults(tablet *topodatapb.Tablet, query string, result *sqltypes.Result) {
+ queries, ok := tmc.vrQueries[int(tablet.Alias.Uid)]
+ if !ok {
+ queries = make(map[string]*querypb.QueryResult)
+ tmc.vrQueries[int(tablet.Alias.Uid)] = queries
+ }
+ queries[query] = sqltypes.ResultToProto3(result)
+}
+
+func (tmc *testVTCtlTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) {
+ result, ok := tmc.vrQueries[int(tablet.Alias.Uid)][query]
+ if !ok {
+ return nil, fmt.Errorf("query %q not found for VReplicationExec() on tablet %d", query, tablet.Alias.Uid)
+ }
+ return result, nil
+}
+
+func (tmc *testVTCtlTMClient) setDBAResults(tablet *topodatapb.Tablet, query string, result *sqltypes.Result) {
+ queries, ok := tmc.dbaQueries[int(tablet.Alias.Uid)]
+ if !ok {
+ queries = make(map[string]*querypb.QueryResult)
+ tmc.dbaQueries[int(tablet.Alias.Uid)] = queries
+ }
+ queries[query] = sqltypes.ResultToProto3(result)
+}
+
+func (tmc *testVTCtlTMClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) {
+ result, ok := tmc.dbaQueries[int(tablet.Alias.Uid)][string(req.Query)]
+ if !ok {
+ return nil, fmt.Errorf("query %q not found for ExecuteFetchAsDba() on tablet %d", req.Query, tablet.Alias.Uid)
+ }
+ return result, nil
+}
+
+func (tmc *testVTCtlTMClient) clearResults() {
+ tmc.vrQueries = make(map[int]map[string]*querypb.QueryResult)
+ tmc.dbaQueries = make(map[int]map[string]*querypb.QueryResult)
+}
diff --git a/go/vt/vtctl/vtctl_test.go b/go/vt/vtctl/vtctl_test.go
new file mode 100644
index 00000000000..900ae441e25
--- /dev/null
+++ b/go/vt/vtctl/vtctl_test.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtctl
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/sqltypes"
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/wrangler"
+)
+
+// TestMoveTables tests the the MoveTables client command
+// via the commandVRWorkflow() cmd handler.
+// This currently only tests the Progress action (which is
+// a parent of the Show action) but it can be used to test
+// other actions as well.
+func TestMoveTables(t *testing.T) {
+ vrID := 1
+ shard := "0"
+ sourceKs := "sourceks"
+ targetKs := "targetks"
+ table := "customer"
+ wf := "testwf"
+ ksWf := fmt.Sprintf("%s.%s", targetKs, wf)
+ minTableSize := 16384 // a single 16KiB InnoDB page
+ ctx := context.Background()
+ env := newTestVTCtlEnv()
+ defer env.close()
+ source := env.addTablet(100, sourceKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY)
+ target := env.addTablet(200, targetKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY)
+ sourceCol := fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"%s" filter:"select * from %s"}}`,
+ sourceKs, shard, table, table)
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: sourceKs,
+ Shard: shard,
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: table,
+ Filter: fmt.Sprintf("select * from %s", table),
+ }},
+ },
+ }
+ now := time.Now().UTC().Unix()
+ expectGlobalResults := func() {
+ env.tmc.setVRResults(
+ target.tablet,
+ fmt.Sprintf("select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow='%s' and db_name='vt_%s'",
+ wf, targetKs),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
+ fmt.Sprintf("%d|%s||%s|primary|%d|%d",
+ vrID, sourceCol, env.cell, binlogdatapb.VReplicationWorkflowType_MoveTables, binlogdatapb.VReplicationWorkflowSubType_None),
+ ),
+ )
+ }
+
+ tests := []struct {
+ name string
+ workflowType wrangler.VReplicationWorkflowType
+ args []string
+ expectResults func()
+ want string
+ }{
+ {
+ name: "NotStarted",
+ workflowType: wrangler.MoveTablesWorkflow,
+ args: []string{"Progress", ksWf},
+ expectResults: func() {
+ env.tmc.setVRResults(
+ target.tablet,
+ fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)",
+ vrID, vrID),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name|lastpk",
+ "varchar|varbinary"),
+ fmt.Sprintf("%s|", table),
+ ),
+ )
+ env.tmc.setDBAResults(
+ target.tablet,
+ fmt.Sprintf("select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = %d",
+ vrID),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name",
+ "varchar"),
+ table,
+ ),
+ )
+ env.tmc.setVRResults(
+ target.tablet,
+ fmt.Sprintf("select id, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, time_heartbeat, time_throttled, component_throttled, message, tags, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'",
+ targetKs, wf),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|varchar|varchar|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|varchar|int64|int64|int64"),
+ fmt.Sprintf("%d|%s|||0|Running|vt_%s|0|0|0|0||||%d|%d",
+ vrID, bls, sourceKs, binlogdatapb.VReplicationWorkflowType_MoveTables, binlogdatapb.VReplicationWorkflowSubType_None),
+ ),
+ )
+ env.tmc.setDBAResults(
+ target.tablet,
+ fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')",
+ targetKs, table),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name|table_rows|data_length",
+ "varchar|int64|int64"),
+ fmt.Sprintf("%s|0|%d", table, minTableSize),
+ ),
+ )
+ env.tmc.setDBAResults(
+ source.tablet,
+ fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')",
+ sourceKs, table),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name|table_rows|data_length",
+ "varchar|int64|int64"),
+ fmt.Sprintf("%s|10|%d", table, minTableSize),
+ ),
+ )
+ },
+ want: fmt.Sprintf("\nCopy Progress (approx):\n\n\ncustomer: rows copied 0/10 (0%%), size copied 16384/16384 (100%%)\n\n\n\nThe following vreplication streams exist for workflow %s:\n\nid=%d on %s/%s-0000000%d: Status: Copying. VStream has not started.\n\n\n",
+ ksWf, vrID, shard, env.cell, target.tablet.Alias.Uid),
+ },
+ {
+ name: "Error",
+ workflowType: wrangler.MoveTablesWorkflow,
+ args: []string{"Progress", ksWf},
+ expectResults: func() {
+ env.tmc.setVRResults(
+ target.tablet,
+ fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)",
+ vrID, vrID),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name|lastpk",
+ "varchar|varbinary"),
+ fmt.Sprintf("%s|", table),
+ ),
+ )
+ env.tmc.setDBAResults(
+ target.tablet,
+ fmt.Sprintf("select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = %d",
+ vrID),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name",
+ "varchar"),
+ table,
+ ),
+ )
+ env.tmc.setVRResults(
+ target.tablet,
+ fmt.Sprintf("select id, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, time_heartbeat, time_throttled, component_throttled, message, tags, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'",
+ targetKs, wf),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|varchar|varchar|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|varchar|int64|int64|int64"),
+ fmt.Sprintf("%d|%s|||0|Error|vt_%s|0|0|0|0||Duplicate entry '6' for key 'customer.PRIMARY' (errno 1062) (sqlstate 23000) during query: insert into customer(customer_id,email) values (6,'mlord@planetscale.com')||%d|%d",
+ vrID, bls, sourceKs, binlogdatapb.VReplicationWorkflowType_MoveTables, binlogdatapb.VReplicationWorkflowSubType_None),
+ ),
+ )
+ env.tmc.setDBAResults(
+ target.tablet,
+ fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')",
+ targetKs, table),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name|table_rows|data_length",
+ "varchar|int64|int64"),
+ fmt.Sprintf("%s|5|%d", table, minTableSize),
+ ),
+ )
+ env.tmc.setDBAResults(
+ source.tablet,
+ fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')",
+ sourceKs, table),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name|table_rows|data_length",
+ "varchar|int64|int64"),
+ fmt.Sprintf("%s|10|%d", table, minTableSize),
+ ),
+ )
+ },
+ want: fmt.Sprintf("\nCopy Progress (approx):\n\n\ncustomer: rows copied 5/10 (50%%), size copied 16384/16384 (100%%)\n\n\n\nThe following vreplication streams exist for workflow %s:\n\nid=%d on %s/%s-0000000%d: Status: Error: Duplicate entry '6' for key 'customer.PRIMARY' (errno 1062) (sqlstate 23000) during query: insert into customer(customer_id,email) values (6,'mlord@planetscale.com').\n\n\n",
+ ksWf, vrID, shard, env.cell, target.tablet.Alias.Uid),
+ },
+ {
+ name: "Running",
+ workflowType: wrangler.MoveTablesWorkflow,
+ args: []string{"Progress", ksWf},
+ expectResults: func() {
+ env.tmc.setVRResults(
+ target.tablet,
+ fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)",
+ vrID, vrID),
+ &sqltypes.Result{},
+ )
+ env.tmc.setDBAResults(
+ target.tablet,
+ fmt.Sprintf("select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = %d",
+ vrID),
+ &sqltypes.Result{},
+ )
+ env.tmc.setVRResults(
+ target.tablet,
+ fmt.Sprintf("select id, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, time_heartbeat, time_throttled, component_throttled, message, tags, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'",
+ targetKs, wf),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|varchar|varchar|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|varchar|int64|int64|int64"),
+ fmt.Sprintf("%d|%s|MySQL56/4ec30b1e-8ee2-11ed-a1eb-0242ac120002:1-15||0|Running|vt_%s|%d|%d|%d|0||||%d|%d",
+ vrID, bls, sourceKs, now, now, now, binlogdatapb.VReplicationWorkflowType_MoveTables, binlogdatapb.VReplicationWorkflowSubType_None),
+ ),
+ )
+ },
+ want: fmt.Sprintf("/\nThe following vreplication streams exist for workflow %s:\n\nid=%d on %s/%s-0000000%d: Status: Running. VStream Lag: .* Tx time: .*",
+ ksWf, vrID, shard, env.cell, target.tablet.Alias.Uid),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ subFlags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+ expectGlobalResults()
+ tt.expectResults()
+ err := commandVRWorkflow(ctx, env.wr, subFlags, tt.args, tt.workflowType)
+ require.NoError(t, err)
+ if strings.HasPrefix(tt.want, "/") {
+ require.Regexp(t, tt.want[1:], env.cmdlog.String())
+ } else {
+ require.Equal(t, tt.want, env.cmdlog.String())
+ }
+ env.cmdlog.Clear()
+ env.tmc.clearResults()
+ })
+ }
+}
diff --git a/go/vt/vtctl/workflow.go b/go/vt/vtctl/workflow.go
deleted file mode 100644
index 88a4053f1c1..00000000000
--- a/go/vt/vtctl/workflow.go
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vtctl
-
-import (
- "context"
- "fmt"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/vt/workflow"
- "vitess.io/vitess/go/vt/wrangler"
-)
-
-// This file contains the workflows command group for vtctl.
-
-const workflowsGroupName = "Workflows"
-
-var (
- // WorkflowManager contains our manager. It needs to be set or else all
- // commands will be disabled.
- WorkflowManager *workflow.Manager
-)
-
-func init() {
- addCommandGroup(workflowsGroupName)
-
- addCommand(workflowsGroupName, command{
- name: "WorkflowCreate",
- method: commandWorkflowCreate,
- params: "[--skip_start] [parameters...]",
- help: "Creates the workflow with the provided parameters. The workflow is also started, unless -skip_start is specified.",
- })
- addCommand(workflowsGroupName, command{
- name: "WorkflowStart",
- method: commandWorkflowStart,
- params: "",
- help: "Starts the workflow.",
- })
- addCommand(workflowsGroupName, command{
- name: "WorkflowStop",
- method: commandWorkflowStop,
- params: "",
- help: "Stops the workflow.",
- })
- addCommand(workflowsGroupName, command{
- name: "WorkflowDelete",
- method: commandWorkflowDelete,
- params: "",
- help: "Deletes the finished or not started workflow.",
- })
- addCommand(workflowsGroupName, command{
- name: "WorkflowWait",
- method: commandWorkflowWait,
- params: "",
- help: "Waits for the workflow to finish.",
- })
-
- addCommand(workflowsGroupName, command{
- name: "WorkflowTree",
- method: commandWorkflowTree,
- params: "",
- help: "Displays a JSON representation of the workflow tree.",
- })
- addCommand(workflowsGroupName, command{
- name: "WorkflowAction",
- method: commandWorkflowAction,
- params: " ",
- help: "Sends the provided action name on the specified path.",
- })
-}
-
-func commandWorkflowCreate(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if WorkflowManager == nil {
- return fmt.Errorf("no workflow.Manager registered")
- }
-
- skipStart := subFlags.Bool("skip_start", false, "If set, the workflow will not be started.")
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() < 1 {
- return fmt.Errorf("the argument is required for the WorkflowCreate command")
- }
- factoryName := subFlags.Arg(0)
-
- uuid, err := WorkflowManager.Create(ctx, factoryName, subFlags.Args()[1:])
- if err != nil {
- return err
- }
- wr.Logger().Printf("uuid: %v\n", uuid)
-
- if !*skipStart {
- return WorkflowManager.Start(ctx, uuid)
- }
- return nil
-}
-
-func commandWorkflowStart(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if WorkflowManager == nil {
- return fmt.Errorf("no workflow.Manager registered")
- }
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 1 {
- return fmt.Errorf("the argument is required for the WorkflowStart command")
- }
- uuid := subFlags.Arg(0)
- return WorkflowManager.Start(ctx, uuid)
-}
-
-func commandWorkflowStop(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if WorkflowManager == nil {
- return fmt.Errorf("no workflow.Manager registered")
- }
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 1 {
- return fmt.Errorf("the argument is required for the WorkflowStop command")
- }
- uuid := subFlags.Arg(0)
- return WorkflowManager.Stop(ctx, uuid)
-}
-
-func commandWorkflowDelete(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if WorkflowManager == nil {
- return fmt.Errorf("no workflow.Manager registered")
- }
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 1 {
- return fmt.Errorf("the argument is required for the WorkflowDelete command")
- }
- uuid := subFlags.Arg(0)
- return WorkflowManager.Delete(ctx, uuid)
-}
-
-func commandWorkflowWait(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if WorkflowManager == nil {
- return fmt.Errorf("no workflow.Manager registered")
- }
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 1 {
- return fmt.Errorf("the argument is required for the WorkflowWait command")
- }
- uuid := subFlags.Arg(0)
- return WorkflowManager.Wait(ctx, uuid)
-}
-
-func commandWorkflowTree(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if WorkflowManager == nil {
- return fmt.Errorf("no workflow.Manager registered")
- }
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 0 {
- return fmt.Errorf("the WorkflowTree command takes no parameter")
- }
-
- tree, err := WorkflowManager.NodeManager().GetFullTree()
- if err != nil {
- return err
- }
- wr.Logger().Printf("%v\n", string(tree))
- return nil
-}
-
-func commandWorkflowAction(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
- if WorkflowManager == nil {
- return fmt.Errorf("no workflow.Manager registered")
- }
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 2 {
- return fmt.Errorf("the and arguments are required for the WorkflowAction command")
- }
- ap := &workflow.ActionParameters{
- Path: subFlags.Arg(0),
- Name: subFlags.Arg(1),
- }
-
- return WorkflowManager.NodeManager().Action(ctx, ap)
-}
diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go
index 005c923ffe5..b26e198c2cd 100644
--- a/go/vt/vtctl/workflow/server.go
+++ b/go/vt/vtctl/workflow/server.go
@@ -294,7 +294,9 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
time_updated,
transaction_timestamp,
message,
- tags
+ tags,
+ workflow_type,
+ workflow_sub_type
FROM
_vt.vreplication
%s`,
@@ -310,9 +312,9 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
m := sync.Mutex{} // guards access to the following maps during concurrent calls to scanWorkflow
workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results))
sourceKeyspaceByWorkflow := make(map[string]string, len(results))
- sourceShardsByWorkflow := make(map[string]sets.String, len(results))
+ sourceShardsByWorkflow := make(map[string]sets.Set[string], len(results))
targetKeyspaceByWorkflow := make(map[string]string, len(results))
- targetShardsByWorkflow := make(map[string]sets.String, len(results))
+ targetShardsByWorkflow := make(map[string]sets.Set[string], len(results))
maxVReplicationLagByWorkflow := make(map[string]float64, len(results))
// We guarantee the following invariants when this function is called for a
@@ -322,7 +324,7 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
// - sourceShardsByWorkflow[workflow.Name] != nil
// - targetShardsByWorkflow[workflow.Name] != nil
// - workflow.ShardStatuses != nil
- scanWorkflow := func(ctx context.Context, workflow *vtctldatapb.Workflow, row []sqltypes.Value, tablet *topo.TabletInfo) error {
+ scanWorkflow := func(ctx context.Context, workflow *vtctldatapb.Workflow, row sqltypes.RowNamedValues, tablet *topo.TabletInfo) error {
span, ctx := trace.NewSpan(ctx, "workflow.Server.scanWorkflow")
defer span.Finish()
@@ -332,13 +334,13 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
span.Annotate("workflow", workflow.Name)
span.Annotate("tablet_alias", tablet.AliasString())
- id, err := evalengine.ToInt64(row[0])
+ id, err := evalengine.ToInt64(row["id"])
if err != nil {
return err
}
var bls binlogdatapb.BinlogSource
- rowBytes, err := row[2].ToBytes()
+ rowBytes, err := row["source"].ToBytes()
if err != nil {
return err
}
@@ -346,28 +348,30 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
return err
}
- pos := row[3].ToString()
- stopPos := row[4].ToString()
- state := row[6].ToString()
- dbName := row[7].ToString()
+ pos := row["pos"].ToString()
+ stopPos := row["stop_pos"].ToString()
+ state := row["state"].ToString()
+ dbName := row["db_name"].ToString()
- timeUpdatedSeconds, err := evalengine.ToInt64(row[8])
+ timeUpdatedSeconds, err := evalengine.ToInt64(row["time_updated"])
if err != nil {
return err
}
- transactionTimeSeconds, err := evalengine.ToInt64(row[9])
+ transactionTimeSeconds, err := evalengine.ToInt64(row["transaction_timestamp"])
if err != nil {
return err
}
- message := row[10].ToString()
+ message := row["message"].ToString()
- tags := row[11].ToString()
+ tags := row["tags"].ToString()
var tagArray []string
if tags != "" {
tagArray = strings.Split(tags, ",")
}
+ workflowType, _ := row["workflow_type"].ToInt64()
+ workflowSubType, _ := row["workflow_sub_type"].ToInt64()
stream := &vtctldatapb.Workflow_Stream{
Id: id,
Shard: tablet.Shard,
@@ -386,7 +390,8 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
Message: message,
Tags: tagArray,
}
-
+ workflow.WorkflowType = binlogdatapb.VReplicationWorkflowType_name[int32(workflowType)]
+ workflow.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType_name[int32(workflowSubType)]
stream.CopyStates, err = s.getWorkflowCopyStates(ctx, tablet, id)
if err != nil {
return err
@@ -481,8 +486,8 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
// to a workflow we're already aggregating, or if it's a workflow we
// haven't seen yet for that shard primary. We use the workflow name to
// dedupe for this.
- for _, row := range qr.Rows {
- workflowName := row[1].ToString()
+ for _, row := range qr.Named().Rows {
+ workflowName := row["workflow"].ToString()
workflow, ok := workflowsMap[workflowName]
if !ok {
workflow = &vtctldatapb.Workflow{
@@ -491,12 +496,12 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows
}
workflowsMap[workflowName] = workflow
- sourceShardsByWorkflow[workflowName] = sets.NewString()
- targetShardsByWorkflow[workflowName] = sets.NewString()
+ sourceShardsByWorkflow[workflowName] = sets.New[string]()
+ targetShardsByWorkflow[workflowName] = sets.New[string]()
}
scanWorkflowWg.Add(1)
- go func(ctx context.Context, workflow *vtctldatapb.Workflow, row []sqltypes.Value, tablet *topo.TabletInfo) {
+ go func(ctx context.Context, workflow *vtctldatapb.Workflow, row sqltypes.RowNamedValues, tablet *topo.TabletInfo) {
defer scanWorkflowWg.Done()
if err := scanWorkflow(ctx, workflow, row, tablet); err != nil {
scanWorkflowErrors.RecordError(err)
@@ -677,12 +682,12 @@ ORDER BY
workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{
Keyspace: sourceKeyspace,
- Shards: sourceShards.List(),
+ Shards: sets.List(sourceShards),
}
workflow.Target = &vtctldatapb.Workflow_ReplicationLocation{
Keyspace: targetKeyspace,
- Shards: targetShards.List(),
+ Shards: sets.List(targetShards),
}
workflow.MaxVReplicationLag = int64(maxVReplicationLag)
@@ -722,7 +727,7 @@ func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletI
span.Annotate("tablet_alias", tablet.AliasString())
span.Annotate("vrepl_id", id)
- query := fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d", id)
+ query := fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)", id, id)
qr, err := s.tmc.VReplicationExec(ctx, tablet.Tablet, query)
if err != nil {
return nil, err
diff --git a/go/vt/vtctl/workflow/state.go b/go/vt/vtctl/workflow/state.go
index 2841cd98a1a..613f82d0b43 100644
--- a/go/vt/vtctl/workflow/state.go
+++ b/go/vt/vtctl/workflow/state.go
@@ -41,5 +41,7 @@ type State struct {
WritesSwitched bool
// Partial MoveTables info
- WritesPartiallySwitched bool
+ IsPartialMigration bool
+ ShardsAlreadySwitched []string
+ ShardsNotYetSwitched []string
}
diff --git a/go/vt/vtctl/workflow/stream_migrator.go b/go/vt/vtctl/workflow/stream_migrator.go
index c5e45a06bb2..8266b59aea9 100644
--- a/go/vt/vtctl/workflow/stream_migrator.go
+++ b/go/vt/vtctl/workflow/stream_migrator.go
@@ -199,7 +199,7 @@ func (sm *StreamMigrator) StopStreams(ctx context.Context) ([]string, error) {
/* tablet streams */
func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.TabletInfo, constraint string) ([]*VReplicationStream, error) {
- query := fmt.Sprintf("select id, workflow, source, pos, workflow_type, workflow_sub_type from _vt.vreplication where db_name=%s and workflow != %s",
+ query := fmt.Sprintf("select id, workflow, source, pos, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where db_name=%s and workflow != %s",
encodeString(ti.DbName()), encodeString(sm.ts.ReverseWorkflowName()))
if constraint != "" {
query += fmt.Sprintf(" and %s", constraint)
@@ -238,6 +238,11 @@ func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.Tablet
return nil, err
}
+ deferSecondaryKeys, err := row["defer_secondary_keys"].ToBool()
+ if err != nil {
+ return nil, err
+ }
+
var bls binlogdatapb.BinlogSource
rowBytes, err := row["source"].ToBytes()
if err != nil {
@@ -263,12 +268,13 @@ func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.Tablet
}
tabletStreams = append(tabletStreams, &VReplicationStream{
- ID: uint32(id),
- Workflow: workflowName,
- BinlogSource: &bls,
- Position: pos,
- WorkflowType: binlogdatapb.VReplicationWorkflowType(workflowType),
- WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType(workflowSubType),
+ ID: uint32(id),
+ Workflow: workflowName,
+ BinlogSource: &bls,
+ Position: pos,
+ WorkflowType: binlogdatapb.VReplicationWorkflowType(workflowType),
+ WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType(workflowSubType),
+ DeferSecondaryKeys: deferSecondaryKeys,
})
}
return tabletStreams, nil
@@ -315,7 +321,7 @@ func (sm *StreamMigrator) readSourceStreams(ctx context.Context, cancelMigrate b
return nil
}
- query := fmt.Sprintf("select vrepl_id from _vt.copy_state where vrepl_id in %s", VReplicationStreams(tabletStreams).Values())
+ query := fmt.Sprintf("select distinct vrepl_id from _vt.copy_state where vrepl_id in %s", VReplicationStreams(tabletStreams).Values())
p3qr, err := sm.ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, query)
switch {
case err != nil:
@@ -574,7 +580,7 @@ func (sm *StreamMigrator) createTargetStreams(ctx context.Context, tmpl []*VRepl
}
ig.AddRow(vrs.Workflow, vrs.BinlogSource, mysql.EncodePosition(vrs.Position), "", "",
- int64(vrs.WorkflowType), int64(vrs.WorkflowSubType))
+ int64(vrs.WorkflowType), int64(vrs.WorkflowSubType), vrs.DeferSecondaryKeys)
}
_, err := sm.ts.VReplicationExec(ctx, target.GetPrimary().GetAlias(), ig.String())
diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go
index 6464e796a00..7289ea155dd 100644
--- a/go/vt/vtctl/workflow/traffic_switcher.go
+++ b/go/vt/vtctl/workflow/traffic_switcher.go
@@ -226,7 +226,7 @@ func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManag
// NB: changing the whitespace of this query breaks tests for now.
// (TODO:@ajm188) extend FakeDBClient to be less whitespace-sensitive on
// expected queries.
- query := fmt.Sprintf("select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type from _vt.vreplication where workflow=%s and db_name=%s", encodeString(workflow), encodeString(primary.DbName()))
+ query := fmt.Sprintf("select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow=%s and db_name=%s", encodeString(workflow), encodeString(primary.DbName()))
p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, query)
if err != nil {
return nil, err
@@ -308,7 +308,7 @@ func getVReplicationWorkflowSubType(row sqltypes.RowNamedValues) binlogdatapb.VR
// this function should be unexported. Consequently, YOU SHOULD NOT DEPEND ON
// THIS FUNCTION EXTERNALLY.
func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInfo, ts *topo.Server) error {
- shardSet := sets.NewString()
+ shardSet := sets.New[string]()
for _, si := range shards {
shardSet.Insert(si.ShardName())
}
@@ -318,19 +318,19 @@ func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInf
return err
}
- topoShardSet := sets.NewString(topoShards...)
+ topoShardSet := sets.New[string](topoShards...)
if !shardSet.Equal(topoShardSet) {
wfExtra := shardSet.Difference(topoShardSet)
topoExtra := topoShardSet.Difference(shardSet)
var rec concurrency.AllErrorRecorder
if wfExtra.Len() > 0 {
- wfExtraSorted := wfExtra.List()
+ wfExtraSorted := sets.List(wfExtra)
rec.RecordError(fmt.Errorf("switch command shards not in topo: %v", wfExtraSorted))
}
if topoExtra.Len() > 0 {
- topoExtraSorted := topoExtra.List()
+ topoExtraSorted := sets.List(topoExtra)
rec.RecordError(fmt.Errorf("topo shards not in switch command: %v", topoExtraSorted))
}
diff --git a/go/vt/vtctl/workflow/vreplication_stream.go b/go/vt/vtctl/workflow/vreplication_stream.go
index 7e63d3e62f8..55b668e30f1 100644
--- a/go/vt/vtctl/workflow/vreplication_stream.go
+++ b/go/vt/vtctl/workflow/vreplication_stream.go
@@ -30,12 +30,13 @@ import (
// VReplicationStream represents a single stream of a vreplication workflow.
type VReplicationStream struct {
- ID uint32
- Workflow string
- BinlogSource *binlogdatapb.BinlogSource
- Position mysql.Position
- WorkflowType binlogdatapb.VReplicationWorkflowType
- WorkflowSubType binlogdatapb.VReplicationWorkflowSubType
+ ID uint32
+ Workflow string
+ BinlogSource *binlogdatapb.BinlogSource
+ Position mysql.Position
+ WorkflowType binlogdatapb.VReplicationWorkflowType
+ WorkflowSubType binlogdatapb.VReplicationWorkflowSubType
+ DeferSecondaryKeys bool
}
// VReplicationStreams wraps a slice of VReplicationStream objects to provide
diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go
index 7944e76d58d..faebd04bfdc 100644
--- a/go/vt/vtctld/api.go
+++ b/go/vt/vtctld/api.go
@@ -42,7 +42,6 @@ import (
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vtctl"
"vitess.io/vitess/go/vt/vttablet/tmclient"
- "vitess.io/vitess/go/vt/workflow"
"vitess.io/vitess/go/vt/wrangler"
logutilpb "vitess.io/vitess/go/vt/proto/logutil"
@@ -52,8 +51,8 @@ import (
var (
localCell string
- showTopologyCRUD = true
proxyTablets bool
+ showTopologyCRUD = true
)
// This file implements a REST-style API for the vtctld web interface.
@@ -97,8 +96,9 @@ func init() {
func registerVtctldAPIFlags(fs *pflag.FlagSet) {
fs.StringVar(&localCell, "cell", localCell, "cell to use")
- fs.BoolVar(&showTopologyCRUD, "vtctld_show_topology_crud", showTopologyCRUD, "Controls the display of the CRUD topology actions in the vtctld UI.")
fs.BoolVar(&proxyTablets, "proxy_tablets", proxyTablets, "Setting this true will make vtctld proxy the tablet status instead of redirecting to them")
+ fs.BoolVar(&showTopologyCRUD, "vtctld_show_topology_crud", showTopologyCRUD, "Controls the display of the CRUD topology actions in the vtctld UI.")
+ fs.MarkDeprecated("vtctld_show_topology_crud", "It is no longer applicable because vtctld no longer provides a UI.")
}
func newTabletWithStatsAndURL(t *topodatapb.Tablet, healthcheck discovery.HealthCheck) *TabletWithStatsAndURL {
@@ -668,9 +668,6 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, he
resp := make(map[string]any)
resp["activeReparents"] = !mysqlctl.DisableActiveReparents
resp["showStatus"] = enableRealtimeStats
- resp["showTopologyCRUD"] = showTopologyCRUD
- resp["showWorkflows"] = workflowManagerInit
- resp["workflows"] = workflow.AvailableFactories()
data, err := json.MarshalIndent(resp, "", " ")
if err != nil {
return fmt.Errorf("json error: %v", err)
diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go
index 38eb2785d0d..1d8c1863666 100644
--- a/go/vt/vtctld/api_test.go
+++ b/go/vt/vtctld/api_test.go
@@ -26,6 +26,8 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/wrangler"
@@ -303,7 +305,8 @@ func TestAPI(t *testing.T) {
"keyspace_type":0,
"base_keyspace":"",
"snapshot_time":null,
- "durability_policy":"semi_sync"
+ "durability_policy":"semi_sync",
+ "throttler_config": null
}`, http.StatusOK},
{"GET", "keyspaces/nonexistent", "", "404 page not found", http.StatusNotFound},
{"POST", "keyspaces/ks1?action=TestKeyspaceAction", "", `{
@@ -438,11 +441,11 @@ func TestAPI(t *testing.T) {
// vtctl RunCommand
{"POST", "vtctl/", `["GetKeyspace","ks1"]`, `{
"Error": "",
- "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 0,\n \"base_keyspace\": \"\",\n \"snapshot_time\": null,\n \"durability_policy\": \"semi_sync\"\n}\n\n"
+ "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 0,\n \"base_keyspace\": \"\",\n \"snapshot_time\": null,\n \"durability_policy\": \"semi_sync\",\n \"throttler_config\": null\n}\n\n"
}`, http.StatusOK},
{"POST", "vtctl/", `["GetKeyspace","ks3"]`, `{
"Error": "",
- "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 1,\n \"base_keyspace\": \"ks1\",\n \"snapshot_time\": {\n \"seconds\": \"1136214245\",\n \"nanoseconds\": 0\n },\n \"durability_policy\": \"none\"\n}\n\n"
+ "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 1,\n \"base_keyspace\": \"ks1\",\n \"snapshot_time\": {\n \"seconds\": \"1136214245\",\n \"nanoseconds\": 0\n },\n \"durability_policy\": \"none\",\n \"throttler_config\": null\n}\n\n"
}`, http.StatusOK},
{"POST", "vtctl/", `["GetVSchema","ks3"]`, `{
"Error": "",
@@ -462,29 +465,19 @@ func TestAPI(t *testing.T) {
switch in.method {
case "GET":
resp, err = http.Get(server.URL + apiPrefix + in.path)
+ require.NoError(t, err)
+ defer resp.Body.Close()
case "POST":
resp, err = http.Post(server.URL+apiPrefix+in.path, "application/json", strings.NewReader(in.body))
+ require.NoError(t, err)
+ defer resp.Body.Close()
default:
t.Fatalf("[%v] unknown method: %v", in.path, in.method)
- return
- }
-
- if err != nil {
- t.Fatalf("[%v] http error: %v", in.path, err)
- return
}
body, err := io.ReadAll(resp.Body)
- resp.Body.Close()
-
- if err != nil {
- t.Fatalf("[%v] io.ReadAll(resp.Body) error: %v", in.path, err)
- return
- }
-
- if resp.StatusCode != in.statusCode {
- t.Fatalf("[%v] got unexpected status code %d, want %d", in.path, resp.StatusCode, in.statusCode)
- }
+ require.NoError(t, err)
+ require.Equal(t, in.statusCode, resp.StatusCode)
got := compactJSON(body)
want := compactJSON([]byte(in.want))
diff --git a/go/vt/vtctld/explorer.go b/go/vt/vtctld/explorer.go
index 7c05c4e2fc3..b6eaa1874a6 100644
--- a/go/vt/vtctld/explorer.go
+++ b/go/vt/vtctld/explorer.go
@@ -17,7 +17,6 @@ limitations under the License.
package vtctld
import (
- "errors"
"fmt"
"net/http"
"path"
@@ -27,8 +26,6 @@ import (
"context"
"vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/topoproto"
- "vitess.io/vitess/go/vt/vtctl"
)
// backendExplorer is a class that uses the Backend interface of a
@@ -89,7 +86,7 @@ func (ex *backendExplorer) HandlePath(nodePath string, r *http.Request) *Result
case nil:
if len(data) > 0 {
// It has contents, we just use it if possible.
- decoded, err := vtctl.DecodeContent(relativePath, data, false)
+ decoded, err := topo.DecodeContent(relativePath, data, false)
if err != nil {
result.Error = err.Error()
} else {
@@ -118,52 +115,6 @@ func (ex *backendExplorer) HandlePath(nodePath string, r *http.Request) *Result
return result
}
-// handleExplorerRedirect returns the redirect target URL.
-func handleExplorerRedirect(ctx context.Context, ts *topo.Server, r *http.Request) (string, error) {
- keyspace := r.FormValue("keyspace")
- shard := r.FormValue("shard")
- cell := r.FormValue("cell")
-
- switch r.FormValue("type") {
- case "keyspace":
- if keyspace == "" {
- return "", errors.New("keyspace is required for this redirect")
- }
- return appPrefix + "#/keyspaces/", nil
- case "shard":
- if keyspace == "" || shard == "" {
- return "", errors.New("keyspace and shard are required for this redirect")
- }
- return appPrefix + fmt.Sprintf("#/shard/%s/%s", keyspace, shard), nil
- case "srv_keyspace":
- if keyspace == "" || cell == "" {
- return "", errors.New("keyspace and cell are required for this redirect")
- }
- return appPrefix + "#/keyspaces/", nil
- case "tablet":
- alias := r.FormValue("alias")
- if alias == "" {
- return "", errors.New("alias is required for this redirect")
- }
- tabletAlias, err := topoproto.ParseTabletAlias(alias)
- if err != nil {
- return "", fmt.Errorf("bad tablet alias %q: %v", alias, err)
- }
- ti, err := ts.GetTablet(ctx, tabletAlias)
- if err != nil {
- return "", fmt.Errorf("can't get tablet %q: %v", alias, err)
- }
- return appPrefix + fmt.Sprintf("#/shard/%s/%s", ti.Keyspace, ti.Shard), nil
- case "replication":
- if keyspace == "" || shard == "" || cell == "" {
- return "", errors.New("keyspace, shard, and cell are required for this redirect")
- }
- return appPrefix + fmt.Sprintf("#/shard/%s/%s", keyspace, shard), nil
- default:
- return "", errors.New("bad redirect type")
- }
-}
-
// initExplorer initializes the redirects for explorer
func initExplorer(ts *topo.Server) {
// Main backend explorer functions.
@@ -171,20 +122,4 @@ func initExplorer(ts *topo.Server) {
handleCollection("topodata", func(r *http.Request) (any, error) {
return be.HandlePath(path.Clean("/"+getItemPath(r.URL.Path)), r), nil
})
-
- // Redirects for explorers.
- http.HandleFunc("/explorers/redirect", func(w http.ResponseWriter, r *http.Request) {
- if err := r.ParseForm(); err != nil {
- httpErrorf(w, r, "cannot parse form: %s", err)
- return
- }
-
- target, err := handleExplorerRedirect(context.Background(), ts, r)
- if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- http.Redirect(w, r, target, http.StatusFound)
- })
}
diff --git a/go/vt/vtctld/explorer_test.go b/go/vt/vtctld/explorer_test.go
index 08d1a7d395c..62eb7c01642 100644
--- a/go/vt/vtctld/explorer_test.go
+++ b/go/vt/vtctld/explorer_test.go
@@ -17,7 +17,6 @@ limitations under the License.
package vtctld
import (
- "net/http"
"path"
"reflect"
"testing"
@@ -30,47 +29,6 @@ import (
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
-func TestHandleExplorerRedirect(t *testing.T) {
- ctx := context.Background()
-
- ts := memorytopo.NewServer("cell1")
- if err := ts.CreateTablet(ctx, &topodatapb.Tablet{
- Alias: &topodatapb.TabletAlias{
- Cell: "cell1",
- Uid: 123,
- },
- Keyspace: "test_keyspace",
- Shard: "123-456",
- }); err != nil {
- t.Fatalf("CreateTablet failed: %v", err)
- }
-
- table := map[string]string{
- "/explorers/redirect?type=keyspace&keyspace=test_keyspace": "/app/#/keyspaces/",
- "/explorers/redirect?type=shard&keyspace=test_keyspace&shard=-80": "/app/#/shard/test_keyspace/-80",
- "/explorers/redirect?type=srv_keyspace&keyspace=test_keyspace&cell=cell1": "/app/#/keyspaces/",
- "/explorers/redirect?type=tablet&alias=cell1-123": "/app/#/shard/test_keyspace/123-456",
- "/explorers/redirect?type=replication&keyspace=test_keyspace&shard=-80&cell=cell1": "/app/#/shard/test_keyspace/-80",
- }
-
- for input, want := range table {
- request, err := http.NewRequest("GET", input, nil)
- if err != nil {
- t.Fatalf("NewRequest error: %v", err)
- }
- if err := request.ParseForm(); err != nil {
- t.Fatalf("ParseForm error: %v", err)
- }
- got, err := handleExplorerRedirect(ctx, ts, request)
- if err != nil {
- t.Fatalf("handleExplorerRedirect error: %v", err)
- }
- if got != want {
- t.Errorf("handlExplorerRedirect(%#v) = %#v, want %#v", input, got, want)
- }
- }
-}
-
// Test the explorer using MemoryTopo as a backend.
func TestHandlePathRoot(t *testing.T) {
input := "/"
diff --git a/go/vt/vtctld/redirection.go b/go/vt/vtctld/redirection.go
deleted file mode 100644
index 8c9dea60432..00000000000
--- a/go/vt/vtctld/redirection.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vtctld
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "net/http/httputil"
- "strconv"
- "strings"
-
- "vitess.io/vitess/go/netutil"
- "vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/topoproto"
-)
-
-func initVTTabletRedirection(ts *topo.Server) {
- http.HandleFunc("/vttablet/", func(w http.ResponseWriter, r *http.Request) {
- splits := strings.SplitN(r.URL.Path, "/", 4)
- if len(splits) < 4 {
- log.Errorf("Invalid URL: %v", r.URL)
- http.NotFound(w, r)
- return
- }
- tabletID := splits[2]
- tabletAlias, err := topoproto.ParseTabletAlias(tabletID)
- if err != nil {
- log.Errorf("Error parsting tablet alias %v: %v", tabletID, err)
- http.NotFound(w, r)
- return
- }
- tablet, err := ts.GetTablet(r.Context(), tabletAlias)
- if err != nil {
- log.Errorf("Error fetching tablet %v: %v", splits[2], err)
- http.NotFound(w, r)
- return
- }
- if tablet.Hostname == "" || tablet.PortMap["vt"] == 0 {
- log.Errorf("Invalid host/port: %s %d", tablet.Hostname, tablet.PortMap["vt"])
- http.NotFound(w, r)
- return
- }
-
- rp := &httputil.ReverseProxy{}
- rp.Director = func(req *http.Request) {
- req.URL.Scheme = "http"
- req.URL.Host = netutil.JoinHostPort(tablet.Hostname, tablet.PortMap["vt"])
- req.URL.Path = "/" + splits[3]
- }
-
- prefixPath := fmt.Sprintf("/vttablet/%s/", tabletID)
- rp.ModifyResponse = func(r *http.Response) error {
- b, _ := io.ReadAll(r.Body)
- b = bytes.ReplaceAll(b, []byte(`href="/`), []byte(fmt.Sprintf(`href="%s`, prefixPath)))
- b = bytes.ReplaceAll(b, []byte(`href=/`), []byte(fmt.Sprintf(`href=%s`, prefixPath)))
- r.Body = io.NopCloser(bytes.NewBuffer(b))
- r.Header["Content-Length"] = []string{strconv.FormatInt(int64(len(b)), 10)}
-
- // Don't forget redirects
- loc := r.Header["Location"]
- for i, v := range loc {
- if strings.HasPrefix(v, "/") {
- loc[i] = strings.Replace(v, "/", prefixPath, 1)
- }
- }
- return nil
- }
-
- rp.ServeHTTP(w, r)
- })
-}
diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go
index 40973864ddc..2b143882f21 100644
--- a/go/vt/vtctld/vtctld.go
+++ b/go/vt/vtctld/vtctld.go
@@ -20,8 +20,6 @@ package vtctld
import (
"context"
- "net/http"
- "strings"
"github.com/spf13/pflag"
@@ -33,7 +31,6 @@ import (
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vtctl"
"vitess.io/vitess/go/vt/wrangler"
- "vitess.io/vitess/web/vtctld2"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
@@ -41,15 +38,8 @@ import (
var (
enableRealtimeStats = false
- enableUI = true
durabilityPolicy = "none"
sanitizeLogMessages = false
- webDir string
- webDir2 string
-)
-
-const (
- appPrefix = "/app/"
)
func init() {
@@ -59,12 +49,8 @@ func init() {
}
func registerVtctldFlags(fs *pflag.FlagSet) {
- fs.BoolVar(&enableRealtimeStats, "enable_realtime_stats", enableRealtimeStats, "Required for the Realtime Stats view. If set, vtctld will maintain a streaming RPC to each tablet (in all cells) to gather the realtime health stats.")
- fs.BoolVar(&enableUI, "enable_vtctld_ui", enableUI, "If true, the vtctld web interface will be enabled. Default is true.")
fs.StringVar(&durabilityPolicy, "durability_policy", durabilityPolicy, "type of durability to enforce. Default is none. Other values are dictated by registered plugins")
fs.BoolVar(&sanitizeLogMessages, "vtctld_sanitize_log_messages", sanitizeLogMessages, "When true, vtctld sanitizes logging.")
- fs.StringVar(&webDir, "web_dir", webDir, "NOT USED, here for backward compatibility")
- fs.StringVar(&webDir2, "web_dir2", webDir2, "NOT USED, here for backward compatibility")
}
// InitVtctld initializes all the vtctld functionality.
@@ -145,13 +131,6 @@ func InitVtctld(ts *topo.Server) error {
return "", err
})
- // Anything unrecognized gets redirected to the main app page.
- http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- http.Redirect(w, r, appPrefix, http.StatusFound)
- })
-
- http.Handle(appPrefix, staticContentHandler(enableUI))
-
var healthCheck discovery.HealthCheck
if enableRealtimeStats {
ctx := context.Background()
@@ -159,32 +138,15 @@ func InitVtctld(ts *topo.Server) error {
if err != nil {
log.Errorf("Failed to get the list of known cells, failed to instantiate the healthcheck at startup: %v", err)
} else {
- healthCheck = discovery.NewHealthCheck(ctx, *vtctl.HealthcheckRetryDelay, *vtctl.HealthCheckTimeout, ts, localCell, strings.Join(cells, ","))
+ healthCheck = vtctl.NewHealthCheck(ctx, ts, localCell, cells)
}
}
- // Serve the REST API for the vtctld web app.
+ // Serve the REST API
initAPI(context.Background(), ts, actionRepo, healthCheck)
- // Init redirects for explorers
+ // Serve the topology endpoint in the REST API at /topodata
initExplorer(ts)
- // Init workflow manager.
- initWorkflowManager(ts)
-
- // Setup reverse proxy for all vttablets through /vttablet/.
- initVTTabletRedirection(ts)
-
return nil
}
-
-func staticContentHandler(enabled bool) http.Handler {
- if enabled {
- return http.FileServer(http.FS(vtctld2.Content))
- }
-
- fn := func(w http.ResponseWriter, r *http.Request) {
- http.NotFound(w, r)
- }
- return http.HandlerFunc(fn)
-}
diff --git a/go/vt/vtctld/vtctld_test.go b/go/vt/vtctld/vtctld_test.go
deleted file mode 100644
index 9bf9c4bfb06..00000000000
--- a/go/vt/vtctld/vtctld_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vtctld
-
-import (
- "flag"
- "fmt"
- "io"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestWebApp(t *testing.T) {
- req := httptest.NewRequest(http.MethodGet, appPrefix, nil)
- w := httptest.NewRecorder()
-
- handler := staticContentHandler(true)
- handler.ServeHTTP(w, req)
- res := w.Result()
-
- assert.Equal(t, http.StatusOK, res.StatusCode)
-
- defer res.Body.Close()
-
- data, err := io.ReadAll(res.Body)
- fmt.Printf("body: %s\n", string(data))
-
- assert.NoError(t, err)
- assert.Contains(t, string(data), "")
-}
-
-func TestWebAppDisabled(t *testing.T) {
- flag.Set("enable_vtctld_ui", "false")
- defer flag.Set("enable_vtctld_ui", "true")
-
- req := httptest.NewRequest(http.MethodGet, appPrefix, nil)
- w := httptest.NewRecorder()
-
- handler := staticContentHandler(false)
- handler.ServeHTTP(w, req)
- res := w.Result()
-
- assert.Equal(t, http.StatusNotFound, res.StatusCode)
-
- defer res.Body.Close()
-
- data, err := io.ReadAll(res.Body)
- assert.NoError(t, err)
- assert.Equal(t, "404 page not found\n", string(data))
-}
diff --git a/go/vt/vtctld/workflow.go b/go/vt/vtctld/workflow.go
deleted file mode 100644
index bafc6ad8672..00000000000
--- a/go/vt/vtctld/workflow.go
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package vtctld
-
-import (
- "context"
- "time"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/trace"
-
- "vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/vtctl"
- "vitess.io/vitess/go/vt/workflow"
- "vitess.io/vitess/go/vt/workflow/topovalidator"
-)
-
-var (
- workflowManagerInit bool
- workflowManagerUseElection bool
-
- workflowManagerDisable []string
-)
-
-func registerVtctldWorkflowFlags(fs *pflag.FlagSet) {
- fs.BoolVar(&workflowManagerInit, "workflow_manager_init", workflowManagerInit, "Initialize the workflow manager in this vtctld instance.")
- fs.BoolVar(&workflowManagerUseElection, "workflow_manager_use_election", workflowManagerUseElection, "if specified, will use a topology server-based master election to ensure only one workflow manager is active at a time.")
- fs.StringSliceVar(&workflowManagerDisable, "workflow_manager_disable", workflowManagerDisable, "comma separated list of workflow types to disable")
-}
-
-func init() {
- for _, cmd := range []string{"vtcombo", "vtctld"} {
- servenv.OnParseFor(cmd, registerVtctldWorkflowFlags)
- }
-
-}
-
-func initWorkflowManager(ts *topo.Server) {
- if workflowManagerInit {
- // Uncomment this line to register the UI test validator.
- // topovalidator.RegisterUITestValidator()
-
- // Register the Topo Validators, and the workflow.
- topovalidator.RegisterKeyspaceValidator()
- topovalidator.RegisterShardValidator()
- topovalidator.Register()
-
- // Unregister the disabled workflows.
- for _, name := range workflowManagerDisable {
- workflow.Unregister(name)
- }
-
- // Create the WorkflowManager.
- vtctl.WorkflowManager = workflow.NewManager(ts)
- vtctl.WorkflowManager.SetSanitizeHTTPHeaders(sanitizeLogMessages)
-
- // Register the long polling and websocket handlers.
- vtctl.WorkflowManager.HandleHTTPLongPolling(apiPrefix + "workflow")
- vtctl.WorkflowManager.HandleHTTPWebSocket(apiPrefix + "workflow")
-
- if workflowManagerUseElection {
- runWorkflowManagerElection(ts)
- } else {
- runWorkflowManagerAlone()
- }
- }
-}
-
-func runWorkflowManagerAlone() {
- ctx, cancel := context.WithCancel(context.Background())
- go vtctl.WorkflowManager.Run(ctx)
-
- // Running cancel on OnTermSync will cancel the context of any
- // running workflow inside vtctld. They may still checkpoint
- // if they want to.
- servenv.OnTermSync(cancel)
-}
-
-func runWorkflowManagerElection(ts *topo.Server) {
- var mp topo.LeaderParticipation
-
- // We use servenv.ListeningURL which is only populated during Run,
- // so we have to start this with OnRun.
- servenv.OnRun(func() {
- span, ctx := trace.NewSpan(context.Background(), "WorkflowManagerElection")
- defer span.Finish()
-
- conn, err := ts.ConnForCell(ctx, topo.GlobalCell)
- if err != nil {
- log.Errorf("Cannot get global cell topo connection, disabling workflow manager: %v", err)
- return
- }
-
- mp, err = conn.NewLeaderParticipation("vtctld", servenv.ListeningURL.Host)
- if err != nil {
- log.Errorf("Cannot start LeaderParticipation, disabling workflow manager: %v", err)
- return
- }
-
- // Set up a redirect host so when we are not the
- // primary, we can redirect traffic properly.
- vtctl.WorkflowManager.SetRedirectFunc(func() (string, error) {
- ctx := context.Background()
- return mp.GetCurrentLeaderID(ctx)
- })
-
- go func() {
- for {
- ctx, err := mp.WaitForLeadership()
- switch {
- case err == nil:
- vtctl.WorkflowManager.Run(ctx)
- case topo.IsErrType(err, topo.Interrupted):
- return
- default:
- log.Errorf("Got error while waiting for master, will retry in 5s: %v", err)
- time.Sleep(5 * time.Second)
- }
- }
- }()
- })
-
- // When we get killed, clean up.
- servenv.OnTermSync(func() {
- mp.Stop()
- })
-}
diff --git a/go/vt/vterrors/code.go b/go/vt/vterrors/code.go
new file mode 100644
index 00000000000..9b4351d8e7b
--- /dev/null
+++ b/go/vt/vterrors/code.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vterrors
+
+import (
+ "fmt"
+
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+)
+
+var (
+ VT03001 = errorWithState("VT03001", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "aggregate functions take a single argument '%s'", "This aggregation function only takes a single argument.")
+ VT03002 = errorWithState("VT03002", vtrpcpb.Code_INVALID_ARGUMENT, ForbidSchemaChange, "changing schema from '%s' to '%s' is not allowed", "This schema change is not allowed. You cannot change the keyspace of a table.")
+ VT03003 = errorWithState("VT03003", vtrpcpb.Code_INVALID_ARGUMENT, UnknownTable, "unknown table '%s' in MULTI DELETE", "The specified table in this DELETE statement is unknown.")
+ VT03004 = errorWithState("VT03004", vtrpcpb.Code_INVALID_ARGUMENT, NonUpdateableTable, "the target table %s of the DELETE is not updatable", "You cannot delete something that is not a real MySQL table.")
+ VT03005 = errorWithState("VT03005", vtrpcpb.Code_INVALID_ARGUMENT, WrongGroupField, "cannot group on '%s'", "The planner does not allow grouping on certain field. For instance, aggregation function.")
+ VT03006 = errorWithState("VT03006", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count at row 1", "The number of columns you want to insert do not match the number of columns of your SELECT query.")
+ VT03007 = errorWithoutState("VT03007", vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified", "You need to add a keyspace qualifier.")
+ VT03008 = errorWithState("VT03008", vtrpcpb.Code_INVALID_ARGUMENT, CantUseOptionHere, "incorrect usage/placement of '%s'", "The given token is not usable in this situation. Please refer to the MySQL documentation to learn more about your token's syntax.")
+ VT03009 = errorWithState("VT03009", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueForVar, "unexpected value type for '%s': %v", "You cannot assign this type to the given variable.")
+ VT03010 = errorWithState("VT03010", vtrpcpb.Code_INVALID_ARGUMENT, IncorrectGlobalLocalVar, "variable '%s' is a read only variable", "You cannot set the given variable as it is a read-only variable.")
+ VT03011 = errorWithoutState("VT03011", vtrpcpb.Code_INVALID_ARGUMENT, "invalid value type: %v", "The given value type is not accepted.")
+ VT03012 = errorWithoutState("VT03012", vtrpcpb.Code_INVALID_ARGUMENT, "invalid syntax: %s", "The syntax is invalid. Please refer to the MySQL documentation for the proper syntax.")
+ VT03013 = errorWithState("VT03013", vtrpcpb.Code_INVALID_ARGUMENT, NonUniqTable, "not unique table/alias: '%s'", "This table or alias name is already use. Please use another one that is unique.")
+ VT03014 = errorWithState("VT03014", vtrpcpb.Code_INVALID_ARGUMENT, BadFieldError, "unknown column '%d' in '%s'", "The given column is unknown.")
+ VT03015 = errorWithoutState("VT03015", vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", "Cannot assign multiple values to a column in an update statement.")
+ VT03016 = errorWithoutState("VT03016", vtrpcpb.Code_INVALID_ARGUMENT, "unknown vindex column: '%s'", "The given column is unknown in the vindex table.")
+ VT03017 = errorWithState("VT03017", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "where clause can only be of the type 'pos > '", "This vstream where clause can only be a greater than filter.")
+ VT03018 = errorWithoutState("VT03018", vtrpcpb.Code_INVALID_ARGUMENT, "NEXT used on a non-sequence table", "You cannot use the NEXT syntax on a table that is not a sequence table.")
+ VT03019 = errorWithoutState("VT03019", vtrpcpb.Code_INVALID_ARGUMENT, "symbol %s not found", "The given symbol was not found or is not available.")
+ VT03020 = errorWithoutState("VT03020", vtrpcpb.Code_INVALID_ARGUMENT, "symbol %s not found in subquery", "The given symbol was not found in the subquery.")
+ VT03021 = errorWithoutState("VT03021", vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous symbol reference: %v", "The given symbol is ambiguous. You can use a table qualifier to make it unambiguous.")
+ VT03022 = errorWithoutState("VT03022", vtrpcpb.Code_INVALID_ARGUMENT, "column %v not found in %v", "The given column cannot be found.")
+ VT03023 = errorWithoutState("VT03023", vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", "When targeting a range of shards, Vitess does not know which shard to send the INSERT to.")
+
+ VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.")
+ VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.")
+ VT05003 = errorWithState("VT05003", vtrpcpb.Code_NOT_FOUND, BadDb, "unknown database '%s' in vschema", "The given database does not exist in the VSchema.")
+ VT05004 = errorWithState("VT05004", vtrpcpb.Code_NOT_FOUND, UnknownTable, "table '%s' does not exist", "The given table is unknown.")
+ VT05005 = errorWithState("VT05005", vtrpcpb.Code_NOT_FOUND, NoSuchTable, "table '%s' does not exist in keyspace '%s'", "The given table does not exist in this keyspace.")
+ VT05006 = errorWithState("VT05006", vtrpcpb.Code_NOT_FOUND, UnknownSystemVariable, "unknown system variable '%s'", "The given system variable is unknown.")
+ VT05007 = errorWithoutState("VT05007", vtrpcpb.Code_NOT_FOUND, "no table info", "Table information is not available.")
+
+ VT06001 = errorWithState("VT06001", vtrpcpb.Code_ALREADY_EXISTS, DbCreateExists, "cannot create database '%s'; database exists", "The given database name already exists.")
+
+ VT09001 = errorWithState("VT09001", vtrpcpb.Code_FAILED_PRECONDITION, RequiresPrimaryKey, PrimaryVindexNotSet, "the table does not have a primary vindex, the operation is impossible.")
+ VT09002 = errorWithState("VT09002", vtrpcpb.Code_FAILED_PRECONDITION, InnodbReadOnly, "%s statement with a replica target", "This type of DML statement is not allowed on a replica target.")
+ VT09003 = errorWithoutState("VT09003", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT query does not have primary vindex column '%v' in the column list", "A vindex column is mandatory for the insert, please provide one.")
+ VT09004 = errorWithoutState("VT09004", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT should contain column list or the table should have authoritative columns in vschema", "You need to provide the list of columns you want to insert, or provide a VSchema with authoritative columns. If schema tracking is disabled you can enable it to automatically have authoritative columns.")
+ VT09005 = errorWithState("VT09005", vtrpcpb.Code_FAILED_PRECONDITION, NoDB, "no database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)", "A database must be selected.")
+ VT09006 = errorWithoutState("VT09006", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_MIGRATION works only on primary tablet", "VITESS_MIGRATION commands work only on primary tablets, you must send such commands to a primary tablet.")
+ VT09007 = errorWithoutState("VT09007", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_THROTTLED_APPS works only on primary tablet", "VITESS_THROTTLED_APPS commands work only on primary tablet, you must send such commands to a primary tablet.")
+ VT09008 = errorWithoutState("VT09008", vtrpcpb.Code_FAILED_PRECONDITION, "vexplain queries/all will actually run queries", "vexplain queries/all will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries delete from t1`")
+ VT09009 = errorWithoutState("VT09009", vtrpcpb.Code_FAILED_PRECONDITION, "stream is supported only for primary tablet type, current type: %v", "Stream is only supported for primary tablets, please use a stream on those tablets.")
+ VT09010 = errorWithoutState("VT09010", vtrpcpb.Code_FAILED_PRECONDITION, "SHOW VITESS_THROTTLER STATUS works only on primary tablet", "SHOW VITESS_THROTTLER STATUS works only on primary tablet.")
+
+ VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.")
+
+ VT12001 = errorWithoutState("VT12001", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", "This statement is unsupported by Vitess. Please rewrite your query to use supported syntax.")
+
+ // VT13001 General Error
+ VT13001 = errorWithoutState("VT13001", vtrpcpb.Code_INTERNAL, "[BUG] %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
+ VT13002 = errorWithoutState("VT13002", vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
+
+ VT14001 = errorWithoutState("VT14001", vtrpcpb.Code_UNAVAILABLE, "connection error", "The connection failed.")
+ VT14002 = errorWithoutState("VT14002", vtrpcpb.Code_UNAVAILABLE, "no available connection", "No available connection.")
+ VT14003 = errorWithoutState("VT14003", vtrpcpb.Code_UNAVAILABLE, "no connection for tablet %v", "No connection for the given tablet.")
+ VT14004 = errorWithoutState("VT14004", vtrpcpb.Code_UNAVAILABLE, "cannot find keyspace for: %s", "The specified keyspace could not be found.")
+
+ Errors = []func(args ...any) *VitessError{
+ VT03001,
+ VT03002,
+ VT03003,
+ VT03004,
+ VT03005,
+ VT03006,
+ VT03007,
+ VT03008,
+ VT03009,
+ VT03010,
+ VT03011,
+ VT03012,
+ VT03013,
+ VT03014,
+ VT03015,
+ VT03016,
+ VT03017,
+ VT03018,
+ VT03019,
+ VT03020,
+ VT03021,
+ VT03022,
+ VT03023,
+ VT05001,
+ VT05002,
+ VT05003,
+ VT05004,
+ VT05005,
+ VT05006,
+ VT05007,
+ VT06001,
+ VT09001,
+ VT09002,
+ VT09003,
+ VT09004,
+ VT09005,
+ VT09006,
+ VT09007,
+ VT09008,
+ VT09009,
+ VT09010,
+ VT10001,
+ VT12001,
+ VT13001,
+ VT13002,
+ VT14001,
+ VT14002,
+ VT14003,
+ VT14004,
+ }
+)
+
+type VitessError struct {
+ Err error
+ Description string
+ ID string
+ State State
+}
+
+func (o *VitessError) Error() string {
+ return o.Err.Error()
+}
+
+func (o *VitessError) Cause() error {
+ return o.Err
+}
+
+var _ error = (*VitessError)(nil)
+
+func errorWithoutState(id string, code vtrpcpb.Code, short, long string) func(args ...any) *VitessError {
+ return func(args ...any) *VitessError {
+ s := short
+ if len(args) != 0 {
+ s = fmt.Sprintf(s, args...)
+ }
+
+ return &VitessError{
+ Err: New(code, id+": "+s),
+ Description: long,
+ ID: id,
+ }
+ }
+}
+
+func errorWithState(id string, code vtrpcpb.Code, state State, short, long string) func(args ...any) *VitessError {
+ return func(args ...any) *VitessError {
+ return &VitessError{
+ Err: NewErrorf(code, state, id+": "+short, args...),
+ Description: long,
+ ID: id,
+ State: state,
+ }
+ }
+}
diff --git a/go/vt/vterrors/errors_test.go b/go/vt/vterrors/errors_test.go
index 96c034c45ee..c115fb41686 100644
--- a/go/vt/vterrors/errors_test.go
+++ b/go/vt/vterrors/errors_test.go
@@ -21,10 +21,13 @@ import (
"errors"
"fmt"
"io"
+ "math/rand"
"reflect"
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
+
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
@@ -57,6 +60,68 @@ func TestWrap(t *testing.T) {
}
}
+func TestUnwrap(t *testing.T) {
+ tests := []struct {
+ err error
+ isWrapped bool
+ }{
+ {fmt.Errorf("some error: %d", 17), false},
+ {errors.New("some new error"), false},
+ {Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "some msg %d", 19), false},
+ {Wrapf(errors.New("some wrapped error"), "some msg"), true},
+ {nil, false},
+ }
+
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%v", tt.err), func(t *testing.T) {
+ {
+ wasWrapped, unwrapped := Unwrap(tt.err)
+ assert.Equal(t, tt.isWrapped, wasWrapped)
+ if !wasWrapped {
+ assert.Equal(t, tt.err, unwrapped)
+ }
+ }
+ {
+ wrapped := Wrap(tt.err, "some message")
+ wasWrapped, unwrapped := Unwrap(wrapped)
+ assert.Equal(t, wasWrapped, (tt.err != nil))
+ assert.Equal(t, tt.err, unwrapped)
+ }
+ })
+ }
+}
+
+func TestUnwrapAll(t *testing.T) {
+ tests := []struct {
+ err error
+ }{
+ {fmt.Errorf("some error: %d", 17)},
+ {errors.New("some new error")},
+ {Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "some msg %d", 19)},
+ {nil},
+ }
+
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%v", tt.err), func(t *testing.T) {
+ {
+ // see that unwrapping a non-wrapped error just returns the same error
+ unwrapped := UnwrapAll(tt.err)
+ assert.Equal(t, tt.err, unwrapped)
+ }
+ {
+ // see that unwrapping a 5-times wrapped error returns the original error
+ wrapped := tt.err
+ for range rand.Perm(5) {
+ wrapped = Wrap(wrapped, "some message")
+ }
+ unwrapped := UnwrapAll(wrapped)
+ assert.Equal(t, tt.err, unwrapped)
+ }
+ })
+ }
+
+}
+
type nilError struct{}
func (nilError) Error() string { return "nil error" }
diff --git a/go/vt/vterrors/last_error.go b/go/vt/vterrors/last_error.go
new file mode 100644
index 00000000000..314a54aae00
--- /dev/null
+++ b/go/vt/vterrors/last_error.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vterrors
+
+import (
+ "sync"
+ "time"
+
+ "vitess.io/vitess/go/vt/log"
+)
+
+/*
+ * LastError tracks the most recent error for any ongoing process and how long it has persisted.
+ * The err field should be a vterror to ensure we have meaningful error codes, causes, stack
+ * traces, etc.
+ */
+type LastError struct {
+ name string
+ err error
+ firstSeen time.Time
+ lastSeen time.Time
+ mu sync.Mutex
+ maxTimeInError time.Duration // if error persists for this long, shouldRetry() will return false
+}
+
+func NewLastError(name string, maxTimeInError time.Duration) *LastError {
+ log.Infof("Created last error: %s, with maxTimeInError: %s", name, maxTimeInError)
+ return &LastError{
+ name: name,
+ maxTimeInError: maxTimeInError,
+ }
+}
+
+func (le *LastError) Record(err error) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ if err == nil {
+ log.Infof("Resetting last error: %s", le.name)
+ le.err = nil
+ le.firstSeen = time.Time{}
+ le.lastSeen = time.Time{}
+ return
+ }
+ if !Equals(err, le.err) {
+ log.Infof("Got new last error %+v for %s, was %+v", err, le.name, le.err)
+ le.firstSeen = time.Now()
+ le.lastSeen = time.Now()
+ le.err = err
+ } else {
+ // same error seen
+ log.Infof("Got the same last error for %q: %+v ; first seen at %s and last seen %dms ago", le.name, le.err, le.firstSeen, int(time.Since(le.lastSeen).Milliseconds()))
+ if time.Since(le.lastSeen) > le.maxTimeInError {
+ // reset firstSeen, since it has been long enough since the last time we saw this error
+ log.Infof("Resetting firstSeen for %s, since it is too long since the last one", le.name)
+ le.firstSeen = time.Now()
+ }
+ le.lastSeen = time.Now()
+ }
+}
+
+func (le *LastError) ShouldRetry() bool {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ if le.maxTimeInError == 0 {
+ // The value of 0 means "no time limit"
+ return true
+ }
+ if le.firstSeen.IsZero() {
+ return true
+ }
+ if time.Since(le.firstSeen) <= le.maxTimeInError {
+ // within the max time range
+ return true
+ }
+ log.Errorf("%s: the same error was encountered continuously since %s, it is now assumed to be unrecoverable; any affected operations will need to be manually restarted once error '%s' has been addressed",
+ le.name, le.firstSeen.UTC(), le.err)
+ return false
+}
diff --git a/go/vt/vterrors/last_error_test.go b/go/vt/vterrors/last_error_test.go
new file mode 100644
index 00000000000..09913796c9b
--- /dev/null
+++ b/go/vt/vterrors/last_error_test.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vterrors
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+const shortWait = 1 * time.Millisecond
+const longWait = 150 * time.Millisecond
+const maxTimeInError = 100 * time.Millisecond
+
+// TestLastErrorZeroMaxTime tests maxTimeInError = 0, should always retry
+func TestLastErrorZeroMaxTime(t *testing.T) {
+ le := NewLastError("test", 0)
+ err1 := fmt.Errorf("error1")
+ le.Record(err1)
+ require.True(t, le.ShouldRetry())
+ time.Sleep(shortWait)
+ require.True(t, le.ShouldRetry())
+ time.Sleep(longWait)
+ require.True(t, le.ShouldRetry())
+}
+
+// TestLastErrorNoError ensures that an uninitialized lastError always retries
+func TestLastErrorNoError(t *testing.T) {
+ le := NewLastError("test", maxTimeInError)
+ require.True(t, le.ShouldRetry())
+ err1 := fmt.Errorf("error1")
+ le.Record(err1)
+ require.True(t, le.ShouldRetry())
+ le.Record(nil)
+ require.True(t, le.ShouldRetry())
+}
+
+// TestLastErrorOneError validates that we retry an error if happening within the maxTimeInError, but not after
+func TestLastErrorOneError(t *testing.T) {
+ le := NewLastError("test", maxTimeInError)
+ err1 := fmt.Errorf("error1")
+ le.Record(err1)
+ require.True(t, le.ShouldRetry())
+ time.Sleep(shortWait)
+ require.True(t, le.ShouldRetry())
+ time.Sleep(shortWait)
+ require.True(t, le.ShouldRetry())
+ time.Sleep(longWait)
+ require.False(t, le.ShouldRetry())
+}
+
+// TestLastErrorRepeatedError confirms that if same error is repeated we don't retry
+// unless it happens after maxTimeInError
+func TestLastErrorRepeatedError(t *testing.T) {
+ le := NewLastError("test", maxTimeInError)
+ err1 := fmt.Errorf("error1")
+ le.Record(err1)
+ require.True(t, le.ShouldRetry())
+ for i := 1; i < 10; i++ {
+ le.Record(err1)
+ time.Sleep(shortWait)
+ }
+ require.True(t, le.ShouldRetry())
+
+ // same error happens after maxTimeInError, so it should retry
+ time.Sleep(longWait)
+ require.False(t, le.ShouldRetry())
+ le.Record(err1)
+ require.True(t, le.ShouldRetry())
+}
diff --git a/go/vt/vterrors/state.go b/go/vt/vterrors/state.go
index 44560e10e95..ae5a4970d2b 100644
--- a/go/vt/vterrors/state.go
+++ b/go/vt/vterrors/state.go
@@ -16,6 +16,8 @@ limitations under the License.
package vterrors
+import vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+
// State is error state
type State int
@@ -84,3 +86,13 @@ const (
// No state should be added below NumOfStates
NumOfStates
)
+
+// ErrorWithState is used to return the error State is such can be found
+type ErrorWithState interface {
+ ErrorState() State
+}
+
+// ErrorWithCode returns the grpc code
+type ErrorWithCode interface {
+ ErrorCode() vtrpcpb.Code
+}
diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go
index b313702dfbb..5c5d59cf3ea 100644
--- a/go/vt/vterrors/vterrors.go
+++ b/go/vt/vterrors/vterrors.go
@@ -88,6 +88,7 @@ package vterrors
import (
"context"
+ "errors"
"fmt"
"io"
@@ -133,8 +134,12 @@ func Errorf(code vtrpcpb.Code, format string, args ...any) error {
// NewErrorf also records the stack trace at the point it was called.
// Use this for errors in Vitess that we eventually want to mimic as a MySQL error
func NewErrorf(code vtrpcpb.Code, state State, format string, args ...any) error {
+ msg := format
+ if len(args) != 0 {
+ msg = fmt.Sprintf(format, args...)
+ }
return &fundamental{
- msg: fmt.Sprintf(format, args...),
+ msg: msg,
code: code,
state: state,
stack: callers(),
@@ -173,8 +178,8 @@ func Code(err error) vtrpcpb.Code {
if err == nil {
return vtrpcpb.Code_OK
}
- if err, ok := err.(*fundamental); ok {
- return err.code
+ if err, ok := err.(ErrorWithCode); ok {
+ return err.ErrorCode()
}
cause := Cause(err)
@@ -199,8 +204,9 @@ func ErrState(err error) State {
if err == nil {
return Undefined
}
- if err, ok := err.(*fundamental); ok {
- return err.state
+
+ if err, ok := err.(ErrorWithState); ok {
+ return err.ErrorState()
}
cause := Cause(err)
@@ -239,6 +245,26 @@ func Wrapf(err error, format string, args ...any) error {
}
}
+// Unwrap attempts to return the Cause of the given error, if it is indeed the result of a vterrors.Wrapf()
+// The function indicates whether the error was indeed wrapped. If the error was not wrapped, the function
+// returns the original error.
+func Unwrap(err error) (wasWrapped bool, unwrapped error) {
+ var w *wrapping
+ if errors.As(err, &w) {
+ return true, w.Cause()
+ }
+ return false, err
+}
+
+// UnwrapAll attempts to recursively unwrap the given error, and returns the most underlying cause
+func UnwrapAll(err error) error {
+ wasWrapped := true
+ for wasWrapped {
+ wasWrapped, err = Unwrap(err)
+ }
+ return err
+}
+
type wrapping struct {
cause error
msg string
@@ -334,3 +360,6 @@ func Equals(a, b error) bool {
func Print(err error) string {
return fmt.Sprintf("%v: %v\n", Code(err), err.Error())
}
+
+func (f *fundamental) ErrorState() State { return f.state }
+func (f *fundamental) ErrorCode() vtrpcpb.Code { return f.code }
diff --git a/go/vt/vterrors/vterrorsgen/main.go b/go/vt/vterrors/vterrorsgen/main.go
new file mode 100644
index 00000000000..f705813af8c
--- /dev/null
+++ b/go/vt/vterrors/vterrorsgen/main.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "log"
+ "os"
+ "strings"
+ "text/template"
+
+ "vitess.io/vitess/go/mysql"
+
+ "vitess.io/vitess/go/vt/vterrors"
+)
+
+const (
+ tmpl = `
+| ID | Description | Error | MySQL Error Code | SQL State |
+| --- | --- | --- | --- | --- |
+{{- range $err := . }}
+{{- $data := (call $err) }}
+| {{ $data.ID }} | {{ $data.Description }} | {{ FormatError $data.Err }} | {{ ConvertStateToMySQLErrorCode $data.State }} | {{ ConvertStateToMySQLState $data.State }} |
+{{- end }}
+`
+)
+
+// This program reads the errors located in the `vitess.io/vitess/go/vt/vterrors` package
+// and prints on the standard output a table, in Markdown format, that lists all the
+// errors with their code, description, error content, mysql error code and the SQL state.
+func main() {
+ t := template.New("template")
+ t.Funcs(map[string]any{
+ "ConvertStateToMySQLErrorCode": mysql.ConvertStateToMySQLErrorCode,
+ "ConvertStateToMySQLState": mysql.ConvertStateToMySQLState,
+ "FormatError": func(err error) string {
+ s := err.Error()
+ return strings.TrimSpace(strings.Join(strings.Split(s, ":")[1:], ":"))
+ },
+ })
+ t = template.Must(t.Parse(tmpl))
+
+ err := t.ExecuteTemplate(os.Stdout, "template", vterrors.Errors)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt
index 8bb3ecef970..0adc5661077 100644
--- a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt
+++ b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt
@@ -45,3 +45,8 @@ select ID from t1
1 ks_unsharded/-: select ID from t1 limit 10001
----------------------------------------------------------------------
+select t1.id, t2.c2 from t1 join t2 on t1.id = t2.t1_id where t2.c2 in (1)
+
+1 ks_unsharded/-: select t1.id, t2.c2 from t1 join t2 on t1.id = t2.t1_id where t2.c2 in (1) limit 10001
+
+----------------------------------------------------------------------
diff --git a/go/vt/vtexplain/testdata/test-schema.sql b/go/vt/vtexplain/testdata/test-schema.sql
index 5b65334796f..06da14c669a 100644
--- a/go/vt/vtexplain/testdata/test-schema.sql
+++ b/go/vt/vtexplain/testdata/test-schema.sql
@@ -4,6 +4,12 @@ create table t1 (
floatval float not null default 0,
primary key (id)
);
+create table t2 (
+ id bigint(20) unsigned not null,
+ t1_id bigint(20) unsigned not null default 0,
+ c2 bigint(20) null,
+ primary key (id)
+);
create table user (
id bigint,
@@ -105,3 +111,27 @@ CREATE TABLE orders_id_lookup (
keyspace_id varbinary(128),
primary key(id)
);
+
+CREATE TABLE orders_id_lookup_exclusive_read_lock (
+ id int NOT NULL,
+ keyspace_id varbinary(128),
+ primary key(id)
+);
+
+CREATE TABLE orders_id_lookup_shared_read_lock (
+ id int NOT NULL,
+ keyspace_id varbinary(128),
+ primary key(id)
+);
+
+CREATE TABLE orders_id_lookup_no_read_lock (
+ id int NOT NULL,
+ keyspace_id varbinary(128),
+ primary key(id)
+);
+
+CREATE TABLE orders_id_lookup_no_verify (
+ id int NOT NULL,
+ keyspace_id varbinary(128),
+ primary key(id)
+);
diff --git a/go/vt/vtexplain/testdata/test-vschema.json b/go/vt/vtexplain/testdata/test-vschema.json
index a50e11e92ae..5d288121507 100644
--- a/go/vt/vtexplain/testdata/test-vschema.json
+++ b/go/vt/vtexplain/testdata/test-vschema.json
@@ -3,6 +3,7 @@
"sharded": false,
"tables": {
"t1": {},
+ "t2": {},
"table_not_in_schema": {}
}
},
@@ -18,6 +19,46 @@
},
"owner": "orders"
},
+ "orders_id_vdx_exclusive_read_lock": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "orders_id_lookup_exclusive_read_lock",
+ "from": "id",
+ "to": "keyspace_id",
+ "read_lock": "exclusive"
+ },
+ "owner": "orders"
+ },
+ "orders_id_vdx_shared_read_lock": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "orders_id_lookup_shared_read_lock",
+ "from": "id",
+ "to": "keyspace_id",
+ "read_lock": "shared"
+ },
+ "owner": "orders"
+ },
+ "orders_id_vdx_no_read_lock": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "orders_id_lookup_no_read_lock",
+ "from": "id",
+ "to": "keyspace_id",
+ "read_lock": "none"
+ },
+ "owner": "orders"
+ },
+ "orders_id_vdx_no_verify": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "orders_id_lookup_no_verify",
+ "from": "id",
+ "to": "keyspace_id",
+ "no_verify": "true"
+ },
+ "owner": "orders"
+ },
"music_user_map": {
"type": "lookup_hash_unique",
"owner": "music",
@@ -164,6 +205,22 @@
}
]
},
+ "orders_id_lookup_no_read_lock": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "orders_id_lookup_no_verify": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
"email_customer_map": {
"column_vindexes": [
{
diff --git a/go/vt/vtexplain/testdata/unsharded-queries.sql b/go/vt/vtexplain/testdata/unsharded-queries.sql
index f0147ac5d6e..712245f3338 100644
--- a/go/vt/vtexplain/testdata/unsharded-queries.sql
+++ b/go/vt/vtexplain/testdata/unsharded-queries.sql
@@ -6,3 +6,4 @@ update t1 set floatval = 9.99;
delete from t1 where id = 100;
insert into t1 (id,intval,floatval) values (1,2,3.14) on duplicate key update intval=3, floatval=3.14;
select ID from t1;
+select t1.id, t2.c2 from t1 join t2 on t1.id = t2.t1_id where t2.c2 in (1);
\ No newline at end of file
diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go
index 21fc30cbd4f..8145c59b44d 100644
--- a/go/vt/vtexplain/vtexplain_test.go
+++ b/go/vt/vtexplain/vtexplain_test.go
@@ -283,14 +283,14 @@ func TestJSONOutput(t *testing.T) {
}
}
-func testShardInfo(ks, start, end string, t *testing.T) *topo.ShardInfo {
+func testShardInfo(ks, start, end string, primaryServing bool, t *testing.T) *topo.ShardInfo {
kr, err := key.ParseKeyRangeParts(start, end)
require.NoError(t, err)
return topo.NewShardInfo(
ks,
fmt.Sprintf("%s-%s", start, end),
- &topodata.Shard{KeyRange: kr},
+ &topodata.Shard{KeyRange: kr, IsPrimaryServing: primaryServing},
&vtexplainTestTopoVersion{},
)
}
@@ -304,14 +304,17 @@ func TestUsingKeyspaceShardMap(t *testing.T) {
testcase: "select-sharded-8",
ShardRangeMap: map[string]map[string]*topo.ShardInfo{
"ks_sharded": {
- "-20": testShardInfo("ks_sharded", "", "20", t),
- "20-40": testShardInfo("ks_sharded", "20", "40", t),
- "40-60": testShardInfo("ks_sharded", "40", "60", t),
- "60-80": testShardInfo("ks_sharded", "60", "80", t),
- "80-a0": testShardInfo("ks_sharded", "80", "a0", t),
- "a0-c0": testShardInfo("ks_sharded", "a0", "c0", t),
- "c0-e0": testShardInfo("ks_sharded", "c0", "e0", t),
- "e0-": testShardInfo("ks_sharded", "e0", "", t),
+ "-20": testShardInfo("ks_sharded", "", "20", true, t),
+ "20-40": testShardInfo("ks_sharded", "20", "40", true, t),
+ "40-60": testShardInfo("ks_sharded", "40", "60", true, t),
+ "60-80": testShardInfo("ks_sharded", "60", "80", true, t),
+ "80-a0": testShardInfo("ks_sharded", "80", "a0", true, t),
+ "a0-c0": testShardInfo("ks_sharded", "a0", "c0", true, t),
+ "c0-e0": testShardInfo("ks_sharded", "c0", "e0", true, t),
+ "e0-": testShardInfo("ks_sharded", "e0", "", true, t),
+ // Some non-serving shards below - these should never be in the output of vtexplain
+ "-80": testShardInfo("ks_sharded", "", "80", false, t),
+ "80-": testShardInfo("ks_sharded", "80", "", false, t),
},
},
},
@@ -321,11 +324,15 @@ func TestUsingKeyspaceShardMap(t *testing.T) {
// Have mercy on the poor soul that has this keyspace sharding.
// But, hey, vtexplain still works so they have that going for them.
"ks_sharded": {
- "-80": testShardInfo("ks_sharded", "", "80", t),
- "80-90": testShardInfo("ks_sharded", "80", "90", t),
- "90-a0": testShardInfo("ks_sharded", "90", "a0", t),
- "a0-e8": testShardInfo("ks_sharded", "a0", "e8", t),
- "e8-": testShardInfo("ks_sharded", "e8", "", t),
+ "-80": testShardInfo("ks_sharded", "", "80", true, t),
+ "80-90": testShardInfo("ks_sharded", "80", "90", true, t),
+ "90-a0": testShardInfo("ks_sharded", "90", "a0", true, t),
+ "a0-e8": testShardInfo("ks_sharded", "a0", "e8", true, t),
+ "e8-": testShardInfo("ks_sharded", "e8", "", true, t),
+ // Plus some un-even shards that are not serving and which should never be in the output of vtexplain
+ "80-a0": testShardInfo("ks_sharded", "80", "a0", false, t),
+ "a0-a5": testShardInfo("ks_sharded", "a0", "a5", false, t),
+ "a5-": testShardInfo("ks_sharded", "a5", "", false, t),
},
},
},
diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go
index 7c1aa2dfdb6..a4287377c4b 100644
--- a/go/vt/vtexplain/vtexplain_vtgate.go
+++ b/go/vt/vtexplain/vtexplain_vtgate.go
@@ -75,7 +75,7 @@ func (vte *VTExplain) initVtgateExecutor(vSchemaStr, ksShardMapStr string, opts
vte.vtgateExecutor = vtgate.NewExecutor(context.Background(), vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, cache.DefaultConfig, schemaTracker, false, opts.PlannerVersion)
queryLogBufferSize := 10
- vtgate.QueryLogger = streamlog.New("VTGate", queryLogBufferSize)
+ vtgate.SetQueryLogger(streamlog.New("VTGate", queryLogBufferSize))
return nil
}
@@ -131,6 +131,14 @@ func (vte *VTExplain) buildTopology(opts *Options, vschemaStr string, ksShardMap
vte.explainTopo.KeyspaceShards[ks] = make(map[string]*topodatapb.ShardReference)
for _, shard := range shards {
+ // If the topology is in the middle of a reshard, there can be two shards covering the same key range (e.g.
+ // both source shard 80- and target shard 80-c0 cover the keyrange 80-c0). For the purposes of explain, we
+ // should only consider the one that is serving, hence we skip the ones not serving. Otherwise, vtexplain
+ // gives inconsistent results - sometimes it will route the query being explained to the source shard, and
+ // sometimes to the destination shard. See https://github.com/vitessio/vitess/issues/11632 .
+ if shardInfo, ok := ksShardMap[ks][shard.Name]; ok && !shardInfo.IsPrimaryServing {
+ continue
+ }
hostname := fmt.Sprintf("%s/%s", ks, shard.Name)
log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name)
diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go
index ee94946e5c1..4f0a3f7d102 100644
--- a/go/vt/vtexplain/vtexplain_vttablet.go
+++ b/go/vt/vtexplain/vtexplain_vttablet.go
@@ -24,13 +24,12 @@ import (
"strings"
"sync"
- "vitess.io/vitess/go/sqlescape"
- "vitess.io/vitess/go/vt/vtgate/evalengine"
- "vitess.io/vitess/go/vt/vttablet/onlineddl"
+ "vitess.io/vitess/go/vt/sidecardb"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/fakesqldb"
+ "vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/log"
@@ -38,6 +37,7 @@ import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vttablet/queryservice"
"vitess.io/vitess/go/vt/vttablet/tabletserver"
@@ -105,6 +105,7 @@ var _ queryservice.QueryService = (*explainTablet)(nil)
func (vte *VTExplain) newTablet(opts *Options, t *topodatapb.Tablet) *explainTablet {
db := fakesqldb.New(nil)
+ sidecardb.AddSchemaInitQueries(db, true)
config := tabletenv.NewCurrentConfig()
config.TrackSchemaVersions = false
@@ -114,6 +115,7 @@ func (vte *VTExplain) newTablet(opts *Options, t *topodatapb.Tablet) *explainTab
config.TwoPCEnable = true
}
config.EnableOnlineDDL = false
+ config.EnableTableGC = false
// XXX much of this is cloned from the tabletserver tests
tsv := tabletserver.NewTabletServer(topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(""), t.Alias)
@@ -142,7 +144,7 @@ func (vte *VTExplain) newTablet(opts *Options, t *topodatapb.Tablet) *explainTab
tsv.StartService(&target, dbcfgs, nil /* mysqld */)
// clear all the schema initialization queries out of the tablet
- // to avoid clutttering the output
+ // to avoid cluttering the output
tablet.mysqlQueries = nil
return &tablet
@@ -298,6 +300,15 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet
{sqltypes.NewVarBinary("STRICT_TRANS_TABLES")},
},
},
+ "select @@session.sql_mode as sql_mode": {
+ Fields: []*querypb.Field{{
+ Name: "sql_mode",
+ Type: sqltypes.VarChar,
+ }},
+ Rows: [][]sqltypes.Value{
+ {sqltypes.NewVarBinary("STRICT_TRANS_TABLES")},
+ },
+ },
"select @@autocommit": {
Fields: []*querypb.Field{{
Type: sqltypes.Uint64,
@@ -387,12 +398,6 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet
for query, result := range schemaQueries {
tEnv.addResult(query, result)
}
- for _, query := range onlineddl.ApplyDDL {
- tEnv.addResult(query, &sqltypes.Result{
- Fields: []*querypb.Field{{Type: sqltypes.Uint64}},
- Rows: [][]sqltypes.Value{},
- })
- }
showTableRows := make([][]sqltypes.Value, 0, 4)
for _, ddl := range ddls {
@@ -412,6 +417,10 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet
Fields: mysql.BaseShowTablesFields,
Rows: showTableRows,
})
+ tEnv.addResult(mysql.TablesWithSize80, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesFields,
+ Rows: showTableRows,
+ })
indexRows := make([][]sqltypes.Value, 0, 4)
for _, ddl := range ddls {
@@ -498,205 +507,239 @@ func (t *explainTablet) HandleQuery(c *mysql.Conn, query string, callback func(*
// return the pre-computed results for any schema introspection queries
tEnv := t.vte.getGlobalTabletEnv()
result := tEnv.getResult(query)
-
+ emptyResult := &sqltypes.Result{}
+ if sidecardb.MatchesInitQuery(query) {
+ return callback(emptyResult)
+ }
if result != nil {
return callback(result)
}
switch sqlparser.Preview(query) {
case sqlparser.StmtSelect:
- // Parse the select statement to figure out the table and columns
- // that were referenced so that the synthetic response has the
- // expected field names and types.
- stmt, err := sqlparser.Parse(query)
+ var err error
+ result, err = t.handleSelect(query)
if err != nil {
return err
}
-
- var selStmt *sqlparser.Select
- switch stmt := stmt.(type) {
- case *sqlparser.Select:
- selStmt = stmt
- case *sqlparser.Union:
- selStmt = sqlparser.GetFirstSelect(stmt)
- default:
- return fmt.Errorf("vtexplain: unsupported statement type +%v", reflect.TypeOf(stmt))
+ case sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtSet,
+ sqlparser.StmtSavepoint, sqlparser.StmtSRollback, sqlparser.StmtRelease:
+ result = &sqltypes.Result{}
+ case sqlparser.StmtShow:
+ result = &sqltypes.Result{Fields: sqltypes.MakeTestFields("", "")}
+ case sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete:
+ result = &sqltypes.Result{
+ RowsAffected: 1,
}
+ default:
+ return fmt.Errorf("unsupported query %s", query)
+ }
- // Gen4 supports more complex queries so we now need to
- // handle multiple FROM clauses
- tables := make([]*sqlparser.AliasedTableExpr, len(selStmt.From))
- for _, from := range selStmt.From {
- tables = append(tables, getTables(from)...)
- }
+ return callback(result)
+}
- tableColumnMap := map[sqlparser.IdentifierCS]map[string]querypb.Type{}
- for _, table := range tables {
- if table == nil {
- continue
- }
+func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) {
+ // Parse the select statement to figure out the table and columns
+ // that were referenced so that the synthetic response has the
+ // expected field names and types.
+ stmt, err := sqlparser.Parse(query)
+ if err != nil {
+ return nil, err
+ }
- tableName := sqlparser.String(sqlparser.GetTableName(table.Expr))
- columns, exists := t.vte.getGlobalTabletEnv().tableColumns[tableName]
- if !exists && tableName != "" && tableName != "dual" {
- return fmt.Errorf("unable to resolve table name %s", tableName)
- }
+ var selStmt *sqlparser.Select
+ switch stmt := stmt.(type) {
+ case *sqlparser.Select:
+ selStmt = stmt
+ case *sqlparser.Union:
+ selStmt = sqlparser.GetFirstSelect(stmt)
+ default:
+ return nil, fmt.Errorf("vtexplain: unsupported statement type +%v", reflect.TypeOf(stmt))
+ }
- colTypeMap := map[string]querypb.Type{}
+ // Gen4 supports more complex queries so we now need to
+ // handle multiple FROM clauses
+ tables := make([]*sqlparser.AliasedTableExpr, len(selStmt.From))
+ for _, from := range selStmt.From {
+ tables = append(tables, getTables(from)...)
+ }
- if table.As.IsEmpty() {
- tableColumnMap[sqlparser.GetTableName(table.Expr)] = colTypeMap
- } else {
- tableColumnMap[table.As] = colTypeMap
- }
+ tableColumnMap := map[sqlparser.IdentifierCS]map[string]querypb.Type{}
+ for _, table := range tables {
+ if table == nil {
+ continue
+ }
- for k, v := range columns {
- if colType, exists := colTypeMap[k]; exists {
- if colType != v {
- return fmt.Errorf("column type mismatch for column : %s, types: %d vs %d", k, colType, v)
- }
- continue
- }
- colTypeMap[k] = v
- }
+ tableName := sqlparser.String(sqlparser.GetTableName(table.Expr))
+ columns, exists := t.vte.getGlobalTabletEnv().tableColumns[tableName]
+ if !exists && tableName != "" && tableName != "dual" {
+ return nil, fmt.Errorf("unable to resolve table name %s", tableName)
+ }
+
+ colTypeMap := map[string]querypb.Type{}
+ if table.As.IsEmpty() {
+ tableColumnMap[sqlparser.GetTableName(table.Expr)] = colTypeMap
+ } else {
+ tableColumnMap[table.As] = colTypeMap
}
- colNames := make([]string, 0, 4)
- colTypes := make([]querypb.Type, 0, 4)
- for _, node := range selStmt.SelectExprs {
- switch node := node.(type) {
- case *sqlparser.AliasedExpr:
- colNames, colTypes = inferColTypeFromExpr(node.Expr, tableColumnMap, colNames, colTypes)
- case *sqlparser.StarExpr:
- if node.TableName.Name.IsEmpty() {
- // SELECT *
- for _, colTypeMap := range tableColumnMap {
- for col, colType := range colTypeMap {
- colNames = append(colNames, col)
- colTypes = append(colTypes, colType)
- }
- }
- } else {
- // SELECT tableName.*
- colTypeMap := tableColumnMap[node.TableName.Name]
- for col, colType := range colTypeMap {
- colNames = append(colNames, col)
- colTypes = append(colTypes, colType)
- }
+ for k, v := range columns {
+ if colType, exists := colTypeMap[k]; exists {
+ if colType != v {
+ return nil, fmt.Errorf("column type mismatch for column : %s, types: %d vs %d", k, colType, v)
}
+ continue
}
+ colTypeMap[k] = v
}
- // the query against lookup table is in-query, handle it specifically
- var inColName string
- inVal := make([]sqltypes.Value, 0, 10)
-
- rowCount := 1
- if selStmt.Where != nil {
- switch v := selStmt.Where.Expr.(type) {
- case *sqlparser.ComparisonExpr:
- if v.Operator == sqlparser.InOp {
- switch c := v.Left.(type) {
- case *sqlparser.ColName:
- colName := strings.ToLower(c.Name.String())
- colType := tableColumnMap[sqlparser.GetTableName(selStmt.From[0].(*sqlparser.AliasedTableExpr).Expr)][colName]
-
- switch values := v.Right.(type) {
- case sqlparser.ValTuple:
- for _, val := range values {
- switch v := val.(type) {
- case *sqlparser.Literal:
- value, err := evalengine.LiteralToValue(v)
- if err != nil {
- return err
- }
-
- // Cast the value in the tuple to the expected value of the column
- castedValue, err := evalengine.Cast(value, colType)
- if err != nil {
- return err
- }
-
- // Check if we have a duplicate value
- isNewValue := true
- for _, v := range inVal {
- result, err := evalengine.NullsafeCompare(v, value, collations.Default())
- if err != nil {
- return err
- }
-
- if result == 0 {
- isNewValue = false
- break
- }
- }
-
- if isNewValue {
- inVal = append(inVal, castedValue)
- }
- }
- }
- rowCount = len(inVal)
- }
- inColName = strings.ToLower(c.Name.String())
- }
- }
- }
+ }
+
+ colNames, colTypes := t.analyzeExpressions(selStmt, tableColumnMap)
+
+ inColName, inVal, rowCount, s, err := t.analyzeWhere(selStmt, tableColumnMap)
+ if err != nil {
+ return s, err
+ }
+
+ fields := make([]*querypb.Field, len(colNames))
+ rows := make([][]sqltypes.Value, 0, rowCount)
+ for i, col := range colNames {
+ colType := colTypes[i]
+ fields[i] = &querypb.Field{
+ Name: col,
+ Type: colType,
}
+ }
- fields := make([]*querypb.Field, len(colNames))
- rows := make([][]sqltypes.Value, 0, rowCount)
+ for j := 0; j < rowCount; j++ {
+ values := make([]sqltypes.Value, len(colNames))
for i, col := range colNames {
+ // Generate a fake value for the given column. For the column in the IN clause,
+ // use the provided values in the query, For numeric types,
+ // use the column index. For all other types, just shortcut to using
+ // a string type that encodes the column name + index.
colType := colTypes[i]
- fields[i] = &querypb.Field{
- Name: col,
- Type: colType,
+ if len(inVal) > j && col == inColName {
+ values[i], _ = sqltypes.NewValue(querypb.Type_VARBINARY, inVal[j].Raw())
+ } else if sqltypes.IsIntegral(colType) {
+ values[i] = sqltypes.NewInt32(int32(i + 1))
+ } else if sqltypes.IsFloat(colType) {
+ values[i] = sqltypes.NewFloat64(1.0 + float64(i))
+ } else {
+ values[i] = sqltypes.NewVarChar(fmt.Sprintf("%s_val_%d", col, i+1))
}
}
+ rows = append(rows, values)
+ }
+ result := &sqltypes.Result{
+ Fields: fields,
+ InsertID: 0,
+ Rows: rows,
+ }
- for j := 0; j < rowCount; j++ {
- values := make([]sqltypes.Value, len(colNames))
- for i, col := range colNames {
- // Generate a fake value for the given column. For the column in the IN clause,
- // use the provided values in the query, For numeric types,
- // use the column index. For all other types, just shortcut to using
- // a string type that encodes the column name + index.
- colType := colTypes[i]
- if len(inVal) > j && col == inColName {
- values[i], _ = sqltypes.NewValue(querypb.Type_VARBINARY, inVal[j].Raw())
- } else if sqltypes.IsIntegral(colType) {
- values[i] = sqltypes.NewInt32(int32(i + 1))
- } else if sqltypes.IsFloat(colType) {
- values[i] = sqltypes.NewFloat64(1.0 + float64(i))
- } else {
- values[i] = sqltypes.NewVarChar(fmt.Sprintf("%s_val_%d", col, i+1))
- }
+ resultJSON, _ := json.MarshalIndent(result, "", " ")
+ log.V(100).Infof("query %s result %s\n", query, string(resultJSON))
+ return result, nil
+}
+
+func (t *explainTablet) analyzeWhere(selStmt *sqlparser.Select, tableColumnMap map[sqlparser.IdentifierCS]map[string]querypb.Type) (inColName string, inVal []sqltypes.Value, rowCount int, result *sqltypes.Result, err error) {
+ // the query against lookup table is in-query, handle it specifically
+ rowCount = 1
+ if selStmt.Where == nil {
+ return
+ }
+ v, ok := selStmt.Where.Expr.(*sqlparser.ComparisonExpr)
+ if !ok || v.Operator != sqlparser.InOp {
+ return
+ }
+ c, ok := v.Left.(*sqlparser.ColName)
+ if !ok {
+ return
+ }
+ colName := strings.ToLower(c.Name.String())
+ colType := querypb.Type_VARCHAR
+ tableExpr := selStmt.From[0]
+ expr, ok := tableExpr.(*sqlparser.AliasedTableExpr)
+ if ok {
+ m := tableColumnMap[sqlparser.GetTableName(expr.Expr)]
+ if m != nil {
+ t, found := m[colName]
+ if found {
+ colType = t
}
- rows = append(rows, values)
}
- result = &sqltypes.Result{
- Fields: fields,
- InsertID: 0,
- Rows: rows,
+ }
+
+ values, ok := v.Right.(sqlparser.ValTuple)
+ if !ok {
+ return
+ }
+ for _, val := range values {
+ lit, ok := val.(*sqlparser.Literal)
+ if !ok {
+ continue
+ }
+ value, err := evalengine.LiteralToValue(lit)
+ if err != nil {
+ return "", nil, 0, nil, err
}
- resultJSON, _ := json.MarshalIndent(result, "", " ")
- log.V(100).Infof("query %s result %s\n", query, string(resultJSON))
+ // Cast the value in the tuple to the expected value of the column
+ castedValue, err := evalengine.Cast(value, colType)
+ if err != nil {
+ return "", nil, 0, nil, err
+ }
- case sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtSet,
- sqlparser.StmtSavepoint, sqlparser.StmtSRollback, sqlparser.StmtRelease:
- result = &sqltypes.Result{}
- case sqlparser.StmtShow:
- result = &sqltypes.Result{Fields: sqltypes.MakeTestFields("", "")}
- case sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete:
- result = &sqltypes.Result{
- RowsAffected: 1,
+ // Check if we have a duplicate value
+ isNewValue := true
+ for _, v := range inVal {
+ result, err := evalengine.NullsafeCompare(v, value, collations.Default())
+ if err != nil {
+ return "", nil, 0, nil, err
+ }
+
+ if result == 0 {
+ isNewValue = false
+ break
+ }
+ }
+
+ if isNewValue {
+ inVal = append(inVal, castedValue)
}
- default:
- return fmt.Errorf("unsupported query %s", query)
}
+ inColName = strings.ToLower(c.Name.String())
+ return inColName, inVal, rowCount, nil, nil
+}
- return callback(result)
+func (t *explainTablet) analyzeExpressions(selStmt *sqlparser.Select, tableColumnMap map[sqlparser.IdentifierCS]map[string]querypb.Type) ([]string, []querypb.Type) {
+ colNames := make([]string, 0, 4)
+ colTypes := make([]querypb.Type, 0, 4)
+ for _, node := range selStmt.SelectExprs {
+ switch node := node.(type) {
+ case *sqlparser.AliasedExpr:
+ colNames, colTypes = inferColTypeFromExpr(node.Expr, tableColumnMap, colNames, colTypes)
+ case *sqlparser.StarExpr:
+ if node.TableName.Name.IsEmpty() {
+ // SELECT *
+ for _, colTypeMap := range tableColumnMap {
+ for col, colType := range colTypeMap {
+ colNames = append(colNames, col)
+ colTypes = append(colTypes, colType)
+ }
+ }
+ } else {
+ // SELECT tableName.*
+ colTypeMap := tableColumnMap[node.TableName.Name]
+ for col, colType := range colTypeMap {
+ colNames = append(colNames, col)
+ colTypes = append(colTypes, colType)
+ }
+ }
+ }
+ }
+ return colNames, colTypes
}
func getTables(node sqlparser.SQLNode) []*sqlparser.AliasedTableExpr {
diff --git a/go/vt/vtgate/endtoend/deletetest/delete_test.go b/go/vt/vtgate/endtoend/deletetest/delete_test.go
index 82c9c359c5b..f6f5da27fdd 100644
--- a/go/vt/vtgate/endtoend/deletetest/delete_test.go
+++ b/go/vt/vtgate/endtoend/deletetest/delete_test.go
@@ -33,11 +33,10 @@ import (
)
var (
- cluster *vttest.LocalCluster
- vtParams mysql.ConnParams
- mysqlParams mysql.ConnParams
- grpcAddress string
- tabletHostName = flag.String("tablet_hostname", "", "the tablet hostname")
+ cluster *vttest.LocalCluster
+ vtParams mysql.ConnParams
+ mysqlParams mysql.ConnParams
+ grpcAddress string
schema = `
create table t1(
@@ -135,8 +134,6 @@ func TestMain(m *testing.M) {
}
defer os.RemoveAll(cfg.SchemaDir)
- cfg.TabletHostName = *tabletHostName
-
cluster = &vttest.LocalCluster{
Config: cfg,
}
diff --git a/go/vt/vtgate/endtoend/main_test.go b/go/vt/vtgate/endtoend/main_test.go
index 046af36a3dd..48872965cb9 100644
--- a/go/vt/vtgate/endtoend/main_test.go
+++ b/go/vt/vtgate/endtoend/main_test.go
@@ -18,7 +18,6 @@ package endtoend
import (
"context"
- "flag"
"fmt"
"os"
"testing"
@@ -33,11 +32,10 @@ import (
)
var (
- cluster *vttest.LocalCluster
- vtParams mysql.ConnParams
- mysqlParams mysql.ConnParams
- grpcAddress string
- tabletHostName = flag.String("tablet_hostname", "", "the tablet hostname")
+ cluster *vttest.LocalCluster
+ vtParams mysql.ConnParams
+ mysqlParams mysql.ConnParams
+ grpcAddress string
schema = `
create table t1(
@@ -46,6 +44,18 @@ create table t1(
primary key(id1)
) Engine=InnoDB;
+create table t1_copy_basic(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
+
+create table t1_copy_resume(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
+
create table t1_id2_idx(
id2 bigint,
keyspace_id varbinary(10),
@@ -134,6 +144,18 @@ create table t1_sharded(
Name: "t1_id2_vdx",
}},
},
+ "t1_copy_basic": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "id1",
+ Name: "hash",
+ }},
+ },
+ "t1_copy_resume": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "id1",
+ Name: "hash",
+ }},
+ },
"t1_sharded": {
ColumnVindexes: []*vschemapb.ColumnVindex{{
Column: "id1",
@@ -219,8 +241,6 @@ func TestMain(m *testing.M) {
}
defer os.RemoveAll(cfg.SchemaDir)
- cfg.TabletHostName = *tabletHostName
-
cluster = &vttest.LocalCluster{
Config: cfg,
}
diff --git a/go/vt/vtgate/endtoend/update/lookup_unique_test.go b/go/vt/vtgate/endtoend/update/lookup_unique_test.go
index 88ada23b689..1b37e50af04 100644
--- a/go/vt/vtgate/endtoend/update/lookup_unique_test.go
+++ b/go/vt/vtgate/endtoend/update/lookup_unique_test.go
@@ -18,20 +18,20 @@ package endtoend
import (
"context"
- "flag"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/internal/flag"
+ "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/utils"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/vttest"
- "vitess.io/vitess/go/mysql"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
vttestpb "vitess.io/vitess/go/vt/proto/vttest"
- "vitess.io/vitess/go/vt/vttest"
)
var (
@@ -131,7 +131,7 @@ create table t2_id_idx(
)
func TestMain(m *testing.M) {
- flag.Parse()
+ flag.ParseFlagsForTest()
exitCode := func() int {
var cfg vttest.Config
diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go
index 477bb2518b5..4a5f7bc46ab 100644
--- a/go/vt/vtgate/endtoend/vstream_test.go
+++ b/go/vt/vtgate/endtoend/vstream_test.go
@@ -20,6 +20,8 @@ import (
"context"
"fmt"
"io"
+ "regexp"
+ "sort"
"sync"
"testing"
@@ -168,7 +170,7 @@ func TestVStreamCopyBasic(t *testing.T) {
gconn, conn, mconn, closeConnections := initialize(ctx, t)
defer closeConnections()
- _, err := conn.ExecuteFetch("insert into t1(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false)
+ _, err := conn.ExecuteFetch("insert into t1_copy_basic(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false)
if err != nil {
t.Fatal(err)
}
@@ -179,7 +181,7 @@ func TestVStreamCopyBasic(t *testing.T) {
}
qr := sqltypes.ResultToProto3(&lastPK)
tablePKs := []*binlogdatapb.TableLastPK{{
- TableName: "t1",
+ TableName: "t1_copy_basic",
Lastpk: qr,
}}
var shardGtids []*binlogdatapb.ShardGtid
@@ -199,8 +201,8 @@ func TestVStreamCopyBasic(t *testing.T) {
vgtid.ShardGtids = shardGtids
filter := &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{
- Match: "t1",
- Filter: "select * from t1",
+ Match: "t1_copy_basic",
+ Filter: "select * from t1_copy_basic",
}},
}
flags := &vtgatepb.VStreamFlags{}
@@ -209,19 +211,157 @@ func TestVStreamCopyBasic(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- numExpectedEvents := 2 /* num shards */ * (7 /* begin/field/vgtid:pos/2 rowevents avg/vgitd: lastpk/commit) */ + 3 /* begin/vgtid/commit for completed table */)
+ numExpectedEvents := 2 /* num shards */ *(7 /* begin/field/vgtid:pos/2 rowevents avg/vgitd: lastpk/commit) */ +3 /* begin/vgtid/commit for completed table */ +1 /* copy operation completed */) + 1 /* fully copy operation completed */
+ expectedCompletedEvents := []string{
+ `type:COPY_COMPLETED keyspace:"ks" shard:"-80"`,
+ `type:COPY_COMPLETED keyspace:"ks" shard:"80-"`,
+ `type:COPY_COMPLETED`,
+ }
require.NotNil(t, reader)
var evs []*binlogdatapb.VEvent
+ var completedEvs []*binlogdatapb.VEvent
for {
e, err := reader.Recv()
switch err {
case nil:
evs = append(evs, e...)
+
+ for _, ev := range e {
+ if ev.Type == binlogdatapb.VEventType_COPY_COMPLETED {
+ completedEvs = append(completedEvs, ev)
+ }
+ }
+
+ printEvents(evs) // for debugging ci failures
+
if len(evs) == numExpectedEvents {
+ // The arrival order of COPY_COMPLETED events with keyspace/shard is not constant.
+ // On the other hand, the last event should always be a fully COPY_COMPLETED event.
+ // That's why the sort.Slice doesn't have to handle the last element in completedEvs.
+ sort.Slice(completedEvs[:len(completedEvs)-1], func(i, j int) bool {
+ return completedEvs[i].GetShard() < completedEvs[j].GetShard()
+ })
+ for i, ev := range completedEvs {
+ require.Regexp(t, expectedCompletedEvents[i], ev.String())
+ }
t.Logf("TestVStreamCopyBasic was successful")
return
+ } else if numExpectedEvents < len(evs) {
+ t.Fatalf("len(events)=%v are not expected\n", len(evs))
+ }
+ case io.EOF:
+ log.Infof("stream ended\n")
+ cancel()
+ default:
+ log.Errorf("Returned err %v", err)
+ t.Fatalf("remote error: %v\n", err)
+ }
+ }
+}
+
+func TestVStreamCopyResume(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ gconn, conn, mconn, closeConnections := initialize(ctx, t)
+ defer closeConnections()
+
+ _, err := conn.ExecuteFetch("insert into t1_copy_resume(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Any subsequent GTIDs will be part of the stream
+ mpos, err := mconn.PrimaryPosition()
+ require.NoError(t, err)
+
+ // lastPK is id1=4, meaning we should only copy rows for id1 IN(5,6,7,8,9)
+ lastPK := sqltypes.Result{
+ Fields: []*query.Field{{Name: "id1", Type: query.Type_INT64}},
+ Rows: [][]sqltypes.Value{{sqltypes.NewInt64(4)}},
+ }
+ tableLastPK := []*binlogdatapb.TableLastPK{{
+ TableName: "t1_copy_resume",
+ Lastpk: sqltypes.ResultToProto3(&lastPK),
+ }}
+
+ catchupQueries := []string{
+ "insert into t1_copy_resume(id1,id2) values(9,9)", // this row will show up twice: once in catchup and copy
+ "update t1_copy_resume set id2 = 10 where id1 = 1",
+ "insert into t1(id1, id2) values(100,100)",
+ "delete from t1_copy_resume where id1 = 1",
+ "update t1_copy_resume set id2 = 90 where id1 = 9",
+ }
+ for _, query := range catchupQueries {
+ _, err = conn.ExecuteFetch(query, 1, false)
+ require.NoError(t, err)
+ }
+
+ var shardGtids []*binlogdatapb.ShardGtid
+ var vgtid = &binlogdatapb.VGtid{}
+ shardGtids = append(shardGtids, &binlogdatapb.ShardGtid{
+ Keyspace: "ks",
+ Shard: "-80",
+ Gtid: fmt.Sprintf("%s/%s", mpos.GTIDSet.Flavor(), mpos),
+ TablePKs: tableLastPK,
+ })
+ shardGtids = append(shardGtids, &binlogdatapb.ShardGtid{
+ Keyspace: "ks",
+ Shard: "80-",
+ Gtid: fmt.Sprintf("%s/%s", mpos.GTIDSet.Flavor(), mpos),
+ TablePKs: tableLastPK,
+ })
+ vgtid.ShardGtids = shardGtids
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1_copy_resume",
+ Filter: "select * from t1_copy_resume",
+ }},
+ }
+ flags := &vtgatepb.VStreamFlags{}
+ reader, err := gconn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, flags)
+ if err != nil {
+ t.Fatal(err)
+ }
+ require.NotNil(t, reader)
+
+ expectedRowCopyEvents := 5 // id1 and id2 IN(5,6,7,8,9)
+ expectedCatchupEvents := len(catchupQueries) - 1 // insert into t1 should never reach
+ rowCopyEvents, replCatchupEvents := 0, 0
+ expectedEvents := []string{
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{before:{lengths:1 lengths:1 values:"11"} after:{lengths:1 lengths:2 values:"110"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{before:{lengths:1 lengths:2 values:"110"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"55"}} keyspace:"ks" shard:"-80"} keyspace:"ks" shard:"-80"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"66"}} keyspace:"ks" shard:"80-"} keyspace:"ks" shard:"80-"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"77"}} keyspace:"ks" shard:"80-"} keyspace:"ks" shard:"80-"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"88"}} keyspace:"ks" shard:"80-"} keyspace:"ks" shard:"80-"`,
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"99"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:2 values:"990"}} keyspace:"ks" shard:"-80"} keyspace:"ks" shard:"-80"`,
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{before:{lengths:1 lengths:1 values:"99"} after:{lengths:1 lengths:2 values:"990"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ }
+ var evs []*binlogdatapb.VEvent
+ for {
+ e, err := reader.Recv()
+ switch err {
+ case nil:
+ for _, ev := range e {
+ if ev.Type == binlogdatapb.VEventType_ROW {
+ evs = append(evs, ev)
+ if ev.Timestamp == 0 {
+ rowCopyEvents++
+ } else {
+ replCatchupEvents++
+ }
+ printEvents(evs) // for debugging ci failures
+ }
+ }
+ if expectedCatchupEvents == replCatchupEvents && expectedRowCopyEvents == rowCopyEvents {
+ sort.Sort(VEventSorter(evs))
+ for i, ev := range evs {
+ require.Regexp(t, expectedEvents[i], ev.String())
+ }
+ t.Logf("TestVStreamCopyResume was successful")
+ return
}
- printEvents(evs) // for debugging ci failures
case io.EOF:
log.Infof("stream ended\n")
cancel()
@@ -381,6 +521,136 @@ func TestVStreamSharded(t *testing.T) {
}
+// TestVStreamCopyTransactions tests that we are properly wrapping
+// ROW events in the stream with BEGIN and COMMIT events.
+func TestVStreamCopyTransactions(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ keyspace := "ks"
+ shards := []string{"-80", "80-"}
+ table := "t1_copy_basic"
+ beginEventSeen, commitEventSeen := false, false
+ numResultInTrx := 0
+ vgtid := &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{
+ {
+ Keyspace: keyspace,
+ Shard: shards[0],
+ Gtid: "", // Start a vstream copy
+ },
+ {
+ Keyspace: keyspace,
+ Shard: shards[1],
+ Gtid: "", // Start a vstream copy
+ },
+ },
+ }
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: table,
+ Filter: fmt.Sprintf("select * from %s", table),
+ }},
+ }
+
+ gconn, conn, _, closeConnections := initialize(ctx, t)
+ defer closeConnections()
+
+ // Clear any existing data.
+ q := fmt.Sprintf("delete from %s", table)
+ _, err := conn.ExecuteFetch(q, -1, false)
+ require.NoError(t, err, "error clearing data: %v", err)
+
+ // Generate some test data. Enough to cross the default
+ // vstream_packet_size threshold.
+ for i := 1; i <= 100000; i++ {
+ values := fmt.Sprintf("(%d, %d)", i, i)
+ q := fmt.Sprintf("insert into %s (id1, id2) values %s", table, values)
+ _, err := conn.ExecuteFetch(q, 1, false)
+ require.NoError(t, err, "error inserting data: %v", err)
+ }
+
+ // Start a vstream.
+ reader, err := gconn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, nil)
+ require.NoError(t, err, "error starting vstream: %v", err)
+
+recvLoop:
+ for {
+ vevents, err := reader.Recv()
+ numResultInTrx++
+ eventCount := len(vevents)
+ t.Logf("------------------ Received %d events in response #%d for the transaction ------------------\n",
+ eventCount, numResultInTrx)
+ switch err {
+ case nil:
+ for _, event := range vevents {
+ switch event.Type {
+ case binlogdatapb.VEventType_BEGIN:
+ require.False(t, beginEventSeen, "received a second BEGIN event within the transaction: numResultInTrx=%d\n",
+ numResultInTrx)
+ beginEventSeen = true
+ t.Logf("Found BEGIN event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx)
+ require.False(t, commitEventSeen, "received a BEGIN event when expecting a COMMIT event: numResultInTrx=%d\n",
+ numResultInTrx)
+ case binlogdatapb.VEventType_VGTID:
+ t.Logf("Found VGTID event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ case binlogdatapb.VEventType_FIELD:
+ t.Logf("Found FIELD event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ case binlogdatapb.VEventType_ROW:
+ // Uncomment if you need to do more debugging.
+ // t.Logf("Found ROW event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ // beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ case binlogdatapb.VEventType_COMMIT:
+ commitEventSeen = true
+ t.Logf("Found COMMIT event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ require.True(t, beginEventSeen, "received COMMIT event before receiving BEGIN event: numResultInTrx=%d\n",
+ numResultInTrx)
+ case binlogdatapb.VEventType_COPY_COMPLETED:
+ t.Logf("Finished vstream copy\n")
+ t.Logf("-------------------------------------------------------------------\n\n")
+ cancel()
+ break recvLoop
+ default:
+ t.Logf("Found extraneous event: %+v\n", event)
+ }
+ if beginEventSeen && commitEventSeen {
+ t.Logf("Received both BEGIN and COMMIT, so resetting transactional state\n")
+ beginEventSeen = false
+ commitEventSeen = false
+ numResultInTrx = 0
+ }
+ }
+ case io.EOF:
+ t.Logf("vstream ended\n")
+ t.Logf("-------------------------------------------------------------------\n\n")
+ cancel()
+ return
+ default:
+ require.FailNowf(t, "unexpected error", "encountered error in vstream: %v", err)
+ return
+ }
+ }
+ // The last response, when the vstream copy completes, does not
+ // typically contain ROW events.
+ if beginEventSeen || commitEventSeen {
+ require.True(t, (beginEventSeen && commitEventSeen), "did not receive both BEGIN and COMMIT events in the final ROW event set")
+ }
+}
+
+func removeAnyDeprecatedDisplayWidths(orig string) string {
+ var adjusted string
+ baseIntType := "int"
+ intRE := regexp.MustCompile(`(?i)int\(([0-9]*)?\)`)
+ adjusted = intRE.ReplaceAllString(orig, baseIntType)
+ baseYearType := "year"
+ yearRE := regexp.MustCompile(`(?i)year\(([0-9]*)?\)`)
+ adjusted = yearRE.ReplaceAllString(adjusted, baseYearType)
+ return adjusted
+}
+
var printMu sync.Mutex
func printEvents(evs []*binlogdatapb.VEvent) {
@@ -396,3 +666,31 @@ func printEvents(evs []*binlogdatapb.VEvent) {
s += "===END===" + "\n"
log.Infof("%s", s)
}
+
+// Sort the VEvents by the first row change's after value bytes primarily, with
+// secondary ordering by timestamp (ASC). Note that row copy events do not have
+// a timestamp and the value will be 0.
+type VEventSorter []*binlogdatapb.VEvent
+
+func (v VEventSorter) Len() int {
+ return len(v)
+}
+func (v VEventSorter) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
+func (v VEventSorter) Less(i, j int) bool {
+ valsI := v[i].GetRowEvent().RowChanges[0].After
+ if valsI == nil {
+ valsI = v[i].GetRowEvent().RowChanges[0].Before
+ }
+ valsJ := v[j].GetRowEvent().RowChanges[0].After
+ if valsJ == nil {
+ valsJ = v[j].GetRowEvent().RowChanges[0].Before
+ }
+ valI := string(valsI.Values)
+ valJ := string(valsJ.Values)
+ if valI == valJ {
+ return v[i].Timestamp < v[j].Timestamp
+ }
+ return valI < valJ
+}
diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go
index 14962bc86d7..bf370f4720d 100644
--- a/go/vt/vtgate/engine/cached_size.go
+++ b/go/vt/vtgate/engine/cached_size.go
@@ -1159,38 +1159,38 @@ func (cached *UserDefinedVariable) CachedSize(alloc bool) int64 {
}
return size
}
-func (cached *VStream) CachedSize(alloc bool) int64 {
+func (cached *VExplain) CachedSize(alloc bool) int64 {
if cached == nil {
return int64(0)
}
size := int64(0)
if alloc {
- size += int64(64)
+ size += int64(24)
}
- // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace
- size += cached.Keyspace.CachedSize(true)
- // field TargetDestination vitess.io/vitess/go/vt/key.Destination
- if cc, ok := cached.TargetDestination.(cachedObject); ok {
+ // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive
+ if cc, ok := cached.Input.(cachedObject); ok {
size += cc.CachedSize(true)
}
- // field TableName string
- size += hack.RuntimeAllocSize(int64(len(cached.TableName)))
- // field Position string
- size += hack.RuntimeAllocSize(int64(len(cached.Position)))
return size
}
-func (cached *VTExplain) CachedSize(alloc bool) int64 {
+func (cached *VStream) CachedSize(alloc bool) int64 {
if cached == nil {
return int64(0)
}
size := int64(0)
if alloc {
- size += int64(16)
+ size += int64(64)
}
- // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive
- if cc, ok := cached.Input.(cachedObject); ok {
+ // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace
+ size += cached.Keyspace.CachedSize(true)
+ // field TargetDestination vitess.io/vitess/go/vt/key.Destination
+ if cc, ok := cached.TargetDestination.(cachedObject); ok {
size += cc.CachedSize(true)
}
+ // field TableName string
+ size += hack.RuntimeAllocSize(int64(len(cached.TableName)))
+ // field Position string
+ size += hack.RuntimeAllocSize(int64(len(cached.Position)))
return size
}
func (cached *VindexFunc) CachedSize(alloc bool) int64 {
@@ -1311,7 +1311,7 @@ func (cached *shardRoute) CachedSize(alloc bool) int64 {
}
size := int64(0)
if alloc {
- size += int64(32)
+ size += int64(48)
}
// field query string
size += hack.RuntimeAllocSize(int64(len(cached.query)))
@@ -1332,5 +1332,9 @@ func (cached *shardRoute) CachedSize(alloc bool) int64 {
size += v.CachedSize(true)
}
}
+ // field primitive vitess.io/vitess/go/vt/vtgate/engine.Primitive
+ if cc, ok := cached.primitive.(cachedObject); ok {
+ size += cc.CachedSize(true)
+ }
return size
}
diff --git a/go/vt/vtgate/engine/compare_utils.go b/go/vt/vtgate/engine/compare_utils.go
new file mode 100644
index 00000000000..c854d6723d3
--- /dev/null
+++ b/go/vt/vtgate/engine/compare_utils.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "encoding/json"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/log"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/vterrors"
+)
+
+func printMismatch(leftResult, rightResult *sqltypes.Result, leftPrimitive, rightPrimitive Primitive, leftName, rightName string) {
+ log.Errorf("Results of %s and %s are not equal. Displaying diff.", rightName, leftName)
+
+ // get right plan and print it
+ rightplan := &Plan{
+ Instructions: rightPrimitive,
+ }
+ rightJSON, _ := json.MarshalIndent(rightplan, "", " ")
+ log.Errorf("%s's plan:\n%s", rightName, string(rightJSON))
+
+ // get left's plan and print it
+ leftplan := &Plan{
+ Instructions: leftPrimitive,
+ }
+ leftJSON, _ := json.MarshalIndent(leftplan, "", " ")
+ log.Errorf("%s's plan:\n%s", leftName, string(leftJSON))
+
+ log.Errorf("%s's results:\n", rightName)
+ log.Errorf("\t[rows affected: %d]\n", rightResult.RowsAffected)
+ for _, row := range rightResult.Rows {
+ log.Errorf("\t%s", row)
+ }
+ log.Errorf("%s's results:\n", leftName)
+ log.Errorf("\t[rows affected: %d]\n", leftResult.RowsAffected)
+ for _, row := range leftResult.Rows {
+ log.Errorf("\t%s", row)
+ }
+ log.Error("End of diff.")
+}
+
+// CompareErrors compares the two errors, and if they don't match, produces an error
+func CompareErrors(leftErr, rightErr error, leftName, rightName string) error {
+ if leftErr != nil && rightErr != nil {
+ if leftErr.Error() == rightErr.Error() {
+ return rightErr
+ }
+ return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s and %s failed with different errors: %s: [%s], %s: [%s]", leftName, rightName, leftErr.Error(), rightErr.Error(), leftName, rightName)
+ }
+ if leftErr == nil && rightErr != nil {
+ return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s failed while %s did not: %s", rightName, rightErr.Error(), leftName)
+ }
+ if leftErr != nil && rightErr == nil {
+ return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s failed while %s did not: %s", leftName, leftErr.Error(), rightName)
+ }
+ return nil
+}
diff --git a/go/vt/vtgate/engine/concatenate.go b/go/vt/vtgate/engine/concatenate.go
index e11adce3858..7858ccfc938 100644
--- a/go/vt/vtgate/engine/concatenate.go
+++ b/go/vt/vtgate/engine/concatenate.go
@@ -143,12 +143,23 @@ func (c *Concatenate) getFields(res []*sqltypes.Result) ([]*querypb.Field, error
}
func (c *Concatenate) execSources(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
+ if vcursor.Session().InTransaction() {
+ // as we are in a transaction, we need to execute all queries inside a single transaction
+ // therefore it needs a sequential execution.
+ return c.sequentialExec(ctx, vcursor, bindVars, wantfields)
+ }
+ // not in transaction, so execute in parallel.
+ return c.parallelExec(ctx, vcursor, bindVars, wantfields)
+}
+
+func (c *Concatenate) parallelExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) {
results := make([]*sqltypes.Result, len(c.Sources))
- var wg sync.WaitGroup
var outerErr error
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var wg sync.WaitGroup
for i, source := range c.Sources {
currIndex, currSource := i, source
vars := copyBindVars(bindVars)
@@ -164,14 +175,35 @@ func (c *Concatenate) execSources(ctx context.Context, vcursor VCursor, bindVars
}()
}
wg.Wait()
- if outerErr != nil {
- return nil, outerErr
+ return results, outerErr
+}
+
+func (c *Concatenate) sequentialExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) {
+ results := make([]*sqltypes.Result, len(c.Sources))
+ for i, source := range c.Sources {
+ currIndex, currSource := i, source
+ vars := copyBindVars(bindVars)
+ result, err := vcursor.ExecutePrimitive(ctx, currSource, vars, wantfields)
+ if err != nil {
+ return nil, err
+ }
+ results[currIndex] = result
}
return results, nil
}
// TryStreamExecute performs a streaming exec.
func (c *Concatenate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
+ if vcursor.Session().InTransaction() {
+ // as we are in a transaction, we need to execute all queries inside a single transaction
+ // therefore it needs a sequential execution.
+ return c.sequentialStreamExec(ctx, vcursor, bindVars, wantfields, callback)
+ }
+ // not in transaction, so execute in parallel.
+ return c.parallelStreamExec(ctx, vcursor, bindVars, wantfields, callback)
+}
+
+func (c *Concatenate) parallelStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
var seenFields []*querypb.Field
var outerErr error
@@ -237,6 +269,44 @@ func (c *Concatenate) TryStreamExecute(ctx context.Context, vcursor VCursor, bin
return outerErr
}
+func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
+ // all the below fields ensure that the fields are sent only once.
+ var seenFields []*querypb.Field
+ var fieldsMu sync.Mutex
+ var fieldsSent bool
+
+ for idx, source := range c.Sources {
+ err := vcursor.StreamExecutePrimitive(ctx, source, bindVars, wantfields, func(resultChunk *sqltypes.Result) error {
+ // if we have fields to compare, make sure all the fields are all the same
+ if idx == 0 {
+ fieldsMu.Lock()
+ defer fieldsMu.Unlock()
+ if !fieldsSent {
+ fieldsSent = true
+ seenFields = resultChunk.Fields
+ return callback(resultChunk)
+ }
+ }
+ if resultChunk.Fields != nil {
+ err := c.compareFields(seenFields, resultChunk.Fields)
+ if err != nil {
+ return err
+ }
+ }
+ // check if context has expired.
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ return callback(resultChunk)
+
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// GetFields fetches the field info.
func (c *Concatenate) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
// TODO: type coercions
diff --git a/go/vt/vtgate/engine/dbddl.go b/go/vt/vtgate/engine/dbddl.go
index 5cd5ee6bb1b..be0c5b049b7 100644
--- a/go/vt/vtgate/engine/dbddl.go
+++ b/go/vt/vtgate/engine/dbddl.go
@@ -102,11 +102,8 @@ func (c *DBDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st
log.Errorf("'%s' database ddl plugin is not registered. Falling back to default plugin", name)
plugin = databaseCreatorPlugins[defaultDBDDLPlugin]
}
- if c.queryTimeout != 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, time.Duration(c.queryTimeout)*time.Millisecond)
- defer cancel()
- }
+ ctx, cancelFunc := addQueryTimeout(ctx, vcursor, c.queryTimeout)
+ defer cancelFunc()
if c.create {
return c.createDatabase(ctx, vcursor, plugin)
@@ -142,7 +139,7 @@ func (c *DBDDL) createDatabase(ctx context.Context, vcursor VCursor, plugin DBDD
}
for {
- _, errors := vcursor.ExecuteMultiShard(ctx, destinations, queries, false, true)
+ _, errors := vcursor.ExecuteMultiShard(ctx, c, destinations, queries, false, true)
noErr := true
for _, err := range errors {
diff --git a/go/vt/vtgate/engine/dbddl_plugin.go b/go/vt/vtgate/engine/dbddl_plugin.go
index 1b132f330a2..03a509ac223 100644
--- a/go/vt/vtgate/engine/dbddl_plugin.go
+++ b/go/vt/vtgate/engine/dbddl_plugin.go
@@ -19,7 +19,6 @@ package engine
import (
"context"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -27,12 +26,12 @@ type failDBDDL struct{}
// CreateDatabase implements the DropCreateDB interface
func (failDBDDL) CreateDatabase(context.Context, string) error {
- return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "create database is not supported")
+ return vterrors.VT12001("create database by failDBDDL")
}
// DropDatabase implements the DropCreateDB interface
func (failDBDDL) DropDatabase(context.Context, string) error {
- return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "drop database is not supported")
+ return vterrors.VT12001("drop database by failDBDDL")
}
type noOp struct{}
diff --git a/go/vt/vtgate/engine/delete.go b/go/vt/vtgate/engine/delete.go
index 53a94554e1b..1db717450f8 100644
--- a/go/vt/vtgate/engine/delete.go
+++ b/go/vt/vtgate/engine/delete.go
@@ -19,7 +19,6 @@ package engine
import (
"context"
"fmt"
- "time"
"vitess.io/vitess/go/vt/vtgate/evalengine"
@@ -44,11 +43,8 @@ type Delete struct {
// TryExecute performs a non-streaming exec.
func (del *Delete) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) {
- if del.QueryTimeout != 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, time.Duration(del.QueryTimeout)*time.Millisecond)
- defer cancel()
- }
+ ctx, cancelFunc := addQueryTimeout(ctx, vcursor, del.QueryTimeout)
+ defer cancelFunc()
rss, _, err := del.findRoute(ctx, vcursor, bindVars)
if err != nil {
@@ -61,9 +57,9 @@ func (del *Delete) TryExecute(ctx context.Context, vcursor VCursor, bindVars map
switch del.Opcode {
case Unsharded:
- return del.execUnsharded(ctx, vcursor, bindVars, rss)
+ return del.execUnsharded(ctx, del, vcursor, bindVars, rss)
case Equal, IN, Scatter, ByDestination, SubShard, EqualUnique, MultiEqual:
- return del.execMultiDestination(ctx, vcursor, bindVars, rss, del.deleteVindexEntries)
+ return del.execMultiDestination(ctx, del, vcursor, bindVars, rss, del.deleteVindexEntries)
default:
// Unreachable.
return nil, fmt.Errorf("unsupported opcode: %v", del.Opcode)
@@ -95,7 +91,7 @@ func (del *Delete) deleteVindexEntries(ctx context.Context, vcursor VCursor, bin
for i := range rss {
queries[i] = &querypb.BoundQuery{Sql: del.OwnedVindexQuery, BindVariables: bindVars}
}
- subQueryResults, errors := vcursor.ExecuteMultiShard(ctx, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
+ subQueryResults, errors := vcursor.ExecuteMultiShard(ctx, del, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
for _, err := range errors {
if err != nil {
return err
diff --git a/go/vt/vtgate/engine/distinct.go b/go/vt/vtgate/engine/distinct.go
index 37fee276d8d..5baa7ca9c1f 100644
--- a/go/vt/vtgate/engine/distinct.go
+++ b/go/vt/vtgate/engine/distinct.go
@@ -23,7 +23,6 @@ import (
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/evalengine"
)
@@ -116,7 +115,7 @@ func (pt *probeTable) hashCodeForRow(inputRow sqltypes.Row) (evalengine.HashCode
code := evalengine.HashCode(17)
for i, checkCol := range pt.checkCols {
if i >= len(inputRow) {
- return 0, vterrors.New(vtrpcpb.Code_INTERNAL, "distinct check colls is larger than its input row")
+ return 0, vterrors.VT13001("index out of range in row when creating the DISTINCT hash code")
}
col := inputRow[checkCol.Col]
hashcode, err := evalengine.NullsafeHashcode(col, checkCol.Collation, col.Type())
diff --git a/go/vt/vtgate/engine/dml.go b/go/vt/vtgate/engine/dml.go
index 990b5a0a4e3..5201fe9f81e 100644
--- a/go/vt/vtgate/engine/dml.go
+++ b/go/vt/vtgate/engine/dml.go
@@ -69,11 +69,11 @@ func NewDML() *DML {
return &DML{RoutingParameters: &RoutingParameters{}}
}
-func (dml *DML) execUnsharded(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, rss []*srvtopo.ResolvedShard) (*sqltypes.Result, error) {
- return execShard(ctx, vcursor, dml.Query, bindVars, rss[0], true /* rollbackOnError */, true /* canAutocommit */)
+func (dml *DML) execUnsharded(ctx context.Context, primitive Primitive, vcursor VCursor, bindVars map[string]*querypb.BindVariable, rss []*srvtopo.ResolvedShard) (*sqltypes.Result, error) {
+ return execShard(ctx, primitive, vcursor, dml.Query, bindVars, rss[0], true /* rollbackOnError */, true /* canAutocommit */)
}
-func (dml *DML) execMultiDestination(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, rss []*srvtopo.ResolvedShard, dmlSpecialFunc func(context.Context, VCursor, map[string]*querypb.BindVariable, []*srvtopo.ResolvedShard) error) (*sqltypes.Result, error) {
+func (dml *DML) execMultiDestination(ctx context.Context, primitive Primitive, vcursor VCursor, bindVars map[string]*querypb.BindVariable, rss []*srvtopo.ResolvedShard, dmlSpecialFunc func(context.Context, VCursor, map[string]*querypb.BindVariable, []*srvtopo.ResolvedShard) error) (*sqltypes.Result, error) {
if len(rss) == 0 {
return &sqltypes.Result{}, nil
}
@@ -88,7 +88,7 @@ func (dml *DML) execMultiDestination(ctx context.Context, vcursor VCursor, bindV
BindVariables: bindVars,
}
}
- return execMultiShard(ctx, vcursor, rss, queries, dml.MultiShardAutocommit)
+ return execMultiShard(ctx, primitive, vcursor, rss, queries, dml.MultiShardAutocommit)
}
// RouteType returns a description of the query routing type used by the primitive
@@ -137,9 +137,9 @@ func allowOnlyPrimary(rss ...*srvtopo.ResolvedShard) error {
return nil
}
-func execMultiShard(ctx context.Context, vcursor VCursor, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, multiShardAutoCommit bool) (*sqltypes.Result, error) {
+func execMultiShard(ctx context.Context, primitive Primitive, vcursor VCursor, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, multiShardAutoCommit bool) (*sqltypes.Result, error) {
autocommit := (len(rss) == 1 || multiShardAutoCommit) && vcursor.AutocommitApproval()
- result, errs := vcursor.ExecuteMultiShard(ctx, rss, queries, true /* rollbackOnError */, autocommit)
+ result, errs := vcursor.ExecuteMultiShard(ctx, primitive, rss, queries, true /* rollbackOnError */, autocommit)
return result, vterrors.Aggregate(errs)
}
diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go
index e5f4c4dd74a..464502f5099 100644
--- a/go/vt/vtgate/engine/fake_vcursor_test.go
+++ b/go/vt/vtgate/engine/fake_vcursor_test.go
@@ -50,6 +50,10 @@ var _ SessionActions = (*noopVCursor)(nil)
type noopVCursor struct {
}
+func (t *noopVCursor) InTransaction() bool {
+ return false
+}
+
func (t *noopVCursor) SetCommitOrder(co vtgatepb.CommitOrder) {
//TODO implement me
panic("implement me")
@@ -101,6 +105,10 @@ func (t *noopVCursor) ExecutePrimitive(ctx context.Context, primitive Primitive,
return primitive.TryExecute(ctx, t, bindVars, wantfields)
}
+func (t *noopVCursor) ExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ return primitive.TryExecute(ctx, t, bindVars, wantfields)
+}
+
func (t *noopVCursor) StreamExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
return primitive.TryStreamExecute(ctx, t, bindVars, wantfields, callback)
}
@@ -224,6 +232,13 @@ func (t *noopVCursor) SetClientFoundRows(context.Context, bool) error {
panic("implement me")
}
+func (t *noopVCursor) SetQueryTimeout(maxExecutionTime int64) {
+}
+
+func (t *noopVCursor) GetQueryTimeout(queryTimeoutFromComments int) int {
+ return queryTimeoutFromComments
+}
+
func (t *noopVCursor) SetSkipQueryPlanCache(context.Context, bool) error {
panic("implement me")
}
@@ -244,6 +259,10 @@ func (t *noopVCursor) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) {
panic("implement me")
}
+func (t *noopVCursor) SetConsolidator(querypb.ExecuteOptions_Consolidator) {
+ panic("implement me")
+}
+
func (t *noopVCursor) SetTarget(string) error {
panic("implement me")
}
@@ -267,7 +286,7 @@ func (t *noopVCursor) Execute(ctx context.Context, method string, query string,
panic("unimplemented")
}
-func (t *noopVCursor) ExecuteMultiShard(ctx context.Context, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) {
+func (t *noopVCursor) ExecuteMultiShard(ctx context.Context, primitive Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) {
panic("unimplemented")
}
@@ -275,11 +294,11 @@ func (t *noopVCursor) AutocommitApproval() bool {
panic("unimplemented")
}
-func (t *noopVCursor) ExecuteStandalone(ctx context.Context, query string, bindvars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) {
+func (t *noopVCursor) ExecuteStandalone(ctx context.Context, primitive Primitive, query string, bindvars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) {
panic("unimplemented")
}
-func (t *noopVCursor) StreamExecuteMulti(ctx context.Context, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
+func (t *noopVCursor) StreamExecuteMulti(ctx context.Context, primitive Primitive, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
panic("unimplemented")
}
@@ -336,6 +355,8 @@ type loggingVCursor struct {
// map different shards to keyspaces in the test.
ksShardMap map[string][]string
+
+ shardSession []*srvtopo.ResolvedShard
}
type tableRoutes struct {
@@ -346,6 +367,10 @@ func (f *loggingVCursor) ExecutePrimitive(ctx context.Context, primitive Primiti
return primitive.TryExecute(ctx, f, bindVars, wantfields)
}
+func (f *loggingVCursor) ExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ return primitive.TryExecute(ctx, f, bindVars, wantfields)
+}
+
func (f *loggingVCursor) StreamExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
return primitive.TryStreamExecute(ctx, f, bindVars, wantfields, callback)
}
@@ -397,7 +422,7 @@ func (f *loggingVCursor) InReservedConn() bool {
}
func (f *loggingVCursor) ShardSession() []*srvtopo.ResolvedShard {
- return nil
+ return f.shardSession
}
func (f *loggingVCursor) ExecuteVSchema(context.Context, string, *sqlparser.AlterVschema) error {
@@ -437,7 +462,7 @@ func (f *loggingVCursor) Execute(ctx context.Context, method string, query strin
return f.nextResult()
}
-func (f *loggingVCursor) ExecuteMultiShard(ctx context.Context, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) {
+func (f *loggingVCursor) ExecuteMultiShard(ctx context.Context, primitive Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error) {
f.log = append(f.log, fmt.Sprintf("ExecuteMultiShard %v%v %v", printResolvedShardQueries(rss, queries), rollbackOnError, canAutocommit))
res, err := f.nextResult()
if err != nil {
@@ -451,12 +476,12 @@ func (f *loggingVCursor) AutocommitApproval() bool {
return true
}
-func (f *loggingVCursor) ExecuteStandalone(ctx context.Context, query string, bindvars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) {
+func (f *loggingVCursor) ExecuteStandalone(ctx context.Context, primitive Primitive, query string, bindvars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error) {
f.log = append(f.log, fmt.Sprintf("ExecuteStandalone %s %v %s %s", query, printBindVars(bindvars), rs.Target.Keyspace, rs.Target.Shard))
return f.nextResult()
}
-func (f *loggingVCursor) StreamExecuteMulti(ctx context.Context, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
+func (f *loggingVCursor) StreamExecuteMulti(ctx context.Context, primitive Primitive, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
f.mu.Lock()
f.log = append(f.log, fmt.Sprintf("StreamExecuteMulti %s %s", query, printResolvedShardsBindVars(rss, bindVars)))
r, err := f.nextResult()
@@ -691,9 +716,9 @@ func (f *loggingVCursor) CanUseSetVar() bool {
return useSetVar
}
-func (t *noopVCursor) VtExplainLogging() {}
-func (t *noopVCursor) DisableLogging() {}
-func (t *noopVCursor) GetVTExplainLogs() []ExecuteEntry {
+func (t *noopVCursor) VExplainLogging() {}
+func (t *noopVCursor) DisableLogging() {}
+func (t *noopVCursor) GetVExplainLogs() []ExecuteEntry {
return nil
}
func (t *noopVCursor) GetLogs() ([]ExecuteEntry, error) {
diff --git a/go/vt/vtgate/engine/filter.go b/go/vt/vtgate/engine/filter.go
index f36467a7526..fb696a9d679 100644
--- a/go/vt/vtgate/engine/filter.go
+++ b/go/vt/vtgate/engine/filter.go
@@ -68,11 +68,8 @@ func (f *Filter) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[s
if err != nil {
return nil, err
}
- intEvalResult, err := evalResult.Value().ToInt64()
- if err != nil {
- return nil, err
- }
- if intEvalResult == 1 {
+
+ if evalResult.ToBoolean() {
rows = append(rows, row)
}
}
diff --git a/go/vt/vtgate/engine/gen4_compare_v3.go b/go/vt/vtgate/engine/gen4_compare_v3.go
index c69fa9670af..a913c442a2c 100644
--- a/go/vt/vtgate/engine/gen4_compare_v3.go
+++ b/go/vt/vtgate/engine/gen4_compare_v3.go
@@ -18,11 +18,9 @@ package engine
import (
"context"
- "encoding/json"
"sync"
"vitess.io/vitess/go/sqltypes"
- "vitess.io/vitess/go/vt/log"
querypb "vitess.io/vitess/go/vt/proto/query"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
@@ -78,7 +76,7 @@ func (gc *Gen4CompareV3) TryExecute(ctx context.Context, vcursor VCursor, bindVa
v3Result, v3Err = gc.V3.TryExecute(ctx, vcursor, bindVars, wantfields)
}
- if err := CompareV3AndGen4Errors(v3Err, gen4Err); err != nil {
+ if err := CompareErrors(v3Err, gen4Err, "v3", "Gen4"); err != nil {
return nil, err
}
@@ -111,7 +109,7 @@ func (gc *Gen4CompareV3) TryStreamExecute(ctx context.Context, vcursor VCursor,
})
}
- if err := CompareV3AndGen4Errors(v3Err, gen4Err); err != nil {
+ if err := CompareErrors(v3Err, gen4Err, "v3", "Gen4"); err != nil {
return err
}
@@ -129,42 +127,12 @@ func (gc *Gen4CompareV3) compareResults(v3Result *sqltypes.Result, gen4Result *s
match = sqltypes.ResultsEqualUnordered([]sqltypes.Result{*v3Result}, []sqltypes.Result{*gen4Result})
}
if !match {
- gc.printMismatch(v3Result, gen4Result)
+ printMismatch(v3Result, gen4Result, gc.V3, gc.Gen4, "V3", "Gen4")
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "results did not match, see VTGate's logs for more information")
}
return nil
}
-func (gc *Gen4CompareV3) printMismatch(v3Result *sqltypes.Result, gen4Result *sqltypes.Result) {
- log.Warning("Results of Gen4 and V3 are not equal. Displaying diff.")
-
- // get Gen4 plan and print it
- gen4plan := &Plan{
- Instructions: gc.Gen4,
- }
- gen4JSON, _ := json.MarshalIndent(gen4plan, "", " ")
- log.Warning("Gen4's plan:\n", string(gen4JSON))
-
- // get V3's plan and print it
- v3plan := &Plan{
- Instructions: gc.V3,
- }
- v3JSON, _ := json.MarshalIndent(v3plan, "", " ")
- log.Warning("V3's plan:\n", string(v3JSON))
-
- log.Warning("Gen4's results:\n")
- log.Warningf("\t[rows affected: %d]\n", gen4Result.RowsAffected)
- for _, row := range gen4Result.Rows {
- log.Warningf("\t%s", row)
- }
- log.Warning("V3's results:\n")
- log.Warningf("\t[rows affected: %d]\n", v3Result.RowsAffected)
- for _, row := range v3Result.Rows {
- log.Warningf("\t%s", row)
- }
- log.Warning("End of diff.")
-}
-
// Inputs implements the Primitive interface
func (gc *Gen4CompareV3) Inputs() []Primitive {
return []Primitive{gc.Gen4, gc.V3}
@@ -174,20 +142,3 @@ func (gc *Gen4CompareV3) Inputs() []Primitive {
func (gc *Gen4CompareV3) description() PrimitiveDescription {
return PrimitiveDescription{OperatorType: "Gen4CompareV3"}
}
-
-// CompareV3AndGen4Errors compares the two errors, and if they don't match, produces an error
-func CompareV3AndGen4Errors(v3Err, gen4Err error) error {
- if v3Err != nil && gen4Err != nil {
- if v3Err.Error() == gen4Err.Error() {
- return gen4Err
- }
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "v3 and Gen4 failed with different errors: v3: [%s], Gen4: [%s]", v3Err.Error(), gen4Err.Error())
- }
- if v3Err == nil && gen4Err != nil {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Gen4 failed while v3 did not: %s", gen4Err.Error())
- }
- if v3Err != nil && gen4Err == nil {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "v3 failed while Gen4 did not: %s", v3Err.Error())
- }
- return nil
-}
diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go
index 897d0ba2b3d..744ad756c49 100644
--- a/go/vt/vtgate/engine/insert.go
+++ b/go/vt/vtgate/engine/insert.go
@@ -229,11 +229,8 @@ func (ins *Insert) GetTableName() string {
// TryExecute performs a non-streaming exec.
func (ins *Insert) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
- if ins.QueryTimeout != 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, time.Duration(ins.QueryTimeout)*time.Millisecond)
- defer cancel()
- }
+ ctx, cancelFunc := addQueryTimeout(ctx, vcursor, ins.QueryTimeout)
+ defer cancelFunc()
switch ins.Opcode {
case InsertUnsharded:
@@ -382,7 +379,7 @@ func (ins *Insert) executeInsertQueries(
if err != nil {
return nil, err
}
- result, errs := vcursor.ExecuteMultiShard(ctx, rss, queries, true /* rollbackOnError */, autocommit)
+ result, errs := vcursor.ExecuteMultiShard(ctx, ins, rss, queries, true /* rollbackOnError */, autocommit)
if errs != nil {
return nil, vterrors.Aggregate(errs)
}
@@ -561,7 +558,7 @@ func (ins *Insert) processGenerateFromValues(
return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss))
}
bindVars := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(count)}
- qr, err := vcursor.ExecuteStandalone(ctx, ins.Generate.Query, bindVars, rss[0])
+ qr, err := vcursor.ExecuteStandalone(ctx, ins, ins.Generate.Query, bindVars, rss[0])
if err != nil {
return 0, err
}
@@ -623,7 +620,7 @@ func (ins *Insert) processGenerateFromRows(
return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss))
}
bindVars := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(count)}
- qr, err := vcursor.ExecuteStandalone(ctx, ins.Generate.Query, bindVars, rss[0])
+ qr, err := vcursor.ExecuteStandalone(ctx, ins, ins.Generate.Query, bindVars, rss[0])
if err != nil {
return 0, err
}
@@ -1029,7 +1026,7 @@ func (ins *Insert) executeUnshardedTableQuery(ctx context.Context, vcursor VCurs
if err != nil {
return 0, nil, err
}
- qr, err := execShard(ctx, vcursor, query, bindVars, rss[0], true, true /* canAutocommit */)
+ qr, err := execShard(ctx, ins, vcursor, query, bindVars, rss[0], true, true /* canAutocommit */)
if err != nil {
return 0, nil, err
}
diff --git a/go/vt/vtgate/engine/limit_test.go b/go/vt/vtgate/engine/limit_test.go
index 0aacb93fe40..dcdc43880a0 100644
--- a/go/vt/vtgate/engine/limit_test.go
+++ b/go/vt/vtgate/engine/limit_test.go
@@ -19,7 +19,6 @@ package engine
import (
"context"
"errors"
- "reflect"
"testing"
"github.com/stretchr/testify/assert"
@@ -62,7 +61,7 @@ func TestLimitExecute(t *testing.T) {
"a|1",
"b|2",
)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n%v, want\n%v", result, wantResult)
}
@@ -89,7 +88,7 @@ func TestLimitExecute(t *testing.T) {
result, err = l.TryExecute(context.Background(), &noopVCursor{}, bindVars, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, inputResult) {
+ if !result.Equal(inputResult) {
t.Errorf("l.Execute:\n%v, want\n%v", result, wantResult)
}
@@ -110,7 +109,7 @@ func TestLimitExecute(t *testing.T) {
result, err = l.TryExecute(context.Background(), &noopVCursor{}, bindVars, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n%v, want\n%v", result, wantResult)
}
@@ -136,7 +135,7 @@ func TestLimitExecute(t *testing.T) {
result, err = l.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{"l": sqltypes.Int64BindVariable(2)}, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n%v, want\n%v", result, wantResult)
}
}
@@ -174,7 +173,7 @@ func TestLimitOffsetExecute(t *testing.T) {
"a|1",
"b|2",
)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n%v, want\n%v", result, wantResult)
}
@@ -205,7 +204,7 @@ func TestLimitOffsetExecute(t *testing.T) {
)
result, err = l.TryExecute(context.Background(), &noopVCursor{}, bindVars, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n got %v, want\n%v", result, wantResult)
}
@@ -235,7 +234,7 @@ func TestLimitOffsetExecute(t *testing.T) {
)
result, err = l.TryExecute(context.Background(), &noopVCursor{}, bindVars, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n got %v, want\n%v", result, wantResult)
}
@@ -266,7 +265,7 @@ func TestLimitOffsetExecute(t *testing.T) {
)
result, err = l.TryExecute(context.Background(), &noopVCursor{}, bindVars, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n got %v, want\n%v", result, wantResult)
}
@@ -295,7 +294,7 @@ func TestLimitOffsetExecute(t *testing.T) {
)
result, err = l.TryExecute(context.Background(), &noopVCursor{}, bindVars, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n got %v, want\n%v", result, wantResult)
}
@@ -323,7 +322,7 @@ func TestLimitOffsetExecute(t *testing.T) {
)
result, err = l.TryExecute(context.Background(), &noopVCursor{}, bindVars, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n got %v, want\n%v", result, wantResult)
}
@@ -349,7 +348,7 @@ func TestLimitOffsetExecute(t *testing.T) {
}
result, err = l.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{"l": sqltypes.Int64BindVariable(1), "o": sqltypes.Int64BindVariable(1)}, false)
require.NoError(t, err)
- if !reflect.DeepEqual(result, wantResult) {
+ if !result.Equal(wantResult) {
t.Errorf("l.Execute:\n got %v, want\n%v", result, wantResult)
}
}
@@ -387,8 +386,11 @@ func TestLimitStreamExecute(t *testing.T) {
"a|1",
"b|2",
)
- if !reflect.DeepEqual(results, wantResults) {
- t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ require.Len(t, results, len(wantResults))
+ for i, result := range results {
+ if !result.Equal(wantResults[i]) {
+ t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ }
}
// Test with bind vars.
@@ -400,8 +402,11 @@ func TestLimitStreamExecute(t *testing.T) {
return nil
})
require.NoError(t, err)
- if !reflect.DeepEqual(results, wantResults) {
- t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ require.Len(t, results, len(wantResults))
+ for i, result := range results {
+ if !result.Equal(wantResults[i]) {
+ t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ }
}
// Test with limit equal to input
@@ -420,8 +425,11 @@ func TestLimitStreamExecute(t *testing.T) {
"---",
"c|3",
)
- if !reflect.DeepEqual(results, wantResults) {
- t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ require.Len(t, results, len(wantResults))
+ for i, result := range results {
+ if !result.Equal(wantResults[i]) {
+ t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ }
}
// Test with limit higher than input.
@@ -434,8 +442,11 @@ func TestLimitStreamExecute(t *testing.T) {
})
require.NoError(t, err)
// wantResults is same as before.
- if !reflect.DeepEqual(results, wantResults) {
- t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ require.Len(t, results, len(wantResults))
+ for i, result := range results {
+ if !result.Equal(wantResults[i]) {
+ t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ }
}
}
@@ -477,8 +488,11 @@ func TestOffsetStreamExecute(t *testing.T) {
"---",
"e|5",
)
- if !reflect.DeepEqual(results, wantResults) {
- t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ require.Len(t, results, len(wantResults))
+ for i, result := range results {
+ if !result.Equal(wantResults[i]) {
+ t.Errorf("l.StreamExecute:\n%s, want\n%s", sqltypes.PrintResults(results), sqltypes.PrintResults(wantResults))
+ }
}
}
@@ -495,7 +509,7 @@ func TestLimitGetFields(t *testing.T) {
got, err := l.GetFields(context.Background(), nil, nil)
require.NoError(t, err)
- if !reflect.DeepEqual(got, result) {
+ if !got.Equal(result) {
t.Errorf("l.GetFields:\n%v, want\n%v", got, result)
}
}
diff --git a/go/vt/vtgate/engine/lock.go b/go/vt/vtgate/engine/lock.go
index 2a93495a64c..bf3eac73194 100644
--- a/go/vt/vtgate/engine/lock.go
+++ b/go/vt/vtgate/engine/lock.go
@@ -174,7 +174,7 @@ func (l *Lock) GetFields(ctx context.Context, vcursor VCursor, bindVars map[stri
Sql: l.FieldQuery,
BindVariables: bindVars,
}}
- qr, errs := vcursor.ExecuteMultiShard(ctx, rss, boundQuery, false, true)
+ qr, errs := vcursor.ExecuteMultiShard(ctx, l, rss, boundQuery, false, true)
if len(errs) > 0 {
return nil, vterrors.Aggregate(errs)
}
diff --git a/go/vt/vtgate/engine/mstream.go b/go/vt/vtgate/engine/mstream.go
index 005d4b6e1c5..033196ef576 100644
--- a/go/vt/vtgate/engine/mstream.go
+++ b/go/vt/vtgate/engine/mstream.go
@@ -22,7 +22,6 @@ import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/key"
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
@@ -62,7 +61,7 @@ func (m *MStream) GetTableName() string {
// TryExecute implements the Primitive interface
func (m *MStream) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
- return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] 'Execute' called for Stream")
+ return nil, vterrors.VT13001("TryExecute is not supported for MStream")
}
// TryStreamExecute implements the Primitive interface
@@ -76,7 +75,7 @@ func (m *MStream) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVar
// GetFields implements the Primitive interface
func (m *MStream) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
- return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] 'GetFields' called for Stream")
+ return nil, vterrors.VT13001("GetFields is not supported for MStream")
}
func (m *MStream) description() PrimitiveDescription {
diff --git a/go/vt/vtgate/engine/plan.go b/go/vt/vtgate/engine/plan.go
index d6e4ed1118e..769c69aaa06 100644
--- a/go/vt/vtgate/engine/plan.go
+++ b/go/vt/vtgate/engine/plan.go
@@ -17,6 +17,7 @@ limitations under the License.
package engine
import (
+ "bytes"
"encoding/json"
"sync/atomic"
"time"
@@ -98,5 +99,14 @@ func (p *Plan) MarshalJSON() ([]byte, error) {
Errors: atomic.LoadUint64(&p.Errors),
TablesUsed: p.TablesUsed,
}
- return json.Marshal(marshalPlan)
+
+ b := new(bytes.Buffer)
+ enc := json.NewEncoder(b)
+ enc.SetEscapeHTML(false)
+ err := enc.Encode(marshalPlan)
+ if err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
}
diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go
index 4433dcd69dc..0e7929bbe0c 100644
--- a/go/vt/vtgate/engine/plan_description.go
+++ b/go/vt/vtgate/engine/plan_description.go
@@ -161,12 +161,11 @@ func addMap(input map[string]any, buf *bytes.Buffer) error {
func marshalAdd(prepend string, buf *bytes.Buffer, name string, obj any) error {
buf.WriteString(prepend + `"` + name + `":`)
- b, err := json.Marshal(obj)
- if err != nil {
- return err
- }
- buf.Write(b)
- return nil
+
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+
+ return enc.Encode(obj)
}
// PrimitiveToPlanDescription transforms a primitive tree into a corresponding PlanDescription tree
diff --git a/go/vt/vtgate/engine/plan_description_test.go b/go/vt/vtgate/engine/plan_description_test.go
index 6170970419a..0d985b9b606 100644
--- a/go/vt/vtgate/engine/plan_description_test.go
+++ b/go/vt/vtgate/engine/plan_description_test.go
@@ -39,7 +39,7 @@ func TestCreateRoutePlanDescription(t *testing.T) {
TargetDestination: key.DestinationAllShards{},
Other: map[string]any{
"Query": route.Query,
- "Table": route.TableName,
+ "Table": route.GetTableName(),
"FieldQuery": route.FieldQuery,
"Vindex": route.Vindex.String(),
},
@@ -97,7 +97,7 @@ func getDescriptionFor(route *Route) PrimitiveDescription {
TargetDestination: key.DestinationAllShards{},
Other: map[string]any{
"Query": route.Query,
- "Table": route.TableName,
+ "Table": route.GetTableName(),
"FieldQuery": route.FieldQuery,
"Vindex": route.Vindex.String(),
},
diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go
index 796c1dcb551..36d0719796b 100644
--- a/go/vt/vtgate/engine/primitive.go
+++ b/go/vt/vtgate/engine/primitive.go
@@ -57,14 +57,20 @@ type (
Execute(ctx context.Context, method string, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error)
AutocommitApproval() bool
- // Primitive functions
+ // Execute the given primitive
ExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error)
+ // Execute the given primitive in a new autocommit session
+ ExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error)
+
+ // Execute the given primitive
StreamExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error
+ // Execute the given primitive in a new autocommit session
+ StreamExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(result *sqltypes.Result) error) error
// Shard-level functions.
- ExecuteMultiShard(ctx context.Context, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error)
- ExecuteStandalone(ctx context.Context, query string, bindVars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error)
- StreamExecuteMulti(ctx context.Context, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error
+ ExecuteMultiShard(ctx context.Context, primitive Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error)
+ ExecuteStandalone(ctx context.Context, primitive Primitive, query string, bindVars map[string]*querypb.BindVariable, rs *srvtopo.ResolvedShard) (*sqltypes.Result, error)
+ StreamExecuteMulti(ctx context.Context, primitive Primitive, query string, rss []*srvtopo.ResolvedShard, bindVars []map[string]*querypb.BindVariable, rollbackOnError bool, autocommit bool, callback func(reply *sqltypes.Result) error) []error
// Keyspace ID level functions.
ExecuteKeyspaceID(ctx context.Context, keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error)
@@ -109,9 +115,6 @@ type (
// ReleaseLock releases all the held advisory locks.
ReleaseLock(ctx context.Context) error
-
- // StreamExecutePrimitiveStandalone executes the primitive in its own new autocommit session.
- StreamExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(result *sqltypes.Result) error) error
}
// SessionActions gives primitives ability to interact with the session state
@@ -141,6 +144,7 @@ type (
SetTransactionMode(vtgatepb.TransactionMode)
SetWorkload(querypb.ExecuteOptions_Workload)
SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion)
+ SetConsolidator(querypb.ExecuteOptions_Consolidator)
SetFoundRows(uint64)
SetDDLStrategy(string)
@@ -170,16 +174,26 @@ type (
// RemoveAdvisoryLock removes advisory lock from the session
RemoveAdvisoryLock(name string)
- // VtExplainLogging enables logging of all interactions to the tablets so
- // EXPLAIN `format=vtexplain` can report what's being done
- VtExplainLogging()
+ // VExplainLogging enables logging of all interactions to the tablets so
+ // VEXPLAIN QUERIES/ALL can report what's being done
+ VExplainLogging()
- // GetVTExplainLogs retrieves the vttablet interaction logs
- GetVTExplainLogs() []ExecuteEntry
+ // GetVExplainLogs retrieves the vttablet interaction logs
+ GetVExplainLogs() []ExecuteEntry
// SetCommitOrder sets the commit order for the shard session in respect of the type of vindex lookup.
// This is used to select the right shard session to perform the vindex lookup query.
SetCommitOrder(co vtgatepb.CommitOrder)
+
+ // GetQueryTimeout gets the query timeout and takes in the query timeout from comments
+ GetQueryTimeout(queryTimeoutFromComment int) int
+
+ // SetQueryTimeout sets the query timeout
+ SetQueryTimeout(queryTimeout int64)
+
+ // InTransaction returns true if the session has already opened transaction or
+ // will start a transaction on the query execution.
+ InTransaction() bool
}
// Match is used to check if a Primitive matches
diff --git a/go/vt/vtgate/engine/rename_fields.go b/go/vt/vtgate/engine/rename_fields.go
index a6bb82c9e35..3eb1917abdd 100644
--- a/go/vt/vtgate/engine/rename_fields.go
+++ b/go/vt/vtgate/engine/rename_fields.go
@@ -21,7 +21,6 @@ import (
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
- "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -38,7 +37,7 @@ type RenameFields struct {
// NewRenameField creates a new rename field
func NewRenameField(cols []string, indices []int, input Primitive) (*RenameFields, error) {
if len(cols) != len(indices) {
- return nil, vterrors.New(vtrpc.Code_INTERNAL, "Unequal length of columns and indices in RenameField primitive")
+ return nil, vterrors.VT13001("number of columns does not match number of indices in RenameField primitive")
}
return &RenameFields{
Cols: cols,
diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go
index 2c4fe3442ca..c2462af1756 100644
--- a/go/vt/vtgate/engine/route.go
+++ b/go/vt/vtgate/engine/route.go
@@ -51,7 +51,7 @@ type Route struct {
// Query specifies the query to be executed.
Query string
- // TableName specifies the table to send the query to.
+ // TableName specifies the tables to send the query to.
TableName string
// FieldQuery specifies the query to be executed for a GetFieldInfo request.
@@ -173,11 +173,8 @@ func (route *Route) SetTruncateColumnCount(count int) {
// TryExecute performs a non-streaming exec.
func (route *Route) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
- if route.QueryTimeout != 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, time.Duration(route.QueryTimeout)*time.Millisecond)
- defer cancel()
- }
+ ctx, cancelFunc := addQueryTimeout(ctx, vcursor, route.QueryTimeout)
+ defer cancelFunc()
qr, err := route.executeInternal(ctx, vcursor, bindVars, wantfields)
if err != nil {
return nil, err
@@ -185,6 +182,15 @@ func (route *Route) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma
return qr.Truncate(route.TruncateColumnCount), nil
}
+// addQueryTimeout adds a query timeout to the context it receives and returns the modified context along with the cancel function.
+func addQueryTimeout(ctx context.Context, vcursor VCursor, queryTimeout int) (context.Context, context.CancelFunc) {
+ timeout := vcursor.Session().GetQueryTimeout(queryTimeout)
+ if timeout != 0 {
+ return context.WithTimeout(ctx, time.Duration(timeout)*time.Millisecond)
+ }
+ return ctx, func() {}
+}
+
type cxtKey int
const (
@@ -242,7 +248,7 @@ func (route *Route) executeShards(
}
queries := getQueries(route.Query, bvs)
- result, errs := vcursor.ExecuteMultiShard(ctx, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
+ result, errs := vcursor.ExecuteMultiShard(ctx, route, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
if errs != nil {
errs = filterOutNilErrors(errs)
@@ -333,7 +339,7 @@ func (route *Route) streamExecuteShards(
}
if len(route.OrderBy) == 0 {
- errs := vcursor.StreamExecuteMulti(ctx, route.Query, rss, bvs, false /* rollbackOnError */, false /* autocommit */, func(qr *sqltypes.Result) error {
+ errs := vcursor.StreamExecuteMulti(ctx, route, route.Query, rss, bvs, false /* rollbackOnError */, false /* autocommit */, func(qr *sqltypes.Result) error {
return callback(qr.Truncate(route.TruncateColumnCount))
})
if len(errs) > 0 {
@@ -365,9 +371,10 @@ func (route *Route) mergeSort(
prims := make([]StreamExecutor, 0, len(rss))
for i, rs := range rss {
prims = append(prims, &shardRoute{
- query: route.Query,
- rs: rs,
- bv: bvs[i],
+ query: route.Query,
+ rs: rs,
+ bv: bvs[i],
+ primitive: route,
})
}
ms := MergeSort{
@@ -390,7 +397,7 @@ func (route *Route) GetFields(ctx context.Context, vcursor VCursor, bindVars map
// This code is unreachable. It's just a sanity check.
return nil, fmt.Errorf("no shards for keyspace: %s", route.Keyspace.Name)
}
- qr, err := execShard(ctx, vcursor, route.FieldQuery, bindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */)
+ qr, err := execShard(ctx, route, vcursor, route.FieldQuery, bindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */)
if err != nil {
return nil, err
}
@@ -402,12 +409,7 @@ func (route *Route) sort(in *sqltypes.Result) (*sqltypes.Result, error) {
// Since Result is immutable, we make a copy.
// The copy can be shallow because we won't be changing
// the contents of any row.
- out := &sqltypes.Result{
- Fields: in.Fields,
- Rows: in.Rows,
- RowsAffected: in.RowsAffected,
- InsertID: in.InsertID,
- }
+ out := in.ShallowCopy()
comparers := extractSlices(route.OrderBy)
@@ -440,7 +442,7 @@ func (route *Route) sort(in *sqltypes.Result) (*sqltypes.Result, error) {
func (route *Route) description() PrimitiveDescription {
other := map[string]any{
"Query": route.Query,
- "Table": route.TableName,
+ "Table": route.GetTableName(),
"FieldQuery": route.FieldQuery,
}
if route.Vindex != nil {
@@ -543,6 +545,7 @@ func (route *Route) streamExecuteAfterLookup(
func execShard(
ctx context.Context,
+ primitive Primitive,
vcursor VCursor,
query string,
bindVars map[string]*querypb.BindVariable,
@@ -550,7 +553,7 @@ func execShard(
rollbackOnError, canAutocommit bool,
) (*sqltypes.Result, error) {
autocommit := canAutocommit && vcursor.AutocommitApproval()
- result, errs := vcursor.ExecuteMultiShard(ctx, []*srvtopo.ResolvedShard{rs}, []*querypb.BoundQuery{
+ result, errs := vcursor.ExecuteMultiShard(ctx, primitive, []*srvtopo.ResolvedShard{rs}, []*querypb.BoundQuery{
{
Sql: query,
BindVariables: bindVars,
diff --git a/go/vt/vtgate/engine/rows.go b/go/vt/vtgate/engine/rows.go
index f552c3c9e59..2b81c85145f 100644
--- a/go/vt/vtgate/engine/rows.go
+++ b/go/vt/vtgate/engine/rows.go
@@ -82,5 +82,16 @@ func (r *Rows) GetFields(context.Context, VCursor, map[string]*querypb.BindVaria
}
func (r *Rows) description() PrimitiveDescription {
- return PrimitiveDescription{OperatorType: "Rows"}
+ others := map[string]any{}
+ if len(r.fields) != 0 {
+ fieldsMap := map[string]string{}
+ for _, field := range r.fields {
+ fieldsMap[field.Name] = field.Type.String()
+ }
+ others["Fields"] = fieldsMap
+ }
+ if len(r.rows) != 0 {
+ others["RowCount"] = len(r.rows)
+ }
+ return PrimitiveDescription{OperatorType: "Rows", Other: others}
}
diff --git a/go/vt/vtgate/engine/scalar_aggregation.go b/go/vt/vtgate/engine/scalar_aggregation.go
index 99fd21d8ea1..2b66073ac5e 100644
--- a/go/vt/vtgate/engine/scalar_aggregation.go
+++ b/go/vt/vtgate/engine/scalar_aggregation.go
@@ -122,7 +122,7 @@ func (sa *ScalarAggregate) TryExecute(ctx context.Context, vcursor VCursor, bind
}
out.Rows = [][]sqltypes.Value{resultRow}
- return out, nil
+ return out.Truncate(sa.TruncateColumnCount), nil
}
// TryStreamExecute implements the Primitive interface
@@ -213,7 +213,8 @@ func createEmptyValueFor(opcode AggregateOpcode) (sqltypes.Value, error) {
AggregateSumDistinct,
AggregateSum,
AggregateMin,
- AggregateMax:
+ AggregateMax,
+ AggregateRandom:
return sqltypes.NULL, nil
}
diff --git a/go/vt/vtgate/engine/scalar_aggregation_test.go b/go/vt/vtgate/engine/scalar_aggregation_test.go
index 11e5b20a72b..15e72639f3d 100644
--- a/go/vt/vtgate/engine/scalar_aggregation_test.go
+++ b/go/vt/vtgate/engine/scalar_aggregation_test.go
@@ -106,16 +106,16 @@ func TestEmptyRows(outer *testing.T) {
func TestScalarAggregateStreamExecute(t *testing.T) {
assert := assert.New(t)
fields := sqltypes.MakeTestFields(
- "count(*)",
- "uint64",
+ "col|weight_string(col)",
+ "uint64|varbinary",
)
fp := &fakePrimitive{
allResultsInOneCall: true,
results: []*sqltypes.Result{
sqltypes.MakeTestResult(fields,
- "1",
+ "1|null",
), sqltypes.MakeTestResult(fields,
- "3",
+ "3|null",
)},
}
@@ -141,3 +141,34 @@ func TestScalarAggregateStreamExecute(t *testing.T) {
got := fmt.Sprintf("%v", results[1].Rows)
assert.Equal("[[UINT64(4)]]", got)
}
+
+// TestScalarAggregateExecuteTruncate checks if truncate works
+func TestScalarAggregateExecuteTruncate(t *testing.T) {
+ assert := assert.New(t)
+ fields := sqltypes.MakeTestFields(
+ "col|weight_string(col)",
+ "uint64|varbinary",
+ )
+
+ fp := &fakePrimitive{
+ allResultsInOneCall: true,
+ results: []*sqltypes.Result{
+ sqltypes.MakeTestResult(fields,
+ "1|null", "3|null",
+ )},
+ }
+
+ oa := &ScalarAggregate{
+ Aggregates: []*AggregateParams{{
+ Opcode: AggregateSum,
+ Col: 0,
+ }},
+ Input: fp,
+ TruncateColumnCount: 1,
+ PreProcess: true,
+ }
+
+ qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, true)
+ assert.NoError(err)
+ assert.Equal("[[UINT64(4)]]", fmt.Sprintf("%v", qr.Rows))
+}
diff --git a/go/vt/vtgate/engine/send.go b/go/vt/vtgate/engine/send.go
index 967ed0e2bd0..1a95d8f93fa 100644
--- a/go/vt/vtgate/engine/send.go
+++ b/go/vt/vtgate/engine/send.go
@@ -86,6 +86,8 @@ func (s *Send) GetTableName() string {
// TryExecute implements Primitive interface
func (s *Send) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ ctx, cancelFunc := addQueryTimeout(ctx, vcursor, 0)
+ defer cancelFunc()
rss, _, err := vcursor.ResolveDestinations(ctx, s.Keyspace.Name, nil, []key.Destination{s.TargetDestination})
if err != nil {
return nil, err
@@ -113,7 +115,7 @@ func (s *Send) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[str
}
rollbackOnError := s.IsDML // for non-dml queries, there's no need to do a rollback
- result, errs := vcursor.ExecuteMultiShard(ctx, rss, queries, rollbackOnError, s.canAutoCommit(vcursor, rss))
+ result, errs := vcursor.ExecuteMultiShard(ctx, s, rss, queries, rollbackOnError, s.canAutoCommit(vcursor, rss))
err = vterrors.Aggregate(errs)
if err != nil {
return nil, err
@@ -160,7 +162,7 @@ func (s *Send) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars m
}
multiBindVars[i] = bv
}
- errors := vcursor.StreamExecuteMulti(ctx, s.Query, rss, multiBindVars, s.IsDML, s.canAutoCommit(vcursor, rss), callback)
+ errors := vcursor.StreamExecuteMulti(ctx, s, s.Query, rss, multiBindVars, s.IsDML, s.canAutoCommit(vcursor, rss), callback)
return vterrors.Aggregate(errors)
}
diff --git a/go/vt/vtgate/engine/session_primitive.go b/go/vt/vtgate/engine/session_primitive.go
index 7e3cd1c9267..834f335dd6f 100644
--- a/go/vt/vtgate/engine/session_primitive.go
+++ b/go/vt/vtgate/engine/session_primitive.go
@@ -21,7 +21,6 @@ import (
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -76,7 +75,7 @@ func (s *SessionPrimitive) TryStreamExecute(ctx context.Context, vcursor VCursor
// GetFields implements the Primitive interface
func (s *SessionPrimitive) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
- return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "not supported for this primitive")
+ return nil, vterrors.VT13001("GetFields is not supported for SessionPrimitive")
}
// description implements the Primitive interface
diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go
index d04b94fa506..7b253d5f034 100644
--- a/go/vt/vtgate/engine/set.go
+++ b/go/vt/vtgate/engine/set.go
@@ -253,7 +253,7 @@ func (svci *SysVarCheckAndIgnore) Execute(ctx context.Context, vcursor VCursor,
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unexpected error, DestinationKeyspaceID mapping to multiple shards: %v", svci.TargetDestination)
}
checkSysVarQuery := fmt.Sprintf("select 1 from dual where @@%s = %s", svci.Name, svci.Expr)
- result, err := execShard(ctx, vcursor, checkSysVarQuery, env.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */)
+ result, err := execShard(ctx, nil, vcursor, checkSysVarQuery, env.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */)
if err != nil {
// Rather than returning the error, we will just log the error
// as the intention for executing the query it to validate the current setting and eventually ignore it anyways.
@@ -313,11 +313,11 @@ func (svs *SysVarReservedConn) Execute(ctx context.Context, vcursor VCursor, env
queries := make([]*querypb.BoundQuery, len(rss))
for i := 0; i < len(rss); i++ {
queries[i] = &querypb.BoundQuery{
- Sql: fmt.Sprintf("set @@%s = %s", svs.Name, svs.Expr),
+ Sql: fmt.Sprintf("set %s = %s", svs.Name, svs.Expr),
BindVariables: env.BindVars,
}
}
- _, errs := vcursor.ExecuteMultiShard(ctx, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
+ _, errs := vcursor.ExecuteMultiShard(ctx, nil, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
return vterrors.Aggregate(errs)
}
@@ -329,7 +329,7 @@ func (svs *SysVarReservedConn) execSetStatement(ctx context.Context, vcursor VCu
BindVariables: env.BindVars,
}
}
- _, errs := vcursor.ExecuteMultiShard(ctx, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
+ _, errs := vcursor.ExecuteMultiShard(ctx, nil, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
return vterrors.Aggregate(errs)
}
@@ -342,7 +342,7 @@ func (svs *SysVarReservedConn) checkAndUpdateSysVar(ctx context.Context, vcursor
if err != nil {
return false, err
}
- qr, err := execShard(ctx, vcursor, sysVarExprValidationQuery, res.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */)
+ qr, err := execShard(ctx, nil, vcursor, sysVarExprValidationQuery, res.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */)
if err != nil {
return false, err
}
@@ -494,6 +494,12 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e
return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid DDL strategy: %s", str)
}
vcursor.Session().SetDDLStrategy(str)
+ case sysvars.QueryTimeout.Name:
+ queryTimeout, err := svss.evalAsInt64(env)
+ if err != nil {
+ return err
+ }
+ vcursor.Session().SetQueryTimeout(queryTimeout)
case sysvars.SessionEnableSystemSettings.Name:
err = svss.setBoolSysVar(ctx, env, vcursor.Session().SetSessionEnableSystemSettings)
case sysvars.Charset.Name, sysvars.Names.Name:
diff --git a/go/vt/vtgate/engine/set_test.go b/go/vt/vtgate/engine/set_test.go
index 877aadf6502..d66b7406187 100644
--- a/go/vt/vtgate/engine/set_test.go
+++ b/go/vt/vtgate/engine/set_test.go
@@ -23,6 +23,7 @@ import (
"testing"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/srvtopo"
"github.com/stretchr/testify/require"
@@ -59,6 +60,7 @@ func TestSetSystemVariableAsString(t *testing.T) {
),
"foobar",
)},
+ shardSession: []*srvtopo.ResolvedShard{{Target: &querypb.Target{Keyspace: "ks", Shard: "-20"}}},
}
_, err := set.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false)
require.NoError(t, err)
@@ -68,6 +70,7 @@ func TestSetSystemVariableAsString(t *testing.T) {
"ExecuteMultiShard ks.-20: select dummy_expr from dual where @@x != dummy_expr {} false false",
"SysVar set with (x,'foobar')",
"Needs Reserved Conn",
+ "ExecuteMultiShard ks.-20: set x = dummy_expr {} false false",
})
}
@@ -563,10 +566,10 @@ func TestSetTable(t *testing.T) {
tc.input = &SingleRow{}
}
- oldMySQLVersion := sqlparser.MySQLVersion
- defer func() { sqlparser.MySQLVersion = oldMySQLVersion }()
+ oldMySQLVersion := sqlparser.GetParserVersion()
+ defer func() { sqlparser.SetParserVersion(oldMySQLVersion) }()
if tc.mysqlVersion != "" {
- sqlparser.MySQLVersion = tc.mysqlVersion
+ sqlparser.SetParserVersion(tc.mysqlVersion)
}
set := &Set{
diff --git a/go/vt/vtgate/engine/shard_route.go b/go/vt/vtgate/engine/shard_route.go
index 8a991d5e8ed..8365c6fdf59 100644
--- a/go/vt/vtgate/engine/shard_route.go
+++ b/go/vt/vtgate/engine/shard_route.go
@@ -30,14 +30,15 @@ var _ StreamExecutor = (*shardRoute)(nil)
// shardRoute is an internal primitive used by Route
// for performing merge sorts.
type shardRoute struct {
- query string
- rs *srvtopo.ResolvedShard
- bv map[string]*querypb.BindVariable
+ query string
+ rs *srvtopo.ResolvedShard
+ bv map[string]*querypb.BindVariable
+ primitive Primitive
}
// StreamExecute performs a streaming exec.
func (sr *shardRoute) StreamExecute(ctx context.Context, vcursor VCursor, _ map[string]*querypb.BindVariable, _ bool, callback func(*sqltypes.Result) error) error {
// TODO rollback on error and autocommit should probably not be used like this
- errors := vcursor.StreamExecuteMulti(ctx, sr.query, []*srvtopo.ResolvedShard{sr.rs}, []map[string]*querypb.BindVariable{sr.bv}, false /* rollbackOnError */, false /* autocommit */, callback)
+ errors := vcursor.StreamExecuteMulti(ctx, sr.primitive, sr.query, []*srvtopo.ResolvedShard{sr.rs}, []map[string]*querypb.BindVariable{sr.bv}, false /* rollbackOnError */, false /* autocommit */, callback)
return vterrors.Aggregate(errors)
}
diff --git a/go/vt/vtgate/engine/update.go b/go/vt/vtgate/engine/update.go
index 9360791c211..8a26fa87629 100644
--- a/go/vt/vtgate/engine/update.go
+++ b/go/vt/vtgate/engine/update.go
@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"sort"
- "time"
"vitess.io/vitess/go/vt/vtgate/evalengine"
@@ -54,11 +53,8 @@ type Update struct {
// TryExecute performs a non-streaming exec.
func (upd *Update) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
- if upd.QueryTimeout != 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, time.Duration(upd.QueryTimeout)*time.Millisecond)
- defer cancel()
- }
+ ctx, cancelFunc := addQueryTimeout(ctx, vcursor, upd.QueryTimeout)
+ defer cancelFunc()
rss, _, err := upd.findRoute(ctx, vcursor, bindVars)
if err != nil {
@@ -71,9 +67,9 @@ func (upd *Update) TryExecute(ctx context.Context, vcursor VCursor, bindVars map
switch upd.Opcode {
case Unsharded:
- return upd.execUnsharded(ctx, vcursor, bindVars, rss)
+ return upd.execUnsharded(ctx, upd, vcursor, bindVars, rss)
case Equal, EqualUnique, IN, Scatter, ByDestination, SubShard, MultiEqual:
- return upd.execMultiDestination(ctx, vcursor, bindVars, rss, upd.updateVindexEntries)
+ return upd.execMultiDestination(ctx, upd, vcursor, bindVars, rss, upd.updateVindexEntries)
default:
// Unreachable.
return nil, fmt.Errorf("unsupported opcode: %v", upd.Opcode)
@@ -109,7 +105,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin
for i := range rss {
queries[i] = &querypb.BoundQuery{Sql: upd.OwnedVindexQuery, BindVariables: bindVars}
}
- subQueryResult, errors := vcursor.ExecuteMultiShard(ctx, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
+ subQueryResult, errors := vcursor.ExecuteMultiShard(ctx, upd, rss, queries, false /* rollbackOnError */, false /* canAutocommit */)
for _, err := range errors {
if err != nil {
return err
diff --git a/go/vt/vtgate/engine/vexplain.go b/go/vt/vtgate/engine/vexplain.go
new file mode 100644
index 00000000000..da7b6100221
--- /dev/null
+++ b/go/vt/vtgate/engine/vexplain.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package engine
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "vitess.io/vitess/go/sqltypes"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/srvtopo"
+ "vitess.io/vitess/go/vt/vterrors"
+)
+
+type (
+ ExecuteEntry struct {
+ ID int
+ Target *querypb.Target
+ Gateway srvtopo.Gateway
+ Query string
+ FiredFrom Primitive
+ }
+
+ VExplain struct {
+ Input Primitive
+ Type sqlparser.VExplainType
+ }
+)
+
+var _ Primitive = (*VExplain)(nil)
+
+// RouteType implements the Primitive interface
+func (v *VExplain) RouteType() string {
+ return v.Input.RouteType()
+}
+
+// GetKeyspaceName implements the Primitive interface
+func (v *VExplain) GetKeyspaceName() string {
+ return v.Input.GetKeyspaceName()
+}
+
+// GetTableName implements the Primitive interface
+func (v *VExplain) GetTableName() string {
+ return v.Input.GetTableName()
+}
+
+// GetFields implements the Primitive interface
+func (v *VExplain) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
+ return v.Input.GetFields(ctx, vcursor, bindVars)
+}
+
+// NeedsTransaction implements the Primitive interface
+func (v *VExplain) NeedsTransaction() bool {
+ return v.Input.NeedsTransaction()
+}
+
+// TryExecute implements the Primitive interface
+func (v *VExplain) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ vcursor.Session().VExplainLogging()
+ _, err := vcursor.ExecutePrimitive(ctx, v.Input, bindVars, wantfields)
+ if err != nil {
+ return nil, err
+ }
+ return v.convertToResult(ctx, vcursor)
+}
+
+// TryStreamExecute implements the Primitive interface
+func (v *VExplain) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
+ vcursor.Session().VExplainLogging()
+ err := vcursor.StreamExecutePrimitive(ctx, v.Input, bindVars, wantfields, func(result *sqltypes.Result) error {
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ result, err := v.convertToResult(ctx, vcursor)
+ if err != nil {
+ return err
+ }
+ return callback(result)
+}
+
+func (v *VExplain) convertToResult(ctx context.Context, vcursor VCursor) (*sqltypes.Result, error) {
+ switch v.Type {
+ case sqlparser.QueriesVExplainType:
+ result := convertToVExplainQueriesResult(vcursor.Session().GetVExplainLogs())
+ return result, nil
+ case sqlparser.AllVExplainType:
+ return v.convertToVExplainAllResult(ctx, vcursor)
+ default:
+ return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Unknown type of VExplain plan")
+ }
+}
+
+func (v *VExplain) convertToVExplainAllResult(ctx context.Context, vcursor VCursor) (*sqltypes.Result, error) {
+ logEntries := vcursor.Session().GetVExplainLogs()
+ explainResults := make(map[Primitive]string)
+ for _, entry := range logEntries {
+ if entry.Target == nil || entry.Gateway == nil || entry.FiredFrom == nil {
+ continue
+ }
+ if explainResults[entry.FiredFrom] != "" {
+ continue
+ }
+ explainQuery := fmt.Sprintf("explain format = json %v", entry.Query)
+ // We rely on the parser to see if the query we have is explainable or not
+ // If we get an error in parsing then we can't execute explain on the given query, and we skip it
+ _, err := sqlparser.Parse(explainQuery)
+ if err != nil {
+ continue
+ }
+ // Explain statement should now succeed
+ res, err := vcursor.ExecuteStandalone(ctx, nil, explainQuery, nil, &srvtopo.ResolvedShard{
+ Target: entry.Target,
+ Gateway: entry.Gateway,
+ })
+ if err != nil {
+ return nil, err
+ }
+ explainResults[entry.FiredFrom] = res.Rows[0][0].ToString()
+ }
+
+ planDescription := primitiveToPlanDescriptionWithSQLResults(v.Input, explainResults)
+ resultBytes, err := json.MarshalIndent(planDescription, "", "\t")
+ if err != nil {
+ return nil, err
+ }
+
+ result := string(resultBytes)
+ fields := []*querypb.Field{
+ {
+ Name: "VExplain", Type: sqltypes.VarChar,
+ },
+ }
+ rows := []sqltypes.Row{
+ {
+ sqltypes.NewVarChar(result),
+ },
+ }
+ qr := &sqltypes.Result{
+ Fields: fields,
+ Rows: rows,
+ }
+ return qr, nil
+}
+
+// primitiveToPlanDescriptionWithSQLResults transforms a primitive tree into a corresponding PlanDescription tree
+// and adds the given res ...
+func primitiveToPlanDescriptionWithSQLResults(in Primitive, res map[Primitive]string) PrimitiveDescription {
+ this := in.description()
+
+ if v, found := res[in]; found {
+ this.Other["mysql_explain_json"] = json.RawMessage(v)
+ }
+
+ for _, input := range in.Inputs() {
+ this.Inputs = append(this.Inputs, primitiveToPlanDescriptionWithSQLResults(input, res))
+ }
+
+ if len(in.Inputs()) == 0 {
+ this.Inputs = []PrimitiveDescription{}
+ }
+
+ return this
+}
+
+func convertToVExplainQueriesResult(logs []ExecuteEntry) *sqltypes.Result {
+ fields := []*querypb.Field{{
+ Name: "#", Type: sqltypes.Int32,
+ }, {
+ Name: "keyspace", Type: sqltypes.VarChar,
+ }, {
+ Name: "shard", Type: sqltypes.VarChar,
+ }, {
+ Name: "query", Type: sqltypes.VarChar,
+ }}
+ qr := &sqltypes.Result{
+ Fields: fields,
+ }
+ for _, line := range logs {
+ qr.Rows = append(qr.Rows, sqltypes.Row{
+ sqltypes.NewInt32(int32(line.ID)),
+ sqltypes.NewVarChar(line.Target.Keyspace),
+ sqltypes.NewVarChar(line.Target.Shard),
+ sqltypes.NewVarChar(line.Query),
+ })
+ }
+ return qr
+}
+
+// Inputs implements the Primitive interface
+func (v *VExplain) Inputs() []Primitive {
+ return []Primitive{v.Input}
+}
+
+func (v *VExplain) description() PrimitiveDescription {
+ return PrimitiveDescription{
+ OperatorType: "VEXPLAIN",
+ Other: map[string]any{"Type": v.Type.ToString()},
+ }
+}
diff --git a/go/vt/vtgate/engine/vindex_func.go b/go/vt/vtgate/engine/vindex_func.go
index 6ed12091a9a..7e1802077d1 100644
--- a/go/vt/vtgate/engine/vindex_func.go
+++ b/go/vt/vtgate/engine/vindex_func.go
@@ -99,7 +99,7 @@ func (vf *VindexFunc) TryStreamExecute(ctx context.Context, vcursor VCursor, bin
if err != nil {
return err
}
- if err := callback(&sqltypes.Result{Fields: r.Fields}); err != nil {
+ if err := callback(r.Metadata()); err != nil {
return err
}
return callback(&sqltypes.Result{Rows: r.Rows})
@@ -125,16 +125,21 @@ func (vf *VindexFunc) mapVindex(ctx context.Context, vcursor VCursor, bindVars m
result := &sqltypes.Result{
Fields: vf.Fields,
}
- for _, value := range values {
+ destinations, err := vf.Vindex.Map(ctx, vcursor, values)
+ if err != nil {
+ return nil, err
+ }
+ if len(destinations) != len(values) {
+ // should never happen
+ return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Vindex.Map() length mismatch: input values count is %d, output destinations count is %d",
+ len(values), len(destinations))
+ }
+ for i, value := range values {
vkey, err := evalengine.Cast(value, sqltypes.VarBinary)
if err != nil {
return nil, err
}
- destinations, err := vf.Vindex.Map(ctx, vcursor, []sqltypes.Value{value})
- if err != nil {
- return nil, err
- }
- switch d := destinations[0].(type) {
+ switch d := destinations[i].(type) {
case key.DestinationKeyRange:
if d.KeyRange != nil {
row, err := vf.buildRow(vkey, nil, d.KeyRange)
diff --git a/go/vt/vtgate/engine/vindex_func_test.go b/go/vt/vtgate/engine/vindex_func_test.go
index 5c02f9111e2..2805d85cdd3 100644
--- a/go/vt/vtgate/engine/vindex_func_test.go
+++ b/go/vt/vtgate/engine/vindex_func_test.go
@@ -44,22 +44,24 @@ func (*uvindex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]
}
func (v *uvindex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
- if v.matchkr {
- return []key.Destination{
- key.DestinationKeyRange{
- KeyRange: &topodatapb.KeyRange{
- Start: []byte{0x40},
- End: []byte{0x60},
- },
- },
- }, nil
- }
- if v.matchid {
- return []key.Destination{
- key.DestinationKeyspaceID([]byte("foo")),
- }, nil
+ destinations := make([]key.Destination, 0, len(ids))
+ dkid := []byte("foo")
+ for i := 0; i < len(ids); i++ {
+ if v.matchkr {
+ destinations = append(destinations,
+ key.DestinationKeyRange{
+ KeyRange: &topodatapb.KeyRange{
+ Start: []byte{0x40},
+ End: []byte{0x60},
+ },
+ })
+ } else if v.matchid {
+ destinations = append(destinations, key.DestinationKeyspaceID(dkid))
+ } else {
+ destinations = append(destinations, key.DestinationNone{})
+ }
}
- return []key.Destination{key.DestinationNone{}}, nil
+ return destinations, nil
}
// nvindex is NonUnique.
@@ -74,25 +76,31 @@ func (*nvindex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]
}
func (v *nvindex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
- if v.matchid {
- return []key.Destination{
- key.DestinationKeyspaceIDs([][]byte{
- []byte("foo"),
- []byte("bar"),
- }),
- }, nil
- }
- if v.matchkr {
- return []key.Destination{
- key.DestinationKeyRange{
- KeyRange: &topodatapb.KeyRange{
- Start: []byte{0x40},
- End: []byte{0x60},
- },
- },
- }, nil
+ destinations := make([]key.Destination, 0)
+ for i := 0; i < len(ids); i++ {
+ if v.matchid {
+ destinations = append(destinations,
+ []key.Destination{
+ key.DestinationKeyspaceIDs([][]byte{
+ []byte("foo"),
+ []byte("bar"),
+ }),
+ }...)
+ } else if v.matchkr {
+ destinations = append(destinations,
+ []key.Destination{
+ key.DestinationKeyRange{
+ KeyRange: &topodatapb.KeyRange{
+ Start: []byte{0x40},
+ End: []byte{0x60},
+ },
+ },
+ }...)
+ } else {
+ destinations = append(destinations, []key.Destination{key.DestinationNone{}}...)
+ }
}
- return []key.Destination{key.DestinationNone{}}, nil
+ return destinations, nil
}
func TestVindexFuncMap(t *testing.T) {
@@ -147,13 +155,15 @@ func TestVindexFuncMap(t *testing.T) {
require.NoError(t, err)
want = &sqltypes.Result{
Fields: sqltypes.MakeTestFields("id|keyspace_id|hex(keyspace_id)|range_start|range_end", "varbinary|varbinary|varbinary|varbinary|varbinary"),
- Rows: [][]sqltypes.Value{{
- sqltypes.NewVarBinary("1"),
- sqltypes.NULL,
- sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x40}),
- sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x60}),
- sqltypes.NULL,
- }},
+ Rows: [][]sqltypes.Value{
+ {
+ sqltypes.NewVarBinary("1"),
+ sqltypes.NULL,
+ sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x40}),
+ sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x60}),
+ sqltypes.NULL,
+ },
+ },
RowsAffected: 0,
}
require.Equal(t, got, want)
diff --git a/go/vt/vtgate/engine/vindex_lookup.go b/go/vt/vtgate/engine/vindex_lookup.go
index 8883f138dcd..816507ae086 100644
--- a/go/vt/vtgate/engine/vindex_lookup.go
+++ b/go/vt/vtgate/engine/vindex_lookup.go
@@ -187,10 +187,17 @@ func (vr *VindexLookup) executeNonBatch(ctx context.Context, vcursor VCursor, id
bindVars := map[string]*querypb.BindVariable{
vr.Arguments[0]: vars,
}
- result, err := vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+
+ var result *sqltypes.Result
+ if vr.Vindex.AutoCommitEnabled() {
+ result, err = vcursor.ExecutePrimitiveStandalone(ctx, vr.Lookup, bindVars, false)
+ } else {
+ result, err = vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+ }
if err != nil {
return nil, err
}
+
rows := make([][]sqltypes.Value, 0, len(result.Rows))
for _, row := range result.Rows {
rows = append(rows, []sqltypes.Value{row[1]})
@@ -212,7 +219,17 @@ func (vr *VindexLookup) executeBatch(ctx context.Context, vcursor VCursor, ids [
bindVars := map[string]*querypb.BindVariable{
vr.Arguments[0]: vars,
}
- result, err := vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+
+ var result *sqltypes.Result
+ if vr.Vindex.AutoCommitEnabled() {
+ result, err = vcursor.ExecutePrimitiveStandalone(ctx, vr.Lookup, bindVars, false)
+ } else {
+ result, err = vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+ }
+ if err != nil {
+ return nil, err
+ }
+
if err != nil {
return nil, vterrors.Wrapf(err, "failed while running the lookup query")
}
diff --git a/go/vt/vtgate/engine/vschema_ddl.go b/go/vt/vtgate/engine/vschema_ddl.go
index d2027715a9c..1e385269c1d 100644
--- a/go/vt/vtgate/engine/vschema_ddl.go
+++ b/go/vt/vtgate/engine/vschema_ddl.go
@@ -21,7 +21,6 @@ import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -85,5 +84,5 @@ func (v *AlterVSchema) TryStreamExecute(ctx context.Context, vcursor VCursor, bi
// GetFields implements the Primitive interface
func (v *AlterVSchema) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*query.BindVariable) (*sqltypes.Result, error) {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_UNIMPLEMENTED, vterrors.UnsupportedPS, "This command is not supported in the prepared statement protocol yet")
+ return nil, vterrors.VT13001("GetFields is not supported for AlterVSchema")
}
diff --git a/go/vt/vtgate/engine/vstream.go b/go/vt/vtgate/engine/vstream.go
index 0ef3ccd64c3..2ad8286dfcc 100644
--- a/go/vt/vtgate/engine/vstream.go
+++ b/go/vt/vtgate/engine/vstream.go
@@ -21,7 +21,6 @@ import (
"fmt"
"io"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/sqltypes"
@@ -63,7 +62,7 @@ func (v *VStream) GetTableName() string {
// TryExecute implements the Primitive interface
func (v *VStream) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
- return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] 'Execute' called for VStream")
+ return nil, vterrors.VT13001("TryExecute is not supported for VStream")
}
// TryStreamExecute implements the Primitive interface
@@ -155,7 +154,7 @@ func addRowChangeIndicatorColumn(change *binlogdatapb.RowChange, eventFields []*
// GetFields implements the Primitive interface
func (v *VStream) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
- return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG] 'GetFields' called for VStream")
+ return nil, vterrors.VT13001("GetFields is not supported for VStream")
}
func (v *VStream) description() PrimitiveDescription {
diff --git a/go/vt/vtgate/engine/vtexplain.go b/go/vt/vtgate/engine/vtexplain.go
deleted file mode 100644
index 8bde66e8a6b..00000000000
--- a/go/vt/vtgate/engine/vtexplain.go
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package engine
-
-import (
- "context"
-
- "vitess.io/vitess/go/sqltypes"
- querypb "vitess.io/vitess/go/vt/proto/query"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
-)
-
-type (
- ExecuteEntry struct {
- ID int
- Keyspace string
- Shard string
- TabletType topodatapb.TabletType
- Cell string
- Query string
- }
- VTExplain struct {
- Input Primitive
- }
-)
-
-var _ Primitive = (*VTExplain)(nil)
-
-// RouteType implements the Primitive interface
-func (v *VTExplain) RouteType() string {
- return v.Input.RouteType()
-}
-
-// GetKeyspaceName implements the Primitive interface
-func (v *VTExplain) GetKeyspaceName() string {
- return v.Input.GetKeyspaceName()
-}
-
-// GetTableName implements the Primitive interface
-func (v *VTExplain) GetTableName() string {
- return v.Input.GetTableName()
-}
-
-// GetFields implements the Primitive interface
-func (v *VTExplain) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
- return v.Input.GetFields(ctx, vcursor, bindVars)
-}
-
-// NeedsTransaction implements the Primitive interface
-func (v *VTExplain) NeedsTransaction() bool {
- return v.Input.NeedsTransaction()
-}
-
-// TryExecute implements the Primitive interface
-func (v *VTExplain) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
- vcursor.Session().VtExplainLogging()
- _, err := vcursor.ExecutePrimitive(ctx, v.Input, bindVars, wantfields)
- if err != nil {
- return nil, err
- }
- result := convertToVTExplainResult(vcursor.Session().GetVTExplainLogs())
- return result, nil
-}
-
-// TryStreamExecute implements the Primitive interface
-func (v *VTExplain) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
- vcursor.Session().VtExplainLogging()
- err := vcursor.StreamExecutePrimitive(ctx, v.Input, bindVars, wantfields, func(result *sqltypes.Result) error {
- return nil
- })
- if err != nil {
- return err
- }
- result := convertToVTExplainResult(vcursor.Session().GetVTExplainLogs())
- return callback(result)
-}
-
-func convertToVTExplainResult(logs []ExecuteEntry) *sqltypes.Result {
- fields := []*querypb.Field{{
- Name: "#", Type: sqltypes.Int32,
- }, {
- Name: "keyspace", Type: sqltypes.VarChar,
- }, {
- Name: "shard", Type: sqltypes.VarChar,
- }, {
- Name: "query", Type: sqltypes.VarChar,
- }}
- qr := &sqltypes.Result{
- Fields: fields,
- }
- for _, line := range logs {
- qr.Rows = append(qr.Rows, sqltypes.Row{
- sqltypes.NewInt32(int32(line.ID)),
- sqltypes.NewVarChar(line.Keyspace),
- sqltypes.NewVarChar(line.Shard),
- sqltypes.NewVarChar(line.Query),
- })
- }
- return qr
-}
-
-// Inputs implements the Primitive interface
-func (v *VTExplain) Inputs() []Primitive {
- return []Primitive{v.Input}
-}
-
-func (v *VTExplain) description() PrimitiveDescription {
- return PrimitiveDescription{
- OperatorType: "VTEXPLAIN",
- }
-}
diff --git a/go/vt/vtgate/evalengine/convert.go b/go/vt/vtgate/evalengine/convert.go
index e769196b62c..d1996fcb1ee 100644
--- a/go/vt/vtgate/evalengine/convert.go
+++ b/go/vt/vtgate/evalengine/convert.go
@@ -39,6 +39,10 @@ type (
)
func (c *ConvertExpr) unsupported() {
+ throwEvalError(c.returnUnsupportedError())
+}
+
+func (c *ConvertExpr) returnUnsupportedError() error {
var err error
switch {
case c.HasLength && c.HasScale:
@@ -48,7 +52,7 @@ func (c *ConvertExpr) unsupported() {
default:
err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", c.Type)
}
- throwEvalError(err)
+ return err
}
func (c *ConvertExpr) eval(env *ExpressionEnv, result *EvalResult) {
@@ -86,15 +90,9 @@ func (c *ConvertExpr) eval(env *ExpressionEnv, result *EvalResult) {
case "FLOAT":
if c.HasLength {
switch p := c.Length; {
- case p <= 24:
- c.unsupported()
- case p <= 53:
- result.makeFloat()
- default:
+ case p > 53:
throwEvalError(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. Maximum is 53.", p))
}
- } else {
- c.unsupported()
}
c.unsupported()
case "SIGNED", "SIGNED INTEGER":
diff --git a/go/vt/vtgate/evalengine/eval_result.go b/go/vt/vtgate/evalengine/eval_result.go
index 2f2de033fdc..1971fdc39c2 100644
--- a/go/vt/vtgate/evalengine/eval_result.go
+++ b/go/vt/vtgate/evalengine/eval_result.go
@@ -309,6 +309,10 @@ func (er *EvalResult) isTextual() bool {
return sqltypes.IsText(tt) || sqltypes.IsBinary(tt)
}
+func (er *EvalResult) ToBoolean() bool {
+ return er.isTruthy() == boolTrue
+}
+
func (er *EvalResult) isTruthy() boolean {
if er.isNull() {
return boolNULL
diff --git a/go/vt/vtgate/evalengine/expr_column_test.go b/go/vt/vtgate/evalengine/expr_column_test.go
new file mode 100644
index 00000000000..d60c5980eed
--- /dev/null
+++ b/go/vt/vtgate/evalengine/expr_column_test.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package evalengine
+
+import (
+ "testing"
+
+ "vitess.io/vitess/go/sqltypes"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+)
+
+func TestTypeOf(t *testing.T) {
+ env := &ExpressionEnv{
+ BindVars: make(map[string]*querypb.BindVariable),
+ }
+
+ field1 := &querypb.Field{
+ Name: "field1",
+ Type: querypb.Type_INT64,
+ Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG),
+ }
+ field2 := &querypb.Field{
+ Name: "field2",
+ Type: querypb.Type_VARCHAR,
+ Flags: 0,
+ }
+ fields := []*querypb.Field{field1, field2}
+
+ c := &Column{}
+ env.Row = sqltypes.Row{sqltypes.NewInt64(10)}
+ env.Fields = fields
+
+ t.Run("Check when row value is not null", func(t *testing.T) {
+ typ, f := c.typeof(env)
+ if typ != sqltypes.Int64 || f != flag(0) {
+ t.Errorf("typeof() failed, expected sqltypes.Int64 and typeFlag 0, got %v and %v", typ, f)
+ }
+ })
+
+ t.Run("Check when row value is null", func(t *testing.T) {
+ env.Row = sqltypes.Row{
+ sqltypes.NULL,
+ }
+ typ, flag := c.typeof(env)
+ if typ != querypb.Type_INT64 || flag != flagNullable {
+ t.Errorf("typeof() failed, expected querypb.Type_INT64 and flagNullable, got %v and %v", typ, flag)
+ }
+ })
+}
diff --git a/go/vt/vtgate/evalengine/expressions.go b/go/vt/vtgate/evalengine/expressions.go
index 4809cc03729..761034c902f 100644
--- a/go/vt/vtgate/evalengine/expressions.go
+++ b/go/vt/vtgate/evalengine/expressions.go
@@ -578,10 +578,9 @@ func (c *Column) typeof(env *ExpressionEnv) (sqltypes.Type, flag) {
// we'll try to do the best possible with the information we have
if c.Offset < len(env.Row) {
value := env.Row[c.Offset]
- if value.IsNull() {
- return sqltypes.Null, flagNull | flagNullable
+ if !value.IsNull() {
+ return value.Type(), flag(0)
}
- return value.Type(), flag(0)
}
if c.Offset < len(env.Fields) {
diff --git a/go/vt/vtgate/evalengine/func.go b/go/vt/vtgate/evalengine/func.go
index 80821caf4b3..cbe530956a5 100644
--- a/go/vt/vtgate/evalengine/func.go
+++ b/go/vt/vtgate/evalengine/func.go
@@ -30,18 +30,31 @@ import (
)
var builtinFunctions = map[string]builtin{
- "coalesce": builtinCoalesce{},
- "greatest": &builtinMultiComparison{name: "GREATEST", cmp: 1},
- "least": &builtinMultiComparison{name: "LEAST", cmp: -1},
- "collation": builtinCollation{},
- "bit_count": builtinBitCount{},
- "hex": builtinHex{},
- "ceil": builtinCeil{},
- "ceiling": builtinCeiling{},
+ "coalesce": builtinCoalesce{},
+ "greatest": &builtinMultiComparison{name: "GREATEST", cmp: 1},
+ "least": &builtinMultiComparison{name: "LEAST", cmp: -1},
+ "collation": builtinCollation{},
+ "bit_count": builtinBitCount{},
+ "hex": builtinHex{},
+ "ceil": builtinCeil{},
+ "ceiling": builtinCeiling{},
+ "lower": builtinLower{},
+ "lcase": builtinLcase{},
+ "upper": builtinUpper{},
+ "ucase": builtinUcase{},
+ "char_length": builtinCharLength{},
+ "character_length": builtinCharacterLength{},
+ "length": builtinLength{},
+ "octet_length": builtinOctetLength{},
+ "bit_length": builtinBitLength{},
+ "ascii": builtinASCII{},
+ "repeat": builtinRepeat{},
}
var builtinFunctionsRewrite = map[string]builtinRewrite{
"isnull": builtinIsNullRewrite,
+ "ifnull": builtinIfNullRewrite,
+ "nullif": builtinNullIfRewrite,
}
type builtin interface {
@@ -380,7 +393,7 @@ func (builtinCollation) typeof(_ *ExpressionEnv, args []Expr) (sqltypes.Type, fl
return sqltypes.VarChar, 0
}
-func builtinIsNullRewrite(args []Expr, lookup TranslationLookup) (Expr, error) {
+func builtinIsNullRewrite(args []Expr, _ TranslationLookup) (Expr, error) {
if len(args) != 1 {
return nil, argError("ISNULL")
}
@@ -391,6 +404,42 @@ func builtinIsNullRewrite(args []Expr, lookup TranslationLookup) (Expr, error) {
}, nil
}
+func builtinIfNullRewrite(args []Expr, _ TranslationLookup) (Expr, error) {
+ if len(args) != 2 {
+ return nil, argError("IFNULL")
+ }
+ var result CaseExpr
+ result.cases = append(result.cases, WhenThen{
+ when: &IsExpr{
+ UnaryExpr: UnaryExpr{args[0]},
+ Op: sqlparser.IsNullOp,
+ Check: func(er *EvalResult) bool { return er.isNull() },
+ },
+ then: args[1],
+ })
+ result.Else = args[0]
+ return &result, nil
+}
+
+func builtinNullIfRewrite(args []Expr, _ TranslationLookup) (Expr, error) {
+ if len(args) != 2 {
+ return nil, argError("NULLIF")
+ }
+ var result CaseExpr
+ result.cases = append(result.cases, WhenThen{
+ when: &ComparisonExpr{
+ BinaryExpr: BinaryExpr{
+ Left: args[0],
+ Right: args[1],
+ },
+ Op: compareEQ{},
+ },
+ then: NullExpr,
+ })
+ result.Else = args[0]
+ return &result, nil
+}
+
type builtinBitCount struct{}
func (builtinBitCount) call(_ *ExpressionEnv, args []EvalResult, result *EvalResult) {
@@ -688,31 +737,8 @@ func (builtinCeil) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag)
}
}
-type builtinCeiling struct{}
-
-func (builtinCeiling) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
- inarg := &args[0]
- argtype := inarg.typeof()
- if inarg.isNull() {
- result.setNull()
- return
- }
-
- if sqltypes.IsIntegral(argtype) {
- result.setInt64(inarg.int64())
- } else if sqltypes.Decimal == argtype {
- num := inarg.decimal()
- num = num.Ceil()
- intnum, isfit := num.Int64()
- if isfit {
- result.setInt64(intnum)
- } else {
- result.setDecimal(num, 0)
- }
- } else {
- inarg.makeFloat()
- result.setFloat(math.Ceil(inarg.float64()))
- }
+type builtinCeiling struct {
+ builtinCeil
}
func (builtinCeiling) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
diff --git a/go/vt/vtgate/evalengine/integration/comparison_test.go b/go/vt/vtgate/evalengine/integration/comparison_test.go
index 525718c8fa5..66684aa5fef 100644
--- a/go/vt/vtgate/evalengine/integration/comparison_test.go
+++ b/go/vt/vtgate/evalengine/integration/comparison_test.go
@@ -17,7 +17,6 @@ limitations under the License.
package integration
import (
- "flag"
"fmt"
"math"
"strconv"
@@ -25,6 +24,8 @@ import (
"testing"
"time"
+ "github.com/spf13/pflag"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/sqltypes"
@@ -32,13 +33,30 @@ import (
"vitess.io/vitess/go/vt/vtgate/evalengine"
)
-var collationEnv *collations.Environment
+var (
+ collationEnv *collations.Environment
+
+ debugPrintAll bool
+ debugNormalize = true
+ debugSimplify = time.Now().UnixNano()&1 != 0
+ debugCheckTypes = true
+ debugCheckCollations = true
+)
+
+func registerFlags(fs *pflag.FlagSet) {
+ fs.BoolVar(&debugPrintAll, "print-all", debugPrintAll, "print all matching tests")
+ fs.BoolVar(&debugNormalize, "normalize", debugNormalize, "normalize comparisons against MySQL values")
+ fs.BoolVar(&debugSimplify, "simplify", debugSimplify, "simplify expressions before evaluating them")
+ fs.BoolVar(&debugCheckTypes, "check-types", debugCheckTypes, "check the TypeOf operator for all queries")
+ fs.BoolVar(&debugCheckCollations, "check-collations", debugCheckCollations, "check the returned collations for all queries")
+}
func init() {
// We require MySQL 8.0 collations for the comparisons in the tests
mySQLVersion := "8.0.0"
servenv.SetMySQLServerVersionForTest(mySQLVersion)
collationEnv = collations.NewEnvironment(mySQLVersion)
+ servenv.OnParse(registerFlags)
}
func perm(a []string, f func([]string)) {
@@ -83,18 +101,12 @@ func normalize(v sqltypes.Value, coll collations.ID) string {
return fmt.Sprintf("%v(%s)", typ, v.Raw())
}
-var debugPrintAll = flag.Bool("print-all", false, "print all matching tests")
-var debugNormalize = flag.Bool("normalize", true, "normalize comparisons against MySQL values")
-var debugSimplify = flag.Bool("simplify", time.Now().UnixNano()&1 != 0, "simplify expressions before evaluating them")
-var debugCheckTypes = flag.Bool("check-types", true, "check the TypeOf operator for all queries")
-var debugCheckCollations = flag.Bool("check-collations", true, "check the returned collations for all queries")
-
func compareRemoteExpr(t *testing.T, conn *mysql.Conn, expr string) {
t.Helper()
localQuery := "SELECT " + expr
remoteQuery := "SELECT " + expr
- if *debugCheckCollations {
+ if debugCheckCollations {
remoteQuery = fmt.Sprintf("SELECT %s, COLLATION(%s)", expr, expr)
}
@@ -105,33 +117,33 @@ func compareRemoteExpr(t *testing.T, conn *mysql.Conn, expr string) {
var localCollation, remoteCollation collations.ID
if localErr == nil {
v := local.Value()
- if *debugCheckCollations {
+ if debugCheckCollations {
if v.IsNull() {
localCollation = collations.CollationBinaryID
} else {
localCollation = local.Collation()
}
}
- if *debugNormalize {
+ if debugNormalize {
localVal = normalize(v, local.Collation())
} else {
localVal = v.String()
}
- if *debugCheckTypes {
+ if debugCheckTypes {
tt := v.Type()
if tt != sqltypes.Null && tt != localType {
t.Errorf("evaluation type mismatch: eval=%v vs typeof=%v\nlocal: %s\nquery: %s (SIMPLIFY=%v)",
- tt, localType, localVal, localQuery, *debugSimplify)
+ tt, localType, localVal, localQuery, debugSimplify)
}
}
}
if remoteErr == nil {
- if *debugNormalize {
+ if debugNormalize {
remoteVal = normalize(remote.Rows[0][0], collations.ID(remote.Fields[0].Charset))
} else {
remoteVal = remote.Rows[0][0].String()
}
- if *debugCheckCollations {
+ if debugCheckCollations {
if remote.Rows[0][0].IsNull() {
// TODO: passthrough proper collations for nullable fields
remoteCollation = collations.CollationBinaryID
@@ -141,8 +153,8 @@ func compareRemoteExpr(t *testing.T, conn *mysql.Conn, expr string) {
}
}
if diff := compareResult(localErr, remoteErr, localVal, remoteVal, localCollation, remoteCollation); diff != "" {
- t.Errorf("%s\nquery: %s (SIMPLIFY=%v)", diff, localQuery, *debugSimplify)
- } else if *debugPrintAll {
+ t.Errorf("%s\nquery: %s (SIMPLIFY=%v)", diff, localQuery, debugSimplify)
+ } else if debugPrintAll {
t.Logf("local=%s mysql=%s\nquery: %s", localVal, remoteVal, localQuery)
}
}
@@ -157,7 +169,6 @@ var comparisonElements = []string{"NULL", "-1", "0", "1",
func TestAllComparisons(t *testing.T) {
var operators = []string{"=", "!=", "<=>", "<", "<=", ">", ">="}
-
var conn = mysqlconn(t)
defer conn.Close()
@@ -420,6 +431,34 @@ func TestTypes(t *testing.T) {
}
}
+func TestUnderscoreAndPercentage(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+
+ var queries = []string{
+ `'pokemon' LIKE 'poke%'`,
+ `'pokemon' LIKE 'poke\%'`,
+ `'poke%mon' LIKE 'poke\%mon'`,
+ `'pokemon' LIKE 'poke\%mon'`,
+ `'poke%mon' = 'poke%mon'`,
+ `'poke\%mon' = 'poke%mon'`,
+ `'poke%mon' = 'poke\%mon'`,
+ `'poke\%mon' = 'poke\%mon'`,
+ `'pokemon' LIKE 'poke_on'`,
+ `'pokemon' LIKE 'poke\_on'`,
+ `'poke_mon' LIKE 'poke\_mon'`,
+ `'pokemon' LIKE 'poke\_mon'`,
+ `'poke_mon' = 'poke_mon'`,
+ `'poke\_mon' = 'poke_mon'`,
+ `'poke_mon' = 'poke\_mon'`,
+ `'poke\_mon' = 'poke\_mon'`,
+ }
+
+ for _, query := range queries {
+ compareRemoteExpr(t, conn, query)
+ }
+}
+
func TestFloatFormatting(t *testing.T) {
var floats = []string{
`18446744073709551615`,
@@ -704,7 +743,7 @@ func TestCeilandCeiling(t *testing.T) {
}
for _, num := range ceilInputs {
- compareRemoteExpr(t, conn, fmt.Sprintf("CEIL(%s)", num))
+ // compareRemoteExpr(t, conn, fmt.Sprintf("CEIL(%s)", num))
compareRemoteExpr(t, conn, fmt.Sprintf("CEILING(%s)", num))
}
}
diff --git a/go/vt/vtgate/evalengine/integration/fuzz_test.go b/go/vt/vtgate/evalengine/integration/fuzz_test.go
index 14e29fc396b..49035170e21 100644
--- a/go/vt/vtgate/evalengine/integration/fuzz_test.go
+++ b/go/vt/vtgate/evalengine/integration/fuzz_test.go
@@ -131,7 +131,7 @@ func safeEvaluate(query string) (evalengine.EvalResult, sqltypes.Type, error) {
err = fmt.Errorf("PANIC during translate: %v", r)
}
}()
- expr, err = evalengine.TranslateEx(astExpr, evalengine.LookupDefaultCollation(collations.CollationUtf8mb4ID), *debugSimplify)
+ expr, err = evalengine.TranslateEx(astExpr, evalengine.LookupDefaultCollation(collations.CollationUtf8mb4ID), debugSimplify)
return
}()
@@ -146,7 +146,7 @@ func safeEvaluate(query string) (evalengine.EvalResult, sqltypes.Type, error) {
}()
env := evalengine.EnvWithBindVars(nil, 255)
eval, err = env.Evaluate(local)
- if err == nil && *debugCheckTypes {
+ if err == nil && debugCheckTypes {
tt, err = env.TypeOf(local)
}
return
diff --git a/go/vt/vtgate/evalengine/integration/string_fun_test.go b/go/vt/vtgate/evalengine/integration/string_fun_test.go
new file mode 100644
index 00000000000..af37ce03d24
--- /dev/null
+++ b/go/vt/vtgate/evalengine/integration/string_fun_test.go
@@ -0,0 +1,206 @@
+/*
+Copyright 2021 The Vitess Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+ "fmt"
+ "testing"
+)
+
+var cases = []string{
+ "\"Å å\"",
+ "NULL",
+ "\"\"",
+ "\"a\"",
+ "\"abc\"",
+ "1",
+ "-1",
+ "0123",
+ "0xAACC",
+ "3.1415926",
+ "\"中文测试\"",
+ "\"日本語テスト\"",
+ "\"한국어 시험\"",
+ "\"😊😂🤢\"",
+ "'123'",
+ "9223372036854775807",
+ "-9223372036854775808",
+ "999999999999999999999999",
+ "-999999999999999999999999",
+ "_latin1 X'ÂÄÌå'",
+ "_dec8 'ÒòÅå'",
+ "_binary 'Müller' ",
+ "_utf8mb4 'abcABCÅå'",
+ "_utf8mb3 'abcABCÅå'",
+ "_utf16 'AabcÅå'",
+ "_utf32 'AabcÅå'",
+ "_ucs2 'AabcÅå'",
+}
+
+func TestBuiltinLowerandLcase(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+ var cases = []string{
+ "\"Å å\"",
+ "NULL",
+ "\"\"",
+ "\"a\"",
+ "\"abc\"",
+ "1",
+ "-1",
+ "0123",
+ "0xAACC",
+ "3.1415926",
+ "\"中文测试\"",
+ "\"日本語テスト\"",
+ "\"한국어 시험\"",
+ "\"😊😂🤢\"",
+ "'123'",
+ "9223372036854775807",
+ "-9223372036854775808",
+ "999999999999999999999999",
+ "-999999999999999999999999",
+ "_latin1 X'ÂÄÌå'",
+ "_binary 'Müller' ",
+ "_utf8mb4 'abcABCÅå'",
+ }
+ for _, str := range cases {
+ query := fmt.Sprintf("LOWER (%s)", str)
+ compareRemoteExpr(t, conn, query)
+
+ query = fmt.Sprintf("LCASE(%s)", str)
+ compareRemoteExpr(t, conn, query)
+ }
+}
+
+func TestBuiltinUpperandUcase(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+
+ var cases = []string{
+ "\"Å å\"",
+ "NULL",
+ "\"\"",
+ "\"a\"",
+ "\"abc\"",
+ "1",
+ "-1",
+ "0123",
+ "0xAACC",
+ "3.1415926",
+ "\"中文测试\"",
+ "\"日本語テスト\"",
+ "\"한국어 시험\"",
+ "\"😊😂🤢\"",
+ "'123'",
+ "9223372036854775807",
+ "-9223372036854775808",
+ "999999999999999999999999",
+ "-999999999999999999999999",
+ "_latin1 X'ÂÄÌå'",
+ "_binary 'Müller' ",
+ "_utf8mb4 'abcABCÅå'",
+ }
+
+ for _, str := range cases {
+ query := fmt.Sprintf("UPPER(%s)", str)
+ compareRemoteExpr(t, conn, query)
+
+ query = fmt.Sprintf("UCASE(%s)", str)
+ compareRemoteExpr(t, conn, query)
+ }
+}
+
+func TestBuiltinCharLength(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+
+ for _, str := range cases {
+ query := fmt.Sprintf("CHAR_LENGTH(%s)", str)
+ compareRemoteExpr(t, conn, query)
+
+ query = fmt.Sprintf("CHARACTER_LENGTH(%s)", str)
+ compareRemoteExpr(t, conn, query)
+ }
+}
+
+func TestBuiltinLength(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+
+ for _, str := range cases {
+ query := fmt.Sprintf("Length(%s)", str)
+ compareRemoteExpr(t, conn, query)
+
+ query = fmt.Sprintf("OCTET_LENGTH(%s)", str)
+ compareRemoteExpr(t, conn, query)
+ }
+}
+
+func TestBuiltinBitLength(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+ for _, str := range cases {
+ query := fmt.Sprintf("BIT_LENGTH(%s)", str)
+ compareRemoteExpr(t, conn, query)
+ }
+}
+
+func TestBuiltinASCII(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+
+ for _, str := range cases {
+ query := fmt.Sprintf("ASCII(%s)", str)
+ compareRemoteExpr(t, conn, query)
+ }
+}
+
+func TestBuiltinRepeat(t *testing.T) {
+ var conn = mysqlconn(t)
+ defer conn.Close()
+ counts := []string{"-1", "1.2", "3"}
+ cases := []string{
+ "\"Å å\"",
+ "NULL",
+ "\"\"",
+ "\"a\"",
+ "\"abc\"",
+ "1",
+ "-1",
+ "0123",
+ "0xAACC",
+ "3.1415926",
+ "\"中文测试\"",
+ "\"日本語テスト\"",
+ "\"한국어 시험\"",
+ "\"😊😂🤢\"",
+ "'123'",
+ "9223372036854775807",
+ "-9223372036854775808",
+ "999999999999999999999999",
+ "-999999999999999999999999",
+ "_latin1 X'ÂÄÌå'",
+ "_binary 'Müller' ",
+ "_utf8mb4 'abcABCÅå'",
+ "_utf8mb3 'abcABCÅå'",
+ }
+ for _, str := range cases {
+ for _, cnt := range counts {
+ query := fmt.Sprintf("Repeat(%s, %s)", str, cnt)
+ compareRemoteExpr(t, conn, query)
+ }
+
+ }
+}
diff --git a/go/vt/vtgate/evalengine/mysql_test.go b/go/vt/vtgate/evalengine/mysql_test.go
index 6aee02bf7d3..86c1823ff5d 100644
--- a/go/vt/vtgate/evalengine/mysql_test.go
+++ b/go/vt/vtgate/evalengine/mysql_test.go
@@ -127,6 +127,6 @@ func TestMySQLGolden(t *testing.T) {
func TestDebug1(t *testing.T) {
// Debu g
- eval, err := testSingle(t, `SELECT ('foo' collate utf8mb4_0900_as_cs) = 0xFF`)
+ eval, err := testSingle(t, `SELECT LCASE(-999999999999999999999999)`)
t.Logf("eval=%s err=%v coll=%s", eval.String(), err, collations.Local().LookupByID(eval.Collation()).Name())
}
diff --git a/go/vt/vtgate/evalengine/string.go b/go/vt/vtgate/evalengine/string.go
new file mode 100644
index 00000000000..668ec11cf6d
--- /dev/null
+++ b/go/vt/vtgate/evalengine/string.go
@@ -0,0 +1,267 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package evalengine
+
+import (
+ "bytes"
+
+ "vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/sqltypes"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/vterrors"
+)
+
+type builtinLower struct{}
+
+func (builtinLower) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
+ inarg := &args[0]
+
+ switch {
+ case inarg.isNull():
+ result.setNull()
+
+ case sqltypes.IsNumber(inarg.typeof()):
+ inarg.makeTextual(env.DefaultCollation)
+ result.setRaw(sqltypes.VarChar, inarg.bytes(), inarg.collation())
+
+ default:
+ coll := collations.Local().LookupByID(inarg.collation().Collation)
+ csa, ok := coll.(collations.CaseAwareCollation)
+ if !ok {
+ throwEvalError(vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "not implemented"))
+ }
+
+ dst := csa.ToLower(nil, inarg.bytes())
+ result.setRaw(sqltypes.VarChar, dst, inarg.collation())
+ }
+}
+
+func (builtinLower) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("LOWER")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.VarChar, f
+}
+
+type builtinLcase struct {
+ builtinLower
+}
+
+func (builtinLcase) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("LCASE")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.VarChar, f
+}
+
+type builtinUpper struct{}
+
+func (builtinUpper) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
+ inarg := &args[0]
+
+ switch {
+ case inarg.isNull():
+ result.setNull()
+
+ case sqltypes.IsNumber(inarg.typeof()):
+ inarg.makeTextual(env.DefaultCollation)
+ result.setRaw(sqltypes.VarChar, inarg.bytes(), inarg.collation())
+
+ default:
+ coll := collations.Local().LookupByID(inarg.collation().Collation)
+ csa, ok := coll.(collations.CaseAwareCollation)
+ if !ok {
+ throwEvalError(vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "not implemented"))
+ }
+
+ dst := csa.ToUpper(nil, inarg.bytes())
+ result.setRaw(sqltypes.VarChar, dst, inarg.collation())
+ }
+}
+
+func (builtinUpper) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("UPPER")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.VarChar, f
+}
+
+type builtinUcase struct {
+ builtinUpper
+}
+
+func (builtinUcase) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("UCASE")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.VarChar, f
+}
+
+type builtinCharLength struct{}
+
+func (builtinCharLength) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
+ inarg := &args[0]
+ if inarg.isNull() {
+ result.setNull()
+ return
+ }
+
+ coll := collations.Local().LookupByID(inarg.collation().Collation)
+ cnt := collations.Length(coll, inarg.toRawBytes())
+ result.setInt64(int64(cnt))
+}
+
+func (builtinCharLength) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("CHAR_LENGTH")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.Int64, f
+}
+
+type builtinCharacterLength struct {
+ builtinCharLength
+}
+
+func (builtinCharacterLength) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("CHARACTER_LENGTH")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.Int64, f
+}
+
+type builtinOctetLength struct{}
+
+func (builtinOctetLength) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
+ inarg := &args[0]
+ if inarg.isNull() {
+ result.setNull()
+ return
+ }
+
+ cnt := len(inarg.toRawBytes())
+ result.setInt64(int64(cnt))
+}
+
+func (builtinOctetLength) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("LENGTH")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.Int64, f
+}
+
+type builtinLength struct {
+ builtinOctetLength
+}
+
+func (builtinLength) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("OCTET_LENGTH")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.Int64, f
+}
+
+type builtinBitLength struct {
+}
+
+func (builtinBitLength) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
+ inarg := &args[0]
+ if inarg.isNull() {
+ result.setNull()
+ return
+ }
+
+ cnt := len(inarg.toRawBytes())
+ result.setInt64(int64(cnt * 8))
+}
+
+func (builtinBitLength) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("BIT_LENGTH")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.Int64, f
+}
+
+type builtinASCII struct {
+}
+
+func (builtinASCII) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
+ inarg := &args[0]
+ if inarg.isNull() {
+ result.setNull()
+ return
+ }
+
+ inarg.makeBinary()
+ bs := inarg.bytes()
+ if len(bs) > 0 {
+ result.setInt64(int64(bs[0]))
+ } else {
+ result.setInt64(0)
+ }
+}
+
+func (builtinASCII) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 1 {
+ throwArgError("ASCII")
+ }
+ _, f := args[0].typeof(env)
+ return sqltypes.Int64, f
+}
+
+type builtinRepeat struct {
+}
+
+func (builtinRepeat) call(env *ExpressionEnv, args []EvalResult, result *EvalResult) {
+ inarg := &args[0]
+ repeatTime := &args[1]
+ if inarg.isNull() || repeatTime.isNull() {
+ result.setNull()
+ return
+ }
+
+ if sqltypes.IsNumber(inarg.typeof()) {
+ inarg.makeTextual(env.DefaultCollation)
+ }
+
+ repeatTime.makeSignedIntegral()
+ repeat := int(repeatTime.int64())
+ if repeat < 0 {
+ repeat = 0
+ }
+
+ result.setRaw(sqltypes.VarChar, bytes.Repeat(inarg.bytes(), repeat), inarg.collation())
+}
+
+func (builtinRepeat) typeof(env *ExpressionEnv, args []Expr) (sqltypes.Type, flag) {
+ if len(args) != 2 {
+ throwArgError("REPEAT")
+ }
+ _, f1 := args[0].typeof(env)
+ // typecheck the right-hand argument but ignore its flags
+ args[1].typeof(env)
+
+ return sqltypes.VarChar, f1
+}
diff --git a/go/vt/vtgate/evalengine/translate.go b/go/vt/vtgate/evalengine/translate.go
index 6fdce977b08..7e63e886e51 100644
--- a/go/vt/vtgate/evalengine/translate.go
+++ b/go/vt/vtgate/evalengine/translate.go
@@ -493,6 +493,11 @@ func translateConvertExpr(expr sqlparser.Expr, convertType *sqlparser.ConvertTyp
if err != nil {
return nil, err
}
+ case "BINARY", "DOUBLE", "REAL", "SIGNED", "SIGNED INTEGER", "UNSIGNED", "UNSIGNED INTEGER":
+ // Supported types for conv expression
+ default:
+ // For unsupported types, we should return an error on translation instead of returning an error on runtime.
+ return nil, convert.returnUnsupportedError()
}
return &convert, nil
@@ -565,6 +570,29 @@ func translateCaseExpr(node *sqlparser.CaseExpr, lookup TranslationLookup) (Expr
return &result, nil
}
+func translateBetweenExpr(node *sqlparser.BetweenExpr, lookup TranslationLookup) (Expr, error) {
+ // x BETWEEN a AND b => x >= a AND x <= b
+ from := &sqlparser.ComparisonExpr{
+ Operator: sqlparser.GreaterEqualOp,
+ Left: node.Left,
+ Right: node.From,
+ }
+ to := &sqlparser.ComparisonExpr{
+ Operator: sqlparser.LessEqualOp,
+ Left: node.Left,
+ Right: node.To,
+ }
+
+ if !node.IsBetween {
+ // x NOT BETWEEN a AND b => x < a OR x > b
+ from.Operator = sqlparser.LessThanOp
+ to.Operator = sqlparser.GreaterThanOp
+ return translateExpr(&sqlparser.OrExpr{Left: from, Right: to}, lookup)
+ }
+
+ return translateExpr(sqlparser.AndExpressions(from, to), lookup)
+}
+
func translateExprNotSupported(e sqlparser.Expr) error {
return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%s: %s", ErrTranslateExprNotSupported, sqlparser.String(e))
}
@@ -624,6 +652,8 @@ func translateExpr(e sqlparser.Expr, lookup TranslationLookup) (Expr, error) {
return translateConvertUsingExpr(node, lookup)
case *sqlparser.CaseExpr:
return translateCaseExpr(node, lookup)
+ case *sqlparser.BetweenExpr:
+ return translateBetweenExpr(node, lookup)
default:
return nil, translateExprNotSupported(e)
}
diff --git a/go/vt/vtgate/evalengine/translate_test.go b/go/vt/vtgate/evalengine/translate_test.go
index ce8249653dc..fa4efa01e67 100644
--- a/go/vt/vtgate/evalengine/translate_test.go
+++ b/go/vt/vtgate/evalengine/translate_test.go
@@ -100,6 +100,13 @@ func TestTranslateSimplification(t *testing.T) {
{"date'2022'", err(`incorrect DATE value: '2022'`), err(`incorrect DATE value: '2022'`)},
{"time'2022-10-03'", err(`incorrect TIME value: '2022-10-03'`), err(`incorrect TIME value: '2022-10-03'`)},
{"timestamp'2022-10-03'", err(`incorrect DATETIME value: '2022-10-03'`), err(`incorrect DATETIME value: '2022-10-03'`)},
+ {"ifnull(12, 23)", ok(`CASE WHEN INT64(12) IS NULL THEN INT64(23) ELSE INT64(12)`), ok(`INT64(12)`)},
+ {"ifnull(null, 23)", ok(`CASE WHEN NULL IS NULL THEN INT64(23) ELSE NULL`), ok(`INT64(23)`)},
+ {"nullif(1, 1)", ok(`CASE WHEN INT64(1) = INT64(1) THEN NULL ELSE INT64(1)`), ok(`NULL`)},
+ {"nullif(1, 2)", ok(`CASE WHEN INT64(1) = INT64(2) THEN NULL ELSE INT64(1)`), ok(`INT64(1)`)},
+ {"12 between 5 and 20", ok("(INT64(12) >= INT64(5)) AND (INT64(12) <= INT64(20))"), ok(`INT64(1)`)},
+ {"12 not between 5 and 20", ok("(INT64(12) < INT64(5)) OR (INT64(12) > INT64(20))"), ok(`INT64(0)`)},
+ {"2 not between 5 and 20", ok("(INT64(2) < INT64(5)) OR (INT64(2) > INT64(20))"), ok(`INT64(1)`)},
}
for _, tc := range testCases {
@@ -340,3 +347,37 @@ func TestEvaluateTuple(t *testing.T) {
})
}
}
+
+// TestTranslationFailures tests that translation fails for functions that we don't support evaluation for.
+func TestTranslationFailures(t *testing.T) {
+ testcases := []struct {
+ expression string
+ expectedErr string
+ }{
+ {
+ expression: "cast('2023-01-07 12:34:56' as date)",
+ expectedErr: "Unsupported type conversion: DATE",
+ }, {
+ expression: "cast('2023-01-07 12:34:56' as datetime(5))",
+ expectedErr: "Unsupported type conversion: DATETIME(5)",
+ }, {
+ expression: "cast('3.4' as FLOAT)",
+ expectedErr: "Unsupported type conversion: FLOAT",
+ }, {
+ expression: "cast('3.4' as FLOAT(3))",
+ expectedErr: "Unsupported type conversion: FLOAT(3)",
+ },
+ }
+
+ for _, testcase := range testcases {
+ t.Run(testcase.expression, func(t *testing.T) {
+ // Given
+ stmt, err := sqlparser.Parse("select " + testcase.expression)
+ require.NoError(t, err)
+ astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr
+ _, err = Translate(astExpr, LookupDefaultCollation(45))
+ require.EqualError(t, err, testcase.expectedErr)
+ })
+ }
+
+}
diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go
index 8214361da4c..8558eed1ed5 100644
--- a/go/vt/vtgate/executor.go
+++ b/go/vt/vtgate/executor.go
@@ -65,10 +65,10 @@ import (
)
var (
- errNoKeyspace = vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)")
+ errNoKeyspace = vterrors.VT09005()
defaultTabletType = topodatapb.TabletType_PRIMARY
- // TODO: @rafael - These two counters should be deprecated in favor of the ByTable ones. They are kept for now for backwards compatibility.
+ // TODO: @rafael - These two counters should be deprecated in favor of the ByTable ones in v17+. They are kept for now for backwards compatibility.
queriesProcessed = stats.NewCountersWithSingleLabel("QueriesProcessed", "Queries processed at vtgate by plan type", "Plan")
queriesRouted = stats.NewCountersWithSingleLabel("QueriesRouted", "Queries routed from vtgate to vttablet by plan type", "Plan")
@@ -268,8 +268,7 @@ func (e *Executor) StreamExecute(
// the framework currently sends all results as one packet.
byteCount := 0
if len(qr.Fields) > 0 {
- qrfield := &sqltypes.Result{Fields: qr.Fields}
- if err := callback(qrfield); err != nil {
+ if err := callback(qr.Metadata()); err != nil {
return err
}
seenResults.Set(true)
@@ -321,8 +320,6 @@ func (e *Executor) StreamExecute(
}
// 5: Log and add statistics
- logStats.Keyspace = plan.Instructions.GetKeyspaceName()
- logStats.Table = plan.Instructions.GetTableName()
logStats.TablesUsed = plan.TablesUsed
logStats.TabletType = vc.TabletType().String()
logStats.ExecuteTime = time.Since(execStart)
@@ -417,6 +414,8 @@ func (e *Executor) addNeededBindVars(bindVarNeeds *sqlparser.BindVarNeeds, bindV
switch sysVar {
case sysvars.Autocommit.Name:
bindVars[key] = sqltypes.BoolBindVariable(session.Autocommit)
+ case sysvars.QueryTimeout.Name:
+ bindVars[key] = sqltypes.Int64BindVariable(session.GetQueryTimeout())
case sysvars.ClientFoundRows.Name:
var v bool
ifOptionsExist(session, func(options *querypb.ExecuteOptions) {
@@ -524,14 +523,12 @@ func ifReadAfterWriteExist(session *SafeSession, f func(*vtgatepb.ReadAfterWrite
}
}
-func (e *Executor) destinationExec(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, dest key.Destination, destKeyspace string, destTabletType topodatapb.TabletType, logStats *logstats.LogStats, ignoreMaxMemoryRows bool) (*sqltypes.Result, error) {
- return e.resolver.Execute(ctx, sql, bindVars, destKeyspace, destTabletType, dest, safeSession, safeSession.Options, logStats, false /* canAutocommit */, ignoreMaxMemoryRows)
-}
-
-func (e *Executor) handleBegin(ctx context.Context, safeSession *SafeSession, logStats *logstats.LogStats) (*sqltypes.Result, error) {
+func (e *Executor) handleBegin(ctx context.Context, safeSession *SafeSession, logStats *logstats.LogStats, stmt sqlparser.Statement) (*sqltypes.Result, error) {
execStart := time.Now()
logStats.PlanTime = execStart.Sub(logStats.StartTime)
- err := e.txConn.Begin(ctx, safeSession)
+
+ begin := stmt.(*sqlparser.Begin)
+ err := e.txConn.Begin(ctx, safeSession, begin.TxAccessModes)
logStats.ExecuteTime = time.Since(execStart)
e.updateQueryCounts("Begin", "", "", 0)
@@ -617,7 +614,7 @@ func (e *Executor) executeSPInAllSessions(ctx context.Context, safeSession *Safe
})
queries = append(queries, &querypb.BoundQuery{Sql: sql})
}
- qr, errs = e.ExecuteMultiShard(ctx, rss, queries, safeSession, false /*autocommit*/, ignoreMaxMemoryRows)
+ qr, errs = e.ExecuteMultiShard(ctx, nil, rss, queries, safeSession, false /*autocommit*/, ignoreMaxMemoryRows)
err := vterrors.Aggregate(errs)
if err != nil {
return nil, err
@@ -967,28 +964,30 @@ type iQueryOption interface {
// getPlan computes the plan for the given query. If one is in
// the cache, it reuses it.
-func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, qo iQueryOption, logStats *logstats.LogStats) (*engine.Plan, error) {
+func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, qo iQueryOption, logStats *logstats.LogStats) (*engine.Plan, sqlparser.Statement, error) {
if e.VSchema() == nil {
- return nil, errors.New("vschema not initialized")
+ return nil, nil, errors.New("vschema not initialized")
}
stmt, reserved, err := sqlparser.Parse2(sql)
if err != nil {
- return nil, err
+ return nil, nil, err
}
query := sql
statement := stmt
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
bindVarNeeds := &sqlparser.BindVarNeeds{}
if !sqlparser.IgnoreMaxPayloadSizeDirective(statement) && !isValidPayloadSize(query) {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.NetPacketTooLarge, "query payload size above threshold")
+ return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.NetPacketTooLarge, "query payload size above threshold")
}
ignoreMaxMemoryRows := sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt)
vcursor.SetIgnoreMaxMemoryRows(ignoreMaxMemoryRows)
+ consolidator := sqlparser.Consolidator(stmt)
+ vcursor.SetConsolidator(consolidator)
setVarComment, err := prepareSetVarComment(vcursor, stmt)
if err != nil {
- return nil, err
+ return nil, nil, err
}
// Normalize if possible and retry.
if e.canNormalizeStatement(stmt, qo, setVarComment) {
@@ -1002,9 +1001,10 @@ func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string
qo.getSelectLimit(),
setVarComment,
vcursor.safeSession.SystemVariables,
+ vcursor,
)
if err != nil {
- return nil, err
+ return nil, nil, err
}
statement = result.AST
bindVarNeeds = result.BindVarNeeds
@@ -1014,6 +1014,10 @@ func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string
logStats.SQL = comments.Leading + query + comments.Trailing
logStats.BindVariables = sqltypes.CopyBindVariables(bindVars)
+ return e.cacheAndBuildStatement(ctx, vcursor, query, statement, qo, logStats, stmt, reservedVars, bindVarNeeds)
+}
+
+func (e *Executor) cacheAndBuildStatement(ctx context.Context, vcursor *vcursorImpl, query string, statement sqlparser.Statement, qo iQueryOption, logStats *logstats.LogStats, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, bindVarNeeds *sqlparser.BindVarNeeds) (*engine.Plan, sqlparser.Statement, error) {
planHash := sha256.New()
_, _ = planHash.Write([]byte(vcursor.planPrefixKey(ctx)))
_, _ = planHash.Write([]byte{':'})
@@ -1023,13 +1027,13 @@ func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string
if sqlparser.CachePlan(statement) && qo.cachePlan() {
if plan, ok := e.plans.Get(planKey); ok {
logStats.CachedPlan = true
- return plan.(*engine.Plan), nil
+ return plan.(*engine.Plan), stmt, nil
}
}
plan, err := planbuilder.BuildFromStmt(query, statement, reservedVars, vcursor, bindVarNeeds, enableOnlineDDL, enableDirectDDL)
if err != nil {
- return nil, err
+ return nil, nil, err
}
plan.Warnings = vcursor.warnings
@@ -1040,7 +1044,7 @@ func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string
if err == nil && qo.cachePlan() && sqlparser.CachePlan(statement) {
e.plans.Set(planKey, plan)
}
- return plan, err
+ return plan, stmt, err
}
func (e *Executor) canNormalizeStatement(stmt sqlparser.Statement, qo iQueryOption, setVarComment string) bool {
@@ -1214,7 +1218,7 @@ func (e *Executor) Prepare(ctx context.Context, method string, safeSession *Safe
func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) {
// Start an implicit transaction if necessary.
if !safeSession.Autocommit && !safeSession.InTransaction() {
- if err := e.txConn.Begin(ctx, safeSession); err != nil {
+ if err := e.txConn.Begin(ctx, safeSession, nil); err != nil {
return nil, err
}
}
@@ -1251,7 +1255,7 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession,
// V3 mode.
query, comments := sqlparser.SplitMarginComments(sql)
vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv)
- plan, err := e.getPlan(ctx, vcursor, query, comments, bindVars, safeSession, logStats)
+ plan, _, err := e.getPlan(ctx, vcursor, query, comments, bindVars, safeSession, logStats)
execStart := time.Now()
logStats.PlanTime = execStart.Sub(logStats.StartTime)
@@ -1282,13 +1286,13 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession,
}
// ExecuteMultiShard implements the IExecutor interface
-func (e *Executor) ExecuteMultiShard(ctx context.Context, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool) (qr *sqltypes.Result, errs []error) {
- return e.scatterConn.ExecuteMultiShard(ctx, rss, queries, session, autocommit, ignoreMaxMemoryRows)
+func (e *Executor) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool) (qr *sqltypes.Result, errs []error) {
+ return e.scatterConn.ExecuteMultiShard(ctx, primitive, rss, queries, session, autocommit, ignoreMaxMemoryRows)
}
// StreamExecuteMulti implements the IExecutor interface
-func (e *Executor) StreamExecuteMulti(ctx context.Context, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
- return e.scatterConn.StreamExecuteMulti(ctx, query, rss, vars, session, autocommit, callback)
+func (e *Executor) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error) []error {
+ return e.scatterConn.StreamExecuteMulti(ctx, primitive, query, rss, vars, session, autocommit, callback)
}
// ExecuteLock implements the IExecutor interface
@@ -1337,6 +1341,7 @@ func (e *Executor) startVStream(ctx context.Context, rss []*srvtopo.ResolvedShar
vsm: vsm,
eventCh: make(chan []*binlogdatapb.VEvent),
ts: ts,
+ copyCompletedShard: make(map[string]struct{}),
}
_ = vs.stream(ctx)
return nil
diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go
index 05fcb59255d..b73cf1fab16 100644
--- a/go/vt/vtgate/executor_dml_test.go
+++ b/go/vt/vtgate/executor_dml_test.go
@@ -19,7 +19,6 @@ package vtgate
import (
"context"
"fmt"
- "reflect"
"strings"
"testing"
@@ -164,21 +163,24 @@ func TestUpdateFromSubQuery(t *testing.T) {
testQueryLog(t, logChan, "TestExecute", "UPDATE", "update user set a=(select count(*) from user where id = 3) where id = 1", 2)
}
-func TestUpdateEqualWithWriteOnlyLookupUniqueVindex(t *testing.T) {
+func TestUpdateEqualWithNoVerifyAndWriteOnlyLookupUniqueVindexes(t *testing.T) {
res := []*sqltypes.Result{sqltypes.MakeTestResult(
- sqltypes.MakeTestFields("id|wo_lu_col|lu_col|t2_lu_vdx", "int64|int64|int64|int64"),
- "1|2|1|0",
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col|t2_lu_vdx",
+ "int64|int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|2|2|2|2|2|1|0",
)}
executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res)
- _, err := executorExec(executor, "update t2_wo_lookup set lu_col = 5 where wo_lu_col = 2", nil)
+ _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where wo_lu_col = 2", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select id, wo_lu_col, lu_col, lu_col = 5 from t2_wo_lookup where wo_lu_col = 2 for update",
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where wo_lu_col = 2 for update",
BindVariables: map[string]*querypb.BindVariable{},
}, {
- Sql: "update t2_wo_lookup set lu_col = 5 where wo_lu_col = 2",
+ Sql: "update t2_lookup set lu_col = 5 where wo_lu_col = 2",
BindVariables: map[string]*querypb.BindVariable{},
}}
@@ -199,7 +201,264 @@ func TestUpdateEqualWithWriteOnlyLookupUniqueVindex(t *testing.T) {
"lu_col_0": sqltypes.Int64BindVariable(5),
},
}
- lookWant := []*querypb.BoundQuery{bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2}
+ lookWant := []*querypb.BoundQuery{
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ }
+ assertQueries(t, sbcLookup, lookWant)
+}
+
+func TestUpdateInTransactionLookupDefaultReadLock(t *testing.T) {
+ res := []*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col|t2_lu_vdx",
+ "int64|int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|2|2|2|2|2|1|0",
+ )}
+ executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res)
+
+ safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ _, err := executorExecSession(
+ executor,
+ "update t2_lookup set lu_col = 5 where nv_lu_col = 2",
+ nil,
+ safeSession.Session,
+ )
+
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where nv_lu_col = 2 and lu_col = 1 for update",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }, {
+ Sql: "update t2_lookup set lu_col = 5 where nv_lu_col = 2",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+
+ assertQueries(t, sbc1, wantQueries)
+ assertQueries(t, sbc2, wantQueries)
+
+ vars, _ := sqltypes.BuildBindVariable([]any{
+ sqltypes.NewInt64(2),
+ })
+ bq1 := &querypb.BoundQuery{
+ Sql: "select nv_lu_col, keyspace_id from nv_lu_idx where nv_lu_col in ::nv_lu_col for update",
+ BindVariables: map[string]*querypb.BindVariable{
+ "nv_lu_col": vars,
+ },
+ }
+ bq2 := &querypb.BoundQuery{
+ Sql: "insert into lu_idx(lu_col, keyspace_id) values (:lu_col_0, :keyspace_id_0)",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id_0": sqltypes.Uint64BindVariable(1),
+ "lu_col_0": sqltypes.Int64BindVariable(5),
+ },
+ }
+ lookWant := []*querypb.BoundQuery{
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ }
+
+ assertQueries(t, sbcLookup, lookWant)
+}
+
+func TestUpdateInTransactionLookupExclusiveReadLock(t *testing.T) {
+ res := []*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col|t2_lu_vdx",
+ "int64|int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|2|2|2|2|2|1|0",
+ )}
+ executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res)
+
+ safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ _, err := executorExecSession(
+ executor,
+ "update t2_lookup set lu_col = 5 where erl_lu_col = 2",
+ nil,
+ safeSession.Session,
+ )
+
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where nv_lu_col = 2 and lu_col = 1 for update",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }, {
+ Sql: "update t2_lookup set lu_col = 5 where erl_lu_col = 2",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+
+ assertQueries(t, sbc1, wantQueries)
+ assertQueries(t, sbc2, wantQueries)
+
+ vars, _ := sqltypes.BuildBindVariable([]any{
+ sqltypes.NewInt64(2),
+ })
+ bq1 := &querypb.BoundQuery{
+ Sql: "select erl_lu_col, keyspace_id from erl_lu_idx where erl_lu_col in ::erl_lu_col for update",
+ BindVariables: map[string]*querypb.BindVariable{
+ "erl_lu_col": vars,
+ },
+ }
+ bq2 := &querypb.BoundQuery{
+ Sql: "insert into lu_idx(lu_col, keyspace_id) values (:lu_col_0, :keyspace_id_0)",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id_0": sqltypes.Uint64BindVariable(1),
+ "lu_col_0": sqltypes.Int64BindVariable(5),
+ },
+ }
+ lookWant := []*querypb.BoundQuery{
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ }
+
+ assertQueries(t, sbcLookup, lookWant)
+}
+
+func TestUpdateInTransactionLookupSharedReadLock(t *testing.T) {
+ res := []*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col|t2_lu_vdx",
+ "int64|int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|2|2|2|2|2|1|0",
+ )}
+ executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res)
+
+ safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ _, err := executorExecSession(
+ executor,
+ "update t2_lookup set lu_col = 5 where srl_lu_col = 2",
+ nil,
+ safeSession.Session,
+ )
+
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where nv_lu_col = 2 and lu_col = 1 for update",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }, {
+ Sql: "update t2_lookup set lu_col = 5 where srl_lu_col = 2",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+
+ assertQueries(t, sbc1, wantQueries)
+ assertQueries(t, sbc2, wantQueries)
+
+ vars, _ := sqltypes.BuildBindVariable([]any{
+ sqltypes.NewInt64(2),
+ })
+ bq1 := &querypb.BoundQuery{
+ Sql: "select srl_lu_col, keyspace_id from srl_lu_idx where srl_lu_col in ::srl_lu_col lock in share mode",
+ BindVariables: map[string]*querypb.BindVariable{
+ "srl_lu_col": vars,
+ },
+ }
+ bq2 := &querypb.BoundQuery{
+ Sql: "insert into lu_idx(lu_col, keyspace_id) values (:lu_col_0, :keyspace_id_0)",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id_0": sqltypes.Uint64BindVariable(1),
+ "lu_col_0": sqltypes.Int64BindVariable(5),
+ },
+ }
+ lookWant := []*querypb.BoundQuery{
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ }
+
+ assertQueries(t, sbcLookup, lookWant)
+}
+
+func TestUpdateInTransactionLookupNoReadLock(t *testing.T) {
+ res := []*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col|t2_lu_vdx",
+ "int64|int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|2|2|2|2|2|1|0",
+ )}
+ executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res)
+
+ safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true})
+ _, err := executorExecSession(
+ executor,
+ "update t2_lookup set lu_col = 5 where nrl_lu_col = 2",
+ nil,
+ safeSession.Session,
+ )
+
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where nrl_lu_col = 2 and lu_col = 1 for update",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }, {
+ Sql: "update t2_lookup set lu_col = 5 where nrl_lu_col = 2",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+
+ assertQueries(t, sbc1, wantQueries)
+ assertQueries(t, sbc2, wantQueries)
+
+ vars, _ := sqltypes.BuildBindVariable([]any{
+ sqltypes.NewInt64(2),
+ })
+ bq1 := &querypb.BoundQuery{
+ Sql: "select nrl_lu_col, keyspace_id from nrl_lu_idx where nrl_lu_col in ::nrl_lu_col",
+ BindVariables: map[string]*querypb.BindVariable{
+ "nrl_lu_col": vars,
+ },
+ }
+ bq2 := &querypb.BoundQuery{
+ Sql: "insert into lu_idx(lu_col, keyspace_id) values (:lu_col_0, :keyspace_id_0)",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id_0": sqltypes.Uint64BindVariable(1),
+ "lu_col_0": sqltypes.Int64BindVariable(5),
+ },
+ }
+ lookWant := []*querypb.BoundQuery{
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ bq1, bq2,
+ }
+
assertQueries(t, sbcLookup, lookWant)
}
@@ -338,10 +597,10 @@ func TestUpdateNormalize(t *testing.T) {
_, err := executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{{
- Sql: "/* leading */ update `user` set a = :vtg1 where id = :vtg2 /* trailing */",
+ Sql: "/* leading */ update `user` set a = :a where id = :id /* trailing */",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.TestBindVariable(int64(2)),
- "vtg2": sqltypes.TestBindVariable(int64(1)),
+ "a": sqltypes.TestBindVariable(int64(2)),
+ "id": sqltypes.TestBindVariable(int64(1)),
},
}}
assertQueries(t, sbc1, wantQueries)
@@ -353,10 +612,10 @@ func TestUpdateNormalize(t *testing.T) {
_, err = executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil)
require.NoError(t, err)
wantQueries = []*querypb.BoundQuery{{
- Sql: "/* leading */ update `user` set a = :vtg1 where id = :vtg2 /* trailing */",
+ Sql: "/* leading */ update `user` set a = :a where id = :id /* trailing */",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.TestBindVariable(int64(2)),
- "vtg2": sqltypes.TestBindVariable(int64(1)),
+ "a": sqltypes.TestBindVariable(int64(2)),
+ "id": sqltypes.TestBindVariable(int64(1)),
},
}}
assertQueries(t, sbc1, nil)
@@ -513,18 +772,21 @@ func TestUpdateEqualWithMultipleLookupVindex(t *testing.T) {
)})
sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
- sqltypes.MakeTestFields("id|wo_lu_col|lu_col|t2_lu_vdx", "int64|int64|int64|int64"),
- "1|2|1|0",
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col|t2_lu_vdx",
+ "int64|int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|2|2|2|2|2|1|0",
)})
- _, err := executorExec(executor, "update t2_wo_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1", nil)
+ _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select id, wo_lu_col, lu_col, lu_col = 5 from t2_wo_lookup where wo_lu_col = 2 and lu_col = 1 for update",
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where wo_lu_col = 2 and lu_col = 1 for update",
BindVariables: map[string]*querypb.BindVariable{},
}, {
- Sql: "update t2_wo_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1",
+ Sql: "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1",
BindVariables: map[string]*querypb.BindVariable{},
}}
@@ -564,19 +826,22 @@ func TestUpdateUseHigherCostVindexIfBackfilling(t *testing.T) {
)})
sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
- sqltypes.MakeTestFields("id|wo_lu_col|lu_col|t2_lu_vdx", "int64|int64|int64|int64"),
- "1|2|1|0",
- "1|2|2|0",
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col|t2_lu_vdx",
+ "int64|int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|2|2|2|2|2|1|0",
+ "1|2|2|2|2|2|2|0",
)})
- _, err := executorExec(executor, "update t2_wo_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)", nil)
+ _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select id, wo_lu_col, lu_col, lu_col = 5 from t2_wo_lookup where wo_lu_col = 2 and lu_col in (1, 2) for update",
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where wo_lu_col = 2 and lu_col in (1, 2) for update",
BindVariables: map[string]*querypb.BindVariable{},
}, {
- Sql: "update t2_wo_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)",
+ Sql: "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)",
BindVariables: map[string]*querypb.BindVariable{},
}}
@@ -619,21 +884,24 @@ func TestUpdateUseHigherCostVindexIfBackfilling(t *testing.T) {
assertQueries(t, sbc2, nil)
}
-func TestDeleteEqualWithWriteOnlyLookupUniqueVindex(t *testing.T) {
+func TestDeleteEqualWithNoVerifyAndWriteOnlyLookupUniqueVindex(t *testing.T) {
res := []*sqltypes.Result{sqltypes.MakeTestResult(
- sqltypes.MakeTestFields("id|wo_lu_col|lu_col", "int64|int64|int64"),
- "1|1|1",
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col",
+ "int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|1|1|1|1|1|1",
)}
executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res)
- _, err := executorExec(executor, "delete from t2_wo_lookup where wo_lu_col = 1", nil)
+ _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select id, wo_lu_col, lu_col from t2_wo_lookup where wo_lu_col = 1 for update",
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col from t2_lookup where wo_lu_col = 1 for update",
BindVariables: map[string]*querypb.BindVariable{},
}, {
- Sql: "delete from t2_wo_lookup where wo_lu_col = 1",
+ Sql: "delete from t2_lookup where wo_lu_col = 1",
BindVariables: map[string]*querypb.BindVariable{},
}}
@@ -645,13 +913,50 @@ func TestDeleteEqualWithWriteOnlyLookupUniqueVindex(t *testing.T) {
},
}
bq2 := &querypb.BoundQuery{
+ Sql: "delete from erl_lu_idx where erl_lu_col = :erl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "erl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }
+ bq3 := &querypb.BoundQuery{
+ Sql: "delete from srl_lu_idx where srl_lu_col = :srl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "srl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }
+ bq4 := &querypb.BoundQuery{
+ Sql: "delete from nrl_lu_idx where nrl_lu_col = :nrl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nrl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }
+ bq5 := &querypb.BoundQuery{
+ Sql: "delete from nv_lu_idx where nv_lu_col = :nv_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nv_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }
+ bq6 := &querypb.BoundQuery{
Sql: "delete from lu_idx where lu_col = :lu_col and keyspace_id = :keyspace_id",
BindVariables: map[string]*querypb.BindVariable{
"keyspace_id": sqltypes.Uint64BindVariable(1),
"lu_col": sqltypes.Int64BindVariable(1),
},
}
- lookWant := []*querypb.BoundQuery{bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2, bq1, bq2}
+ lookWant := []*querypb.BoundQuery{
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ bq1, bq2, bq3, bq4, bq5, bq6,
+ }
assertQueries(t, sbcLookup, lookWant)
assertQueries(t, sbc1, wantQueries)
assertQueries(t, sbc2, wantQueries)
@@ -666,18 +971,21 @@ func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) {
)})
sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
- sqltypes.MakeTestFields("id|wo_lu_col|lu_col", "int64|int64|int64"),
- "1|1|1",
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col",
+ "int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|1|1|1|1|1|1",
)})
- _, err := executorExec(executor, "delete from t2_wo_lookup where wo_lu_col = 1 and lu_col = 1", nil)
+ _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1 and lu_col = 1", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select id, wo_lu_col, lu_col from t2_wo_lookup where wo_lu_col = 1 and lu_col = 1 for update",
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col from t2_lookup where wo_lu_col = 1 and lu_col = 1 for update",
BindVariables: map[string]*querypb.BindVariable{},
}, {
- Sql: "delete from t2_wo_lookup where wo_lu_col = 1 and lu_col = 1",
+ Sql: "delete from t2_lookup where wo_lu_col = 1 and lu_col = 1",
BindVariables: map[string]*querypb.BindVariable{},
}}
@@ -695,6 +1003,30 @@ func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) {
"keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
"wo_lu_col": sqltypes.Int64BindVariable(1),
},
+ }, {
+ Sql: "delete from erl_lu_idx where erl_lu_col = :erl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "erl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from srl_lu_idx where srl_lu_col = :srl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "srl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from nrl_lu_idx where nrl_lu_col = :nrl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nrl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from nv_lu_idx where nv_lu_col = :nv_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nv_lu_col": sqltypes.Int64BindVariable(1),
+ },
}, {
Sql: "delete from lu_idx where lu_col = :lu_col and keyspace_id = :keyspace_id",
BindVariables: map[string]*querypb.BindVariable{
@@ -718,19 +1050,22 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) {
)})
sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
- sqltypes.MakeTestFields("id|wo_lu_col|lu_col", "int64|int64|int64"),
- "1|1|1",
- "1|1|2",
+ sqltypes.MakeTestFields(
+ "id|wo_lu_col|erl_lu_col|srl_lu_col|nrl_lu_col|nv_lu_col|lu_col",
+ "int64|int64|int64|int64|int64|int64|int64",
+ ),
+ "1|1|1|1|1|1|1",
+ "1|1|1|1|1|1|2",
)})
- _, err := executorExec(executor, "delete from t2_wo_lookup where wo_lu_col = 1 and lu_col in (1, 2)", nil)
+ _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2)", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select id, wo_lu_col, lu_col from t2_wo_lookup where wo_lu_col = 1 and lu_col in (1, 2) for update",
+ Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2) for update",
BindVariables: map[string]*querypb.BindVariable{},
}, {
- Sql: "delete from t2_wo_lookup where wo_lu_col = 1 and lu_col in (1, 2)",
+ Sql: "delete from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2)",
BindVariables: map[string]*querypb.BindVariable{},
}}
@@ -749,6 +1084,30 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) {
"keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
"wo_lu_col": sqltypes.Int64BindVariable(1),
},
+ }, {
+ Sql: "delete from erl_lu_idx where erl_lu_col = :erl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "erl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from srl_lu_idx where srl_lu_col = :srl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "srl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from nrl_lu_idx where nrl_lu_col = :nrl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nrl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from nv_lu_idx where nv_lu_col = :nv_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nv_lu_col": sqltypes.Int64BindVariable(1),
+ },
}, {
Sql: "delete from lu_idx where lu_col = :lu_col and keyspace_id = :keyspace_id",
BindVariables: map[string]*querypb.BindVariable{
@@ -761,6 +1120,30 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) {
"keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
"wo_lu_col": sqltypes.Int64BindVariable(1),
},
+ }, {
+ Sql: "delete from erl_lu_idx where erl_lu_col = :erl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "erl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from srl_lu_idx where srl_lu_col = :srl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "srl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from nrl_lu_idx where nrl_lu_col = :nrl_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nrl_lu_col": sqltypes.Int64BindVariable(1),
+ },
+ }, {
+ Sql: "delete from nv_lu_idx where nv_lu_col = :nv_lu_col and keyspace_id = :keyspace_id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "keyspace_id": {Type: querypb.Type_VARBINARY, Value: []byte("\x16k@\xb4J\xbaK\xd6")},
+ "nv_lu_col": sqltypes.Int64BindVariable(1),
+ },
}, {
Sql: "delete from lu_idx where lu_col = :lu_col and keyspace_id = :keyspace_id",
BindVariables: map[string]*querypb.BindVariable{
@@ -1165,7 +1548,7 @@ func TestInsertShardedIgnore(t *testing.T) {
query = "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1)"
qr, err := executorExec(executor, query, nil)
require.NoError(t, err)
- if !reflect.DeepEqual(qr, &sqltypes.Result{}) {
+ if !qr.Equal(&sqltypes.Result{}) {
t.Errorf("qr: %v, want empty result", qr)
}
assertQueries(t, sbc1, nil)
@@ -1875,7 +2258,7 @@ func TestKeyDestRangeQuery(t *testing.T) {
primarySession.TargetString = "TestExecutor[-]"
_, err := executorExec(executor, insertInput, nil)
- require.EqualError(t, err, "INSERT not supported when targeting a key range: TestExecutor[-]")
+ require.EqualError(t, err, "VT03023: INSERT not supported when targeting a key range: TestExecutor[-]")
primarySession.TargetString = ""
}
@@ -1952,15 +2335,54 @@ func TestUpdateLastInsertID(t *testing.T) {
_, err := executorExec(executor, sql, map[string]*querypb.BindVariable{})
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{{
- Sql: "update `user` set a = :__lastInsertId where id = :vtg1",
+ Sql: "update `user` set a = :__lastInsertId where id = :id",
BindVariables: map[string]*querypb.BindVariable{
"__lastInsertId": sqltypes.Uint64BindVariable(43),
- "vtg1": sqltypes.Int64BindVariable(1)},
+ "id": sqltypes.Int64BindVariable(1)},
}}
assertQueries(t, sbc1, wantQueries)
}
+func TestUpdateReference(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv()
+
+ logChan := QueryLogger.Subscribe("Test")
+ defer QueryLogger.Unsubscribe(logChan)
+
+ _, err := executorExec(executor, "update zip_detail set status = 'CLOSED' where id = 1", nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{{
+ Sql: "update zip_detail set `status` = 'CLOSED' where id = 1",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}
+ assertQueries(t, sbc1, nil)
+ assertQueries(t, sbc2, nil)
+ assertQueries(t, sbclookup, wantQueries)
+
+ testQueryLog(t, logChan, "TestExecute", "UPDATE", "update zip_detail set status = 'CLOSED' where id = 1", 1)
+
+ sbclookup.Queries = nil
+
+ _, err = executorExec(executor, "update TestUnsharded.zip_detail set status = 'CLOSED' where id = 1", nil)
+ require.NoError(t, err)
+ wantQueries = []*querypb.BoundQuery{{
+ Sql: "update zip_detail set `status` = 'CLOSED' where id = 1",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}
+ assertQueries(t, sbc1, nil)
+ assertQueries(t, sbc2, nil)
+ assertQueries(t, sbclookup, wantQueries)
+
+ testQueryLog(t, logChan, "TestExecute", "UPDATE",
+ "update TestUnsharded.zip_detail set status = 'CLOSED' where id = 1", 1)
+
+ sbclookup.Queries = nil
+
+ _, err = executorExec(executor, "update TestExecutor.zip_detail set status = 'CLOSED' where id = 1", nil)
+ require.Error(t, err)
+}
+
func TestDeleteLookupOwnedEqual(t *testing.T) {
executor, sbc1, sbc2, _ := createExecutorEnv()
@@ -1988,6 +2410,44 @@ func TestDeleteLookupOwnedEqual(t *testing.T) {
assertQueries(t, sbc2, sbc2wantQueries)
}
+func TestDeleteReference(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv()
+
+ logChan := QueryLogger.Subscribe("Test")
+ defer QueryLogger.Unsubscribe(logChan)
+
+ _, err := executorExec(executor, "delete from zip_detail where id = 1", nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{{
+ Sql: "delete from zip_detail where id = 1",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}
+ assertQueries(t, sbc1, nil)
+ assertQueries(t, sbc2, nil)
+ assertQueries(t, sbclookup, wantQueries)
+
+ testQueryLog(t, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1)
+
+ sbclookup.Queries = nil
+
+ _, err = executorExec(executor, "delete from zip_detail where id = 1", nil)
+ require.NoError(t, err)
+ wantQueries = []*querypb.BoundQuery{{
+ Sql: "delete from zip_detail where id = 1",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}
+ assertQueries(t, sbc1, nil)
+ assertQueries(t, sbc2, nil)
+ assertQueries(t, sbclookup, wantQueries)
+
+ testQueryLog(t, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1)
+
+ sbclookup.Queries = nil
+
+ _, err = executorExec(executor, "delete from TestExecutor.zip_detail where id = 1", nil)
+ require.Error(t, err)
+}
+
func TestReservedConnDML(t *testing.T) {
executor, _, _, sbc := createExecutorEnv()
@@ -2014,7 +2474,7 @@ func TestReservedConnDML(t *testing.T) {
require.NoError(t, err)
wantQueries = append(wantQueries,
- &querypb.BoundQuery{Sql: "set @@default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}},
+ &querypb.BoundQuery{Sql: "set default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}},
&querypb.BoundQuery{Sql: "insert into `simple`() values ()", BindVariables: map[string]*querypb.BindVariable{}})
_, err = executor.Execute(ctx, "TestReservedConnDML", session, "insert into `simple`() values ()", nil)
require.NoError(t, err)
@@ -2029,7 +2489,7 @@ func TestReservedConnDML(t *testing.T) {
sbc.EphemeralShardErr = mysql.NewSQLError(mysql.CRServerGone, mysql.SSNetError, "connection gone")
// as the first time the query fails due to connection loss i.e. reserved conn lost. It will be recreated to set statement will be executed again.
wantQueries = append(wantQueries,
- &querypb.BoundQuery{Sql: "set @@default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}},
+ &querypb.BoundQuery{Sql: "set default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}},
&querypb.BoundQuery{Sql: "insert into `simple`() values ()", BindVariables: map[string]*querypb.BindVariable{}})
_, err = executor.Execute(ctx, "TestReservedConnDML", session, "insert into `simple`() values ()", nil)
require.NoError(t, err)
@@ -2378,3 +2838,42 @@ func TestInsertSelectFromTable(t *testing.T) {
testQueryLog(t, logChan, "TestInsertSelect", "INSERT", "insert into user(id, name) select c1, c2 from music", 9) // 8 from select and 1 from insert.
}
}
+
+func TestInsertReference(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv()
+
+ logChan := QueryLogger.Subscribe("Test")
+ defer QueryLogger.Unsubscribe(logChan)
+
+ _, err := executorExec(executor, "insert into zip_detail(id, status) values (1, 'CLOSED')", nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{{
+ Sql: "insert into zip_detail(id, `status`) values (1, 'CLOSED')",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}
+ assertQueries(t, sbc1, nil)
+ assertQueries(t, sbc2, nil)
+ assertQueries(t, sbclookup, wantQueries)
+
+ testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into zip_detail(id, status) values (1, 'CLOSED')", 1)
+
+ sbclookup.Queries = nil
+
+ _, err = executorExec(executor, "insert into TestUnsharded.zip_detail(id, status) values (1, 'CLOSED')", nil)
+ require.NoError(t, err)
+ wantQueries = []*querypb.BoundQuery{{
+ Sql: "insert into zip_detail(id, `status`) values (1, 'CLOSED')",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}
+ assertQueries(t, sbc1, nil)
+ assertQueries(t, sbc2, nil)
+ assertQueries(t, sbclookup, wantQueries)
+
+ testQueryLog(t, logChan, "TestExecute", "INSERT",
+ "insert into TestUnsharded.zip_detail(id, status) values (1, 'CLOSED')", 1)
+
+ sbclookup.Queries = nil
+
+ _, err = executorExec(executor, "insert into TestExecutor.zip_detail(id, status) values (1, 'CLOSED')", nil)
+ require.Error(t, err)
+}
diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go
index 4a2a1e7cfec..6c5acbc72cf 100644
--- a/go/vt/vtgate/executor_framework_test.go
+++ b/go/vt/vtgate/executor_framework_test.go
@@ -119,9 +119,49 @@ var executorVSchema = `
"table": "TestUnsharded.wo_lu_idx",
"from": "wo_lu_col",
"to": "keyspace_id",
- "write_only": "true"
+ "write_only": "true"
},
- "owner": "t2_wo_lookup"
+ "owner": "t2_lookup"
+ },
+ "t2_erl_lu_vdx": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "TestUnsharded.erl_lu_idx",
+ "from": "erl_lu_col",
+ "to": "keyspace_id",
+ "read_lock": "exclusive"
+ },
+ "owner": "t2_lookup"
+ },
+ "t2_srl_lu_vdx": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "TestUnsharded.srl_lu_idx",
+ "from": "srl_lu_col",
+ "to": "keyspace_id",
+ "read_lock": "shared"
+ },
+ "owner": "t2_lookup"
+ },
+ "t2_nrl_lu_vdx": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "TestUnsharded.nrl_lu_idx",
+ "from": "nrl_lu_col",
+ "to": "keyspace_id",
+ "read_lock": "none"
+ },
+ "owner": "t2_lookup"
+ },
+ "t2_nv_lu_vdx": {
+ "type": "lookup_unique",
+ "params": {
+ "table": "TestUnsharded.nv_lu_idx",
+ "from": "nv_lu_col",
+ "to": "keyspace_id",
+ "no_verify": "true"
+ },
+ "owner": "t2_lookup"
},
"t2_lu_vdx": {
"type": "lookup_hash_unique",
@@ -130,14 +170,17 @@ var executorVSchema = `
"from": "lu_col",
"to": "keyspace_id"
},
- "owner": "t2_wo_lookup"
+ "owner": "t2_lookup"
},
"regional_vdx": {
"type": "region_experimental",
"params": {
"region_bytes": "1"
}
- }
+ },
+ "cfc": {
+ "type": "cfc"
+ }
},
"tables": {
"user": {
@@ -294,15 +337,31 @@ var executorVSchema = `
}
]
},
- "t2_wo_lookup": {
+ "t2_lookup": {
"column_vindexes": [
{
"column": "id",
"name": "hash_index"
},
{
- "column": "wo_lu_col",
- "name": "t2_wo_lu_vdx"
+ "column": "wo_lu_col",
+ "name": "t2_wo_lu_vdx"
+ },
+ {
+ "column": "erl_lu_col",
+ "name": "t2_erl_lu_vdx"
+ },
+ {
+ "column": "srl_lu_col",
+ "name": "t2_srl_lu_vdx"
+ },
+ {
+ "column": "nrl_lu_col",
+ "name": "t2_nrl_lu_vdx"
+ },
+ {
+ "column": "nv_lu_col",
+ "name": "t2_nv_lu_vdx"
},
{
"column": "lu_col",
@@ -317,7 +376,25 @@ var executorVSchema = `
"name": "regional_vdx"
}
]
- }
+ },
+ "tbl_cfc": {
+ "column_vindexes": [
+ {
+ "column": "c1",
+ "name": "cfc"
+ }
+ ],
+ "columns": [
+ {
+ "name": "c2",
+ "type": "VARCHAR"
+ }
+ ]
+ },
+ "zip_detail": {
+ "type": "reference",
+ "source": "TestUnsharded.zip_detail"
+ }
}
}
`
@@ -350,8 +427,13 @@ var unshardedVSchema = `
}
},
"wo_lu_idx": {},
+ "erl_lu_idx": {},
+ "srl_lu_idx": {},
+ "nrl_lu_idx": {},
+ "nv_lu_idx": {},
"lu_idx": {},
- "simple": {}
+ "simple": {},
+ "zip_detail": {}
}
}
`
diff --git a/go/vt/vtgate/executor_scatter_stats.go b/go/vt/vtgate/executor_scatter_stats.go
index 56daa37819f..946558e22fd 100644
--- a/go/vt/vtgate/executor_scatter_stats.go
+++ b/go/vt/vtgate/executor_scatter_stats.go
@@ -109,7 +109,7 @@ func (e *Executor) gatherScatterStats() (statsResults, error) {
PercentTimeOfScatters: 100 * float64(execTime) / float64(scatterExecTime),
PercentCountOfReads: 100 * float64(execCount) / float64(readOnlyCount),
PercentCountOfScatters: 100 * float64(execCount) / float64(scatterCount),
- From: route.Keyspace.Name + "." + route.TableName,
+ From: route.Keyspace.Name + "." + route.GetTableName(),
Count: execCount,
}
}
diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go
index e116cd83988..4aa51368be2 100644
--- a/go/vt/vtgate/executor_select_test.go
+++ b/go/vt/vtgate/executor_select_test.go
@@ -19,12 +19,15 @@ package vtgate
import (
"context"
"fmt"
+ "os"
"runtime"
"strconv"
"strings"
"testing"
"time"
+ _flag "vitess.io/vitess/go/internal/flag"
+
"vitess.io/vitess/go/vt/sqlparser"
"github.com/google/go-cmp/cmp"
@@ -158,7 +161,7 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) {
executor, sbc1, _, _ := createExecutorEnv()
executor.normalize = true
- sqlparser.MySQLVersion = "57000"
+ sqlparser.SetParserVersion("57000")
setVarEnabled = true
session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
@@ -183,7 +186,7 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) {
wantQueries := []*querypb.BoundQuery{
{Sql: "select @@sql_mode orig, 'only_full_group_by' new"},
- {Sql: "set @@sql_mode = 'only_full_group_by'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
+ {Sql: "set sql_mode = 'only_full_group_by'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
{Sql: "select :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
}
@@ -194,7 +197,7 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) {
executor, sbc1, _, _ := createExecutorEnv()
executor.normalize = true
- sqlparser.MySQLVersion = "80000"
+ sqlparser.SetParserVersion("80000")
setVarEnabled = false
defer func() {
setVarEnabled = true
@@ -221,7 +224,7 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) {
wantQueries := []*querypb.BoundQuery{
{Sql: "select @@sql_mode orig, 'only_full_group_by' new"},
- {Sql: "set @@sql_mode = 'only_full_group_by'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
+ {Sql: "set sql_mode = 'only_full_group_by'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
{Sql: "select :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
}
@@ -232,7 +235,7 @@ func TestSetSystemVariablesTx(t *testing.T) {
executor, sbc1, _, _ := createExecutorEnv()
executor.normalize = true
- sqlparser.MySQLVersion = "80001"
+ sqlparser.SetParserVersion("80001")
session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"})
@@ -280,7 +283,7 @@ func TestSetSystemVariables(t *testing.T) {
executor, _, _, lookup := createExecutorEnv()
executor.normalize = true
- sqlparser.MySQLVersion = "80001"
+ sqlparser.SetParserVersion("80001")
session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded, SystemVariables: map[string]string{}})
@@ -383,7 +386,7 @@ func TestSetSystemVariables(t *testing.T) {
wantQueries = []*querypb.BoundQuery{
{Sql: "select 1 from dual where @@max_tmp_tables != 1"},
- {Sql: "set @@max_tmp_tables = '1', @@sql_mode = 'only_full_group_by', @@sql_safe_updates = '0'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
+ {Sql: "set max_tmp_tables = '1', sql_mode = 'only_full_group_by', sql_safe_updates = '0'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
{Sql: "select :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
}
utils.MustMatch(t, wantQueries, lookup.Queries)
@@ -413,7 +416,7 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) {
require.True(t, session.InReservedConn())
wantQueries := []*querypb.BoundQuery{
{Sql: "select @@sql_mode orig, '' new"},
- {Sql: "set @@sql_mode = ''"},
+ {Sql: "set sql_mode = ''"},
{Sql: "select age, city, weight_string(age) from `user` group by age, weight_string(age) order by age asc"},
}
utils.MustMatch(t, wantQueries, sbc1.Queries)
@@ -423,7 +426,7 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) {
require.True(t, session.InReservedConn())
wantQueries = []*querypb.BoundQuery{
{Sql: "select @@sql_mode orig, '' new"},
- {Sql: "set @@sql_mode = ''"},
+ {Sql: "set sql_mode = ''"},
{Sql: "select age, city, weight_string(age) from `user` group by age, weight_string(age) order by age asc"},
{Sql: "select age, city + :vtg1, weight_string(age) from `user` group by age, weight_string(age) order by age asc", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}},
}
@@ -444,7 +447,7 @@ func TestCreateTableValidTimestamp(t *testing.T) {
require.True(t, session.InReservedConn())
wantQueries := []*querypb.BoundQuery{
- {Sql: "set @@sql_mode = ALLOW_INVALID_DATES", BindVariables: map[string]*querypb.BindVariable{}},
+ {Sql: "set sql_mode = ALLOW_INVALID_DATES", BindVariables: map[string]*querypb.BindVariable{}},
{Sql: "create table aa (\n\tt timestamp default 0\n)", BindVariables: map[string]*querypb.BindVariable{}},
}
@@ -456,13 +459,14 @@ func TestGen4SelectDBA(t *testing.T) {
executor.normalize = true
executor.pv = querypb.ExecuteOptions_Gen4
- query := "select * from INFORMATION_SCHEMA.foo"
+ query := "select * from INFORMATION_SCHEMA.TABLE_CONSTRAINTS"
_, err := executor.Execute(context.Background(), "TestSelectDBA",
NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}),
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
- wantQueries := []*querypb.BoundQuery{{Sql: query, BindVariables: map[string]*querypb.BindVariable{}}}
+ expected := "select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, TABLE_SCHEMA, TABLE_NAME, CONSTRAINT_TYPE, `ENFORCED` from INFORMATION_SCHEMA.TABLE_CONSTRAINTS"
+ wantQueries := []*querypb.BoundQuery{{Sql: expected, BindVariables: map[string]*querypb.BindVariable{}}}
utils.MustMatch(t, wantQueries, sbc1.Queries)
sbc1.Queries = nil
@@ -472,12 +476,12 @@ func TestGen4SelectDBA(t *testing.T) {
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
- wantQueries = []*querypb.BoundQuery{{Sql: "select count(*) from INFORMATION_SCHEMA.`TABLES` as ist where ist.table_schema = :__vtschemaname and ist.table_name = :ist_table_name",
+ wantQueries = []*querypb.BoundQuery{{Sql: "select count(*) from INFORMATION_SCHEMA.`TABLES` as ist where ist.table_schema = :__vtschemaname and ist.table_name = :ist_table_name1",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.StringBindVariable("performance_schema"),
- "vtg2": sqltypes.StringBindVariable("foo"),
- "__vtschemaname": sqltypes.StringBindVariable("performance_schema"),
- "ist_table_name": sqltypes.StringBindVariable("foo"),
+ "ist_table_schema": sqltypes.StringBindVariable("performance_schema"),
+ "__vtschemaname": sqltypes.StringBindVariable("performance_schema"),
+ "ist_table_name": sqltypes.StringBindVariable("foo"),
+ "ist_table_name1": sqltypes.StringBindVariable("foo"),
}}}
utils.MustMatch(t, wantQueries, sbc1.Queries)
@@ -488,13 +492,13 @@ func TestGen4SelectDBA(t *testing.T) {
query, map[string]*querypb.BindVariable{},
)
require.NoError(t, err)
- wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 from information_schema.table_constraints where constraint_schema = :__vtschemaname and table_name = :table_name",
+ wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 from information_schema.table_constraints where constraint_schema = :__vtschemaname and table_name = :table_name1",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.Int64BindVariable(1),
- "vtg2": sqltypes.StringBindVariable("vt_ks"),
- "vtg3": sqltypes.StringBindVariable("user"),
- "__vtschemaname": sqltypes.StringBindVariable("vt_ks"),
- "table_name": sqltypes.StringBindVariable("user"),
+ "vtg1": sqltypes.Int64BindVariable(1),
+ "constraint_schema": sqltypes.StringBindVariable("vt_ks"),
+ "table_name": sqltypes.StringBindVariable("user"),
+ "__vtschemaname": sqltypes.StringBindVariable("vt_ks"),
+ "table_name1": sqltypes.StringBindVariable("user"),
}}}
utils.MustMatch(t, wantQueries, sbc1.Queries)
@@ -507,9 +511,9 @@ func TestGen4SelectDBA(t *testing.T) {
require.NoError(t, err)
wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 from information_schema.table_constraints where constraint_schema = :__vtschemaname",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.Int64BindVariable(1),
- "vtg2": sqltypes.StringBindVariable("vt_ks"),
- "__vtschemaname": sqltypes.StringBindVariable("vt_ks"),
+ "vtg1": sqltypes.Int64BindVariable(1),
+ "constraint_schema": sqltypes.StringBindVariable("vt_ks"),
+ "__vtschemaname": sqltypes.StringBindVariable("vt_ks"),
}}}
utils.MustMatch(t, wantQueries, sbc1.Queries)
@@ -522,7 +526,7 @@ func TestGen4SelectDBA(t *testing.T) {
require.NoError(t, err)
wantQueries = []*querypb.BoundQuery{{Sql: "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.StringBindVariable("TestExecutor"),
+ "t_table_schema": sqltypes.StringBindVariable("TestExecutor"),
"__replacevtschemaname": sqltypes.Int64BindVariable(1),
}}}
utils.MustMatch(t, wantQueries, sbc1.Queries)
@@ -756,7 +760,7 @@ func TestSelectSystemVariables(t *testing.T) {
sql := "select @@autocommit, @@client_found_rows, @@skip_query_plan_cache, @@enable_system_settings, " +
"@@sql_select_limit, @@transaction_mode, @@workload, @@read_after_write_gtid, " +
- "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy, @@socket"
+ "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy, @@socket, @@query_timeout"
result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{})
wantResult := &sqltypes.Result{
@@ -773,6 +777,7 @@ func TestSelectSystemVariables(t *testing.T) {
{Name: "@@session_track_gtids", Type: sqltypes.VarChar},
{Name: "@@ddl_strategy", Type: sqltypes.VarChar},
{Name: "@@socket", Type: sqltypes.VarChar},
+ {Name: "@@query_timeout", Type: sqltypes.Int64},
},
Rows: [][]sqltypes.Value{{
// the following are the uninitialised session values
@@ -789,6 +794,7 @@ func TestSelectSystemVariables(t *testing.T) {
sqltypes.NewVarChar("own_gtid"),
sqltypes.NewVarChar(""),
sqltypes.NewVarChar(""),
+ sqltypes.NewInt64(0),
}},
}
require.NoError(t, err)
@@ -803,23 +809,27 @@ func TestSelectInitializedVitessAwareVariable(t *testing.T) {
primarySession.Autocommit = true
primarySession.EnableSystemSettings = true
+ primarySession.QueryTimeout = 75
defer func() {
primarySession.Autocommit = false
primarySession.EnableSystemSettings = false
+ primarySession.QueryTimeout = 0
}()
- sql := "select @@autocommit, @@enable_system_settings"
+ sql := "select @@autocommit, @@enable_system_settings, @@query_timeout"
result, err := executorExec(executor, sql, nil)
wantResult := &sqltypes.Result{
Fields: []*querypb.Field{
{Name: "@@autocommit", Type: sqltypes.Int64},
{Name: "@@enable_system_settings", Type: sqltypes.Int64},
+ {Name: "@@query_timeout", Type: sqltypes.Int64},
},
Rows: [][]sqltypes.Value{{
sqltypes.NewInt64(1),
sqltypes.NewInt64(1),
+ sqltypes.NewInt64(75),
}},
}
require.NoError(t, err)
@@ -1213,6 +1223,21 @@ func TestSelectEqual(t *testing.T) {
utils.MustMatch(t, wantQueries, sbclookup.Queries)
}
+func TestSelectINFromOR(t *testing.T) {
+ executor, sbc1, _, _ := createExecutorEnv()
+ executor.pv = querypb.ExecuteOptions_Gen4
+
+ _, err := executorExec(executor, "select 1 from user where id = 1 and name = 'apa' or id = 2 and name = 'toto'", nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{{
+ Sql: "select 1 from `user` where id = 1 and `name` = 'apa' or id = 2 and `name` = 'toto'",
+ BindVariables: map[string]*querypb.BindVariable{
+ "__vals": sqltypes.TestBindVariable([]any{int64(1), int64(2)}),
+ },
+ }}
+ utils.MustMatch(t, wantQueries, sbc1.Queries)
+}
+
func TestSelectDual(t *testing.T) {
executor, sbc1, _, lookup := createExecutorEnv()
@@ -1252,9 +1277,9 @@ func TestSelectNormalize(t *testing.T) {
_, err := executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil)
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{{
- Sql: "/* leading */ select id from `user` where id = :vtg1 /* trailing */",
+ Sql: "/* leading */ select id from `user` where id = :id /* trailing */",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.TestBindVariable(int64(1)),
+ "id": sqltypes.TestBindVariable(int64(1)),
},
}}
utils.MustMatch(t, wantQueries, sbc1.Queries)
@@ -1268,9 +1293,9 @@ func TestSelectNormalize(t *testing.T) {
_, err = executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil)
require.NoError(t, err)
wantQueries = []*querypb.BoundQuery{{
- Sql: "/* leading */ select id from `user` where id = :vtg1 /* trailing */",
+ Sql: "/* leading */ select id from `user` where id = :id /* trailing */",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.TestBindVariable(int64(1)),
+ "id": sqltypes.TestBindVariable(int64(1)),
},
}}
require.Empty(t, sbc1.Queries)
@@ -3110,10 +3135,10 @@ func TestGen4MultiColumnVindexEqual(t *testing.T) {
require.NoError(t, err)
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select * from user_region where cola = :vtg1 and colb = :vtg2",
+ Sql: "select * from user_region where cola = :cola and colb = :colb",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.Int64BindVariable(1),
- "vtg2": sqltypes.Int64BindVariable(2),
+ "cola": sqltypes.Int64BindVariable(1),
+ "colb": sqltypes.Int64BindVariable(2),
},
},
}
@@ -3131,10 +3156,10 @@ func TestGen4MultiColumnVindexEqual(t *testing.T) {
require.NoError(t, err)
wantQueries = []*querypb.BoundQuery{
{
- Sql: "select * from user_region where cola = :vtg1 and colb = :vtg2",
+ Sql: "select * from user_region where cola = :cola and colb = :colb",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.Int64BindVariable(17984),
- "vtg2": sqltypes.Int64BindVariable(1),
+ "cola": sqltypes.Int64BindVariable(17984),
+ "colb": sqltypes.Int64BindVariable(1),
},
},
}
@@ -3204,22 +3229,22 @@ func TestGen4MultiColMixedColComparision(t *testing.T) {
vals0sbc2, _ := sqltypes.BuildBindVariable([]int64{17984})
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select * from user_region where colb = :vtg1 and cola in ::__vals0",
+ Sql: "select * from user_region where colb = :colb and cola in ::__vals0",
BindVariables: map[string]*querypb.BindVariable{
"__vals0": vals0sbc1,
- "vtg1": bvtg1,
- "vtg2": bvtg2,
+ "colb": bvtg1,
+ "vtg1": bvtg2,
},
},
}
utils.MustMatch(t, wantQueries, sbc1.Queries)
wantQueries = []*querypb.BoundQuery{
{
- Sql: "select * from user_region where colb = :vtg1 and cola in ::__vals0",
+ Sql: "select * from user_region where colb = :colb and cola in ::__vals0",
BindVariables: map[string]*querypb.BindVariable{
"__vals0": vals0sbc2,
- "vtg1": bvtg1,
- "vtg2": bvtg2,
+ "colb": bvtg1,
+ "vtg1": bvtg2,
},
},
}
@@ -3242,11 +3267,11 @@ func TestGen4MultiColBestVindexSel(t *testing.T) {
bvtg2, _ := sqltypes.BuildBindVariable([]int64{1, 17984})
wantQueries := []*querypb.BoundQuery{
{
- Sql: "select * from user_region where colb = :vtg1 and cola in ::vtg2 and cola = :vtg3",
+ Sql: "select * from user_region where colb = :colb and cola in ::vtg1 and cola = :cola",
BindVariables: map[string]*querypb.BindVariable{
- "vtg1": sqltypes.Int64BindVariable(2),
- "vtg2": bvtg2,
- "vtg3": sqltypes.Int64BindVariable(1),
+ "colb": sqltypes.Int64BindVariable(2),
+ "vtg1": bvtg2,
+ "cola": sqltypes.Int64BindVariable(1),
},
},
}
@@ -3267,12 +3292,12 @@ func TestGen4MultiColBestVindexSel(t *testing.T) {
bvtg1, _ := sqltypes.BuildBindVariable([]int64{10, 20})
wantQueries = []*querypb.BoundQuery{
{
- Sql: "select * from user_region where colb in ::vtg1 and cola in ::vtg2 and cola = :vtg3 and colb = :vtg4",
+ Sql: "select * from user_region where colb in ::vtg1 and cola in ::vtg2 and cola = :cola and colb = :colb",
BindVariables: map[string]*querypb.BindVariable{
"vtg1": bvtg1,
"vtg2": bvtg2,
- "vtg3": sqltypes.Int64BindVariable(1),
- "vtg4": sqltypes.Int64BindVariable(2),
+ "cola": sqltypes.Int64BindVariable(1),
+ "colb": sqltypes.Int64BindVariable(2),
},
},
}
@@ -3307,6 +3332,112 @@ func TestGen4MultiColMultiEqual(t *testing.T) {
utils.MustMatch(t, wantQueries, sbc2.Queries)
}
+func TestGen4SelectUnqualifiedReferenceTable(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv()
+ executor.pv = querypb.ExecuteOptions_Gen4
+
+ query := "select * from zip_detail"
+ _, err := executorExec(executor, query, nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{
+ {
+ Sql: query,
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+ utils.MustMatch(t, wantQueries, sbclookup.Queries)
+ require.Nil(t, sbc1.Queries)
+ require.Nil(t, sbc2.Queries)
+}
+
+func TestGen4SelectQualifiedReferenceTable(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv()
+ executor.pv = querypb.ExecuteOptions_Gen4
+
+ query := fmt.Sprintf("select * from %s.zip_detail", KsTestSharded)
+ _, err := executorExec(executor, query, nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select * from zip_detail",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+ require.Nil(t, sbclookup.Queries)
+ utils.MustMatch(t, wantQueries, sbc1.Queries)
+ require.Nil(t, sbc2.Queries)
+}
+
+func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv()
+ executor.pv = querypb.ExecuteOptions_Gen4
+
+ query := "select * from user join zip_detail on user.zip_detail_id = zip_detail.id"
+ _, err := executorExec(executor, query, nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select * from `user`, zip_detail where `user`.zip_detail_id = zip_detail.id",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+ require.Nil(t, sbclookup.Queries)
+ utils.MustMatch(t, wantQueries, sbc1.Queries)
+ utils.MustMatch(t, wantQueries, sbc2.Queries)
+
+ sbc1.Queries = nil
+ sbc2.Queries = nil
+
+ query = "select * from simple join zip_detail on simple.zip_detail_id = zip_detail.id"
+ _, err = executorExec(executor, query, nil)
+ require.NoError(t, err)
+ wantQueries = []*querypb.BoundQuery{
+ {
+ Sql: "select * from `simple` join zip_detail on `simple`.zip_detail_id = zip_detail.id",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+ utils.MustMatch(t, wantQueries, sbclookup.Queries)
+ require.Nil(t, sbc1.Queries)
+ require.Nil(t, sbc2.Queries)
+}
+
+func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) {
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv()
+ executor.pv = querypb.ExecuteOptions_Gen4
+
+ query := "select user.id from user join TestUnsharded.zip_detail on user.zip_detail_id = TestUnsharded.zip_detail.id"
+ _, err := executorExec(executor, query, nil)
+ require.NoError(t, err)
+
+ shardedWantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select `user`.id from `user`, zip_detail where `user`.zip_detail_id = zip_detail.id",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+ require.Nil(t, sbclookup.Queries)
+ utils.MustMatch(t, shardedWantQueries, sbc1.Queries)
+ utils.MustMatch(t, shardedWantQueries, sbc2.Queries)
+
+ sbclookup.Queries = nil
+ sbc1.Queries = nil
+ sbc2.Queries = nil
+
+ query = "select simple.id from simple join TestExecutor.zip_detail on simple.zip_detail_id = TestExecutor.zip_detail.id"
+ _, err = executorExec(executor, query, nil)
+ require.NoError(t, err)
+ unshardedWantQueries := []*querypb.BoundQuery{
+ {
+ Sql: "select `simple`.id from `simple` join zip_detail on `simple`.zip_detail_id = zip_detail.id",
+ BindVariables: map[string]*querypb.BindVariable{},
+ },
+ }
+ utils.MustMatch(t, unshardedWantQueries, sbclookup.Queries)
+ require.Nil(t, sbc1.Queries)
+ require.Nil(t, sbc2.Queries)
+}
+
func TestRegionRange(t *testing.T) {
// Special setup: Don't use createExecutorEnv.
@@ -3611,7 +3742,7 @@ func TestSelectAggregationData(t *testing.T) {
}{
{
sql: `select count(distinct col) from user`,
- sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col", "int64"), "1", "2", "2", "3"),
+ sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|weight_string(col)", "int64|varbinary"), "1|NULL", "2|NULL", "2|NULL", "3|NULL"),
expSandboxQ: "select col, weight_string(col) from `user` group by col, weight_string(col) order by col asc",
expField: `[name:"count(distinct col)" type:INT64]`,
expRow: `[[INT64(3)]]`,
@@ -3625,14 +3756,14 @@ func TestSelectAggregationData(t *testing.T) {
},
{
sql: `select col, count(*) from user group by col`,
- sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)", "int64|int64"), "1|3"),
+ sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)|weight_string(col)", "int64|int64|varbinary"), "1|3|NULL"),
expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc",
expField: `[name:"col" type:INT64 name:"count(*)" type:INT64]`,
expRow: `[[INT64(1) INT64(24)]]`,
},
{
sql: `select col, count(*) from user group by col limit 2`,
- sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)", "int64|int64"), "1|2", "2|1", "3|4"),
+ sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)|weight_string(col)", "int64|int64|varbinary"), "1|2|NULL", "2|1|NULL", "3|4|NULL"),
expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc limit :__upper_limit",
expField: `[name:"col" type:INT64 name:"count(*)" type:INT64]`,
expRow: `[[INT64(1) INT64(16)] [INT64(2) INT64(8)]]`,
@@ -3725,6 +3856,40 @@ func TestSelectAggregationData(t *testing.T) {
}
}
+func TestSelectAggregationRandom(t *testing.T) {
+ cell := "aa"
+ hc := discovery.NewFakeHealthCheck(nil)
+ createSandbox(KsTestSharded).VSchema = executorVSchema
+ getSandbox(KsTestUnsharded).VSchema = unshardedVSchema
+ serv := newSandboxForCells([]string{cell})
+ resolver := newTestResolver(hc, serv, cell)
+ shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"}
+ var conns []*sandboxconn.SandboxConn
+ for _, shard := range shards {
+ sbc := hc.AddTestTablet(cell, shard, 1, KsTestSharded, shard, topodatapb.TabletType_PRIMARY, true, 1, nil)
+ conns = append(conns, sbc)
+
+ sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields("a|b", "int64|int64"),
+ "null|null",
+ )})
+ }
+
+ conns[0].SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields("a|b", "int64|int64"),
+ "10|1",
+ )})
+
+ executor := createExecutor(serv, cell, resolver)
+ executor.pv = querypb.ExecuteOptions_Gen4
+ session := NewAutocommitSession(&vtgatepb.Session{})
+
+ rs, err := executor.Execute(context.Background(), "TestSelectCFC", session,
+ "select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as c from (select sum(a) as a, sum(b) as b from user) A", nil)
+ require.NoError(t, err)
+ assert.Equal(t, `[[INT64(10) INT64(1) DECIMAL(10.0000)]]`, fmt.Sprintf("%v", rs.Rows))
+}
+
func TestSelectHexAndBit(t *testing.T) {
executor, _, _, _ := createExecutorEnv()
executor.normalize = true
@@ -3740,3 +3905,82 @@ func TestSelectHexAndBit(t *testing.T) {
require.NoError(t, err)
require.Equal(t, `[[UINT64(10) UINT64(10) UINT64(10) UINT64(10)]]`, fmt.Sprintf("%v", qr.Rows))
}
+
+// TestSelectCFC tests validates that cfc vindex plan gets cached and same plan is getting reused.
+// This also validates that cache_size is able to calculate the cfc vindex plan size.
+func TestSelectCFC(t *testing.T) {
+ executor, _, _, _ := createExecutorEnv()
+ executor.normalize = true
+ session := NewAutocommitSession(&vtgatepb.Session{})
+
+ _, err := executor.Execute(context.Background(), "TestSelectCFC", session,
+ "select /*vt+ PLANNER=gen4 */ c2 from tbl_cfc where c1 like 'A%'", nil)
+ require.NoError(t, err)
+
+ timeout := time.After(10 * time.Second)
+ for {
+ select {
+ case <-timeout:
+ t.Fatal("not able to cache a plan withing 10 seconds.")
+ case <-time.After(5 * time.Millisecond):
+ // should be able to find cache entry before the timeout.
+ cacheItems := executor.debugCacheEntries()
+ for _, item := range cacheItems {
+ if strings.Contains(item.Key, "c2 from tbl_cfc where c1 like") {
+ return
+ }
+ }
+ }
+ }
+}
+
+func TestSelectView(t *testing.T) {
+ executor, sbc, _, _ := createExecutorEnv()
+ // add the view to local vschema
+ err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id")
+ require.NoError(t, err)
+
+ executor.normalize = true
+ session := NewAutocommitSession(&vtgatepb.Session{})
+
+ _, err = executor.Execute(context.Background(), "TestSelectView", session,
+ "select * from user_details_view", nil)
+ require.NoError(t, err)
+ wantQueries := []*querypb.BoundQuery{{
+ Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view",
+ BindVariables: map[string]*querypb.BindVariable{},
+ }}
+ utils.MustMatch(t, wantQueries, sbc.Queries)
+
+ sbc.Queries = nil
+ _, err = executor.Execute(context.Background(), "TestSelectView", session,
+ "select * from user_details_view where id = 2", nil)
+ require.NoError(t, err)
+ wantQueries = []*querypb.BoundQuery{{
+ Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view where id = :id",
+ BindVariables: map[string]*querypb.BindVariable{
+ "id": sqltypes.Int64BindVariable(2),
+ },
+ }}
+ utils.MustMatch(t, wantQueries, sbc.Queries)
+
+ sbc.Queries = nil
+ _, err = executor.Execute(context.Background(), "TestSelectView", session,
+ "select * from user_details_view where id in (1,2,3,4,5)", nil)
+ require.NoError(t, err)
+ bvtg1, _ := sqltypes.BuildBindVariable([]int64{1, 2, 3, 4, 5})
+ bvals, _ := sqltypes.BuildBindVariable([]int64{1, 2})
+ wantQueries = []*querypb.BoundQuery{{
+ Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view where id in ::__vals",
+ BindVariables: map[string]*querypb.BindVariable{
+ "vtg1": bvtg1,
+ "__vals": bvals,
+ },
+ }}
+ utils.MustMatch(t, wantQueries, sbc.Queries)
+}
+
+func TestMain(m *testing.M) {
+ _flag.ParseFlagsForTest()
+ os.Exit(m.Run())
+}
diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go
index 673c62db138..36cfc9d9140 100644
--- a/go/vt/vtgate/executor_set_test.go
+++ b/go/vt/vtgate/executor_set_test.go
@@ -21,6 +21,7 @@ import (
"fmt"
"testing"
+ "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/sqlparser"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -154,6 +155,12 @@ func TestExecutorSet(t *testing.T) {
}, {
in: "set workload = 1",
err: "incorrect argument type to variable 'workload': INT64",
+ }, {
+ in: "set tx_isolation = 'read-committed'",
+ out: &vtgatepb.Session{Autocommit: true},
+ }, {
+ in: "set transaction_isolation = 'read-committed'",
+ out: &vtgatepb.Session{Autocommit: true},
}, {
in: "set transaction_mode = 'twopc', autocommit=1",
out: &vtgatepb.Session{Autocommit: true, TransactionMode: vtgatepb.TransactionMode_TWOPC},
@@ -177,7 +184,7 @@ func TestExecutorSet(t *testing.T) {
out: &vtgatepb.Session{Autocommit: true},
}, {
in: "set foo = 1",
- err: "Unknown system variable '@@foo = 1'",
+ err: "VT05006: unknown system variable '@@foo = 1'",
}, {
in: "set names utf8",
out: &vtgatepb.Session{Autocommit: true},
@@ -218,14 +225,17 @@ func TestExecutorSet(t *testing.T) {
in: "set session transaction isolation level serializable",
out: &vtgatepb.Session{Autocommit: true},
}, {
- in: "set transaction isolation level serializable",
- out: &vtgatepb.Session{Autocommit: true},
+ in: "set transaction isolation level serializable",
+ out: &vtgatepb.Session{
+ Autocommit: true,
+ Warnings: []*querypb.QueryWarning{{Code: mysql.ERNotSupportedYet, Message: "converted 'next transaction' scope to 'session' scope"}},
+ },
}, {
in: "set transaction read only",
- out: &vtgatepb.Session{Autocommit: true},
+ out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: mysql.ERNotSupportedYet, Message: "converted 'next transaction' scope to 'session' scope"}}},
}, {
in: "set transaction read write",
- out: &vtgatepb.Session{Autocommit: true},
+ out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: mysql.ERNotSupportedYet, Message: "converted 'next transaction' scope to 'session' scope"}}},
}, {
in: "set session transaction read write",
out: &vtgatepb.Session{Autocommit: true},
@@ -249,7 +259,13 @@ func TestExecutorSet(t *testing.T) {
out: &vtgatepb.Session{Autocommit: true, EnableSystemSettings: false},
}, {
in: "set @@socket = '/tmp/change.sock'",
- err: "variable 'socket' is a read only variable",
+ err: "VT03010: variable 'socket' is a read only variable",
+ }, {
+ in: "set @@query_timeout = 50",
+ out: &vtgatepb.Session{Autocommit: true, QueryTimeout: 50},
+ }, {
+ in: "set @@query_timeout = 50, query_timeout = 75",
+ out: &vtgatepb.Session{Autocommit: true, QueryTimeout: 75},
}}
for i, tcase := range testcases {
t.Run(fmt.Sprintf("%d-%s", i, tcase.in), func(t *testing.T) {
@@ -283,7 +299,7 @@ func TestExecutorSetOp(t *testing.T) {
disallowResConn bool
result *sqltypes.Result
}{{
- in: "set big_tables = 1", //ignore
+ in: "set big_tables = 1", // ignore
}, {
in: "set sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_UPDATES'",
sysVars: map[string]string{"sql_mode": "'STRICT_ALL_TABLES,NO_AUTO_UPDATES'"},
@@ -299,10 +315,6 @@ func TestExecutorSetOp(t *testing.T) {
in: "set sql_safe_updates = 1",
sysVars: map[string]string{"sql_safe_updates": "1"},
result: returnResult("sql_safe_updates", "int64", "1"),
- }, {
- in: "set tx_isolation = 'read-committed'",
- sysVars: map[string]string{"tx_isolation": "'read-committed'"},
- result: returnResult("tx_isolation", "varchar", "read-committed"),
}, {
in: "set sql_quote_show_create = 0",
sysVars: map[string]string{"sql_quote_show_create": "0"},
@@ -347,6 +359,10 @@ func TestExecutorSetOp(t *testing.T) {
}, {
in: "set global client_found_rows = 1",
result: returnNoResult("client_found_rows", "int64"),
+ }, {
+ in: "set tx_isolation = 'read-committed'",
+ sysVars: map[string]string{"tx_isolation": "'read-committed'"},
+ result: returnResult("tx_isolation", "varchar", "read-committed"),
}}
for _, tcase := range testcases {
t.Run(tcase.in, func(t *testing.T) {
@@ -492,10 +508,10 @@ func TestSetVar(t *testing.T) {
executor, _, _, sbc := createExecutorEnv()
executor.normalize = true
- oldVersion := sqlparser.MySQLVersion
- sqlparser.MySQLVersion = "80000"
+ oldVersion := sqlparser.GetParserVersion()
+ sqlparser.SetParserVersion("80000")
defer func() {
- sqlparser.MySQLVersion = oldVersion
+ sqlparser.SetParserVersion(oldVersion)
}()
session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded})
@@ -536,10 +552,10 @@ func TestSetVarShowVariables(t *testing.T) {
executor, _, _, sbc := createExecutorEnv()
executor.normalize = true
- oldVersion := sqlparser.MySQLVersion
- sqlparser.MySQLVersion = "80000"
+ oldVersion := sqlparser.GetParserVersion()
+ sqlparser.SetParserVersion("80000")
defer func() {
- sqlparser.MySQLVersion = oldVersion
+ sqlparser.SetParserVersion(oldVersion)
}()
session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded})
@@ -560,3 +576,55 @@ func TestSetVarShowVariables(t *testing.T) {
assert.False(t, session.InReservedConn(), "reserved connection should not be used")
assert.Equal(t, `[[VARCHAR("sql_mode") VARCHAR("only_full_group_by")]]`, fmt.Sprintf("%v", qr.Rows))
}
+
+func TestExecutorSetAndSelect(t *testing.T) {
+ e, _, _, sbc := createExecutorEnv()
+ e.normalize = true
+
+ testcases := []struct {
+ sysVar string
+ val string
+ exp string
+ }{{
+ sysVar: "transaction_isolation",
+ exp: `[[VARCHAR("REPEATABLE-READ")]]`,
+ }, {
+ sysVar: "transaction_isolation",
+ val: "READ-COMMITTED",
+ exp: `[[VARCHAR("READ-COMMITTED")]]`,
+ }, {
+ sysVar: "tx_isolation",
+ val: "READ-UNCOMMITTED",
+ exp: `[[VARCHAR("READ-UNCOMMITTED")]]`,
+ }, {
+ sysVar: "tx_isolation",
+ exp: `[[VARCHAR("READ-UNCOMMITTED")]]`, // this returns the value set in previous query.
+ }}
+ session := NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded, EnableSystemSettings: true})
+ for _, tcase := range testcases {
+ t.Run(fmt.Sprintf("%s-%s", tcase.sysVar, tcase.val), func(t *testing.T) {
+ sbc.ExecCount.Set(0) // reset the value
+
+ if tcase.val != "" {
+ // check query result for `select from dual where @@transaction_isolation !=
+ // not always the check query is the first query, so setting it two times, as it will use one of those results.
+ sbc.SetResults([]*sqltypes.Result{
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(tcase.sysVar, "varchar"), tcase.val), // one for set prequeries
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(tcase.sysVar, "varchar"), tcase.val), // second for check query
+ sqltypes.MakeTestResult(nil)}) // third one for new set query
+
+ setQ := fmt.Sprintf("set %s = '%s'", tcase.sysVar, tcase.val)
+ _, err := e.Execute(context.Background(), "TestExecutorSetAndSelect", session, setQ, nil)
+ require.NoError(t, err)
+ }
+
+ selectQ := fmt.Sprintf("select @@%s", tcase.sysVar)
+ // if the query reaches the shard, it will return REPEATABLE-READ isolation level.
+ sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(sqltypes.MakeTestFields(tcase.sysVar, "varchar"), "REPEATABLE-READ")})
+
+ qr, err := e.Execute(context.Background(), "TestExecutorSetAndSelect", session, selectQ, nil)
+ require.NoError(t, err)
+ assert.Equal(t, tcase.exp, fmt.Sprintf("%v", qr.Rows))
+ })
+ }
+}
diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go
index 6e80f3841aa..e8caceb385e 100644
--- a/go/vt/vtgate/executor_test.go
+++ b/go/vt/vtgate/executor_test.go
@@ -25,6 +25,7 @@ import (
"net/http"
"net/http/httptest"
"reflect"
+ "sort"
"strings"
"testing"
@@ -496,6 +497,19 @@ func TestExecutorShowColumns(t *testing.T) {
}
+func sortString(w string) string {
+ s := strings.Split(w, "")
+ sort.Strings(s)
+ return strings.Join(s, "")
+}
+
+func assertMatchesNoOrder(t *testing.T, expected, got string) {
+ t.Helper()
+ if sortString(expected) != sortString(got) {
+ t.Errorf("for query: expected \n%s \nbut actual \n%s", expected, got)
+ }
+}
+
func TestExecutorShow(t *testing.T) {
executor, _, _, sbclookup := createExecutorEnv()
session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"})
@@ -503,14 +517,14 @@ func TestExecutorShow(t *testing.T) {
for _, query := range []string{"show vitess_keyspaces", "show keyspaces"} {
qr, err := executor.Execute(ctx, "TestExecute", session, query, nil)
require.NoError(t, err)
- require.EqualValues(t, 5, len(qr.Rows), fmt.Sprintf("unexpected results running query: %s", query))
+ assertMatchesNoOrder(t, `[[VARCHAR("TestUnsharded")] [VARCHAR("TestMultiCol")] [VARCHAR("TestXBadVSchema")] [VARCHAR("TestXBadSharding")] [VARCHAR("TestExecutor")]]`, fmt.Sprintf("%v", qr.Rows))
}
for _, query := range []string{"show databases", "show DATABASES", "show schemas", "show SCHEMAS"} {
qr, err := executor.Execute(ctx, "TestExecute", session, query, nil)
require.NoError(t, err)
// Showing default tables (5+4[default])
- require.EqualValues(t, 9, len(qr.Rows), fmt.Sprintf("unexpected results running query: %s", query))
+ assertMatchesNoOrder(t, `[[VARCHAR("TestUnsharded")] [VARCHAR("TestMultiCol")] [VARCHAR("TestXBadVSchema")] [VARCHAR("TestXBadSharding")] [VARCHAR("TestExecutor")]] [VARCHAR("information_schema")] [VARCHAR("mysql")] [VARCHAR("sys")] [VARCHAR("performance_schema")]`, fmt.Sprintf("%v", qr.Rows))
}
_, err := executor.Execute(ctx, "TestExecute", session, "show variables", nil)
@@ -853,6 +867,7 @@ func TestExecutorShow(t *testing.T) {
wantqr = &sqltypes.Result{
Fields: buildVarCharFields("Keyspace", "Name", "Type", "Params", "Owner"),
Rows: [][]sqltypes.Value{
+ buildVarCharRow("TestExecutor", "cfc", "cfc", "", ""),
buildVarCharRow("TestExecutor", "hash_index", "hash", "", ""),
buildVarCharRow("TestExecutor", "idx1", "hash", "", ""),
buildVarCharRow("TestExecutor", "idx_noauto", "hash", "", "noauto_table"),
@@ -865,8 +880,12 @@ func TestExecutorShow(t *testing.T) {
buildVarCharRow("TestExecutor", "name_user_map", "lookup_hash", "from=name; table=name_user_map; to=user_id", "user"),
buildVarCharRow("TestExecutor", "regional_vdx", "region_experimental", "region_bytes=1", ""),
buildVarCharRow("TestExecutor", "t1_lkp_vdx", "consistent_lookup_unique", "from=unq_col; table=t1_lkp_idx; to=keyspace_id", "t1"),
- buildVarCharRow("TestExecutor", "t2_lu_vdx", "lookup_hash_unique", "from=lu_col; table=TestUnsharded.lu_idx; to=keyspace_id", "t2_wo_lookup"),
- buildVarCharRow("TestExecutor", "t2_wo_lu_vdx", "lookup_unique", "from=wo_lu_col; table=TestUnsharded.wo_lu_idx; to=keyspace_id; write_only=true", "t2_wo_lookup"),
+ buildVarCharRow("TestExecutor", "t2_erl_lu_vdx", "lookup_unique", "from=erl_lu_col; read_lock=exclusive; table=TestUnsharded.erl_lu_idx; to=keyspace_id", "t2_lookup"),
+ buildVarCharRow("TestExecutor", "t2_lu_vdx", "lookup_hash_unique", "from=lu_col; table=TestUnsharded.lu_idx; to=keyspace_id", "t2_lookup"),
+ buildVarCharRow("TestExecutor", "t2_nrl_lu_vdx", "lookup_unique", "from=nrl_lu_col; read_lock=none; table=TestUnsharded.nrl_lu_idx; to=keyspace_id", "t2_lookup"),
+ buildVarCharRow("TestExecutor", "t2_nv_lu_vdx", "lookup_unique", "from=nv_lu_col; no_verify=true; table=TestUnsharded.nv_lu_idx; to=keyspace_id", "t2_lookup"),
+ buildVarCharRow("TestExecutor", "t2_srl_lu_vdx", "lookup_unique", "from=srl_lu_col; read_lock=shared; table=TestUnsharded.srl_lu_idx; to=keyspace_id", "t2_lookup"),
+ buildVarCharRow("TestExecutor", "t2_wo_lu_vdx", "lookup_unique", "from=wo_lu_col; table=TestUnsharded.wo_lu_idx; to=keyspace_id; write_only=true", "t2_lookup"),
buildVarCharRow("TestMultiCol", "multicol_vdx", "multicol", "column_bytes=1,3,4; column_count=3; column_vindex=hash,binary,unicode_loose_xxhash", ""),
},
}
@@ -891,7 +910,7 @@ func TestExecutorShow(t *testing.T) {
query = "show vschema vindexes on TestExecutor.garbage"
_, err = executor.Execute(ctx, "TestExecute", session, query, nil)
- wantErr = "table 'garbage' does not exist in keyspace 'TestExecutor'"
+ wantErr = "VT05005: table 'garbage' does not exist in keyspace 'TestExecutor'"
assert.EqualError(t, err, wantErr, query)
query = "show vschema vindexes on user"
@@ -922,7 +941,7 @@ func TestExecutorShow(t *testing.T) {
query = "show vschema vindexes on garbage"
_, err = executor.Execute(ctx, "TestExecute", session, query, nil)
- wantErr = "table 'garbage' does not exist in keyspace 'TestExecutor'"
+ wantErr = "VT05005: table 'garbage' does not exist in keyspace 'TestExecutor'"
assert.EqualError(t, err, wantErr, query)
query = "show warnings"
@@ -997,16 +1016,21 @@ func TestExecutorShow(t *testing.T) {
Fields: buildVarCharFields("Tables"),
Rows: [][]sqltypes.Value{
buildVarCharRow("dual"),
+ buildVarCharRow("erl_lu_idx"),
buildVarCharRow("ins_lookup"),
buildVarCharRow("lu_idx"),
buildVarCharRow("main1"),
buildVarCharRow("music_user_map"),
buildVarCharRow("name_lastname_keyspace_id_map"),
buildVarCharRow("name_user_map"),
+ buildVarCharRow("nrl_lu_idx"),
+ buildVarCharRow("nv_lu_idx"),
buildVarCharRow("simple"),
+ buildVarCharRow("srl_lu_idx"),
buildVarCharRow("user_msgs"),
buildVarCharRow("user_seq"),
buildVarCharRow("wo_lu_idx"),
+ buildVarCharRow("zip_detail"),
},
}
utils.MustMatch(t, wantqr, qr, query)
@@ -1025,17 +1049,17 @@ func TestExecutorShow(t *testing.T) {
query = "show vschema tables"
session = NewSafeSession(&vtgatepb.Session{TargetString: "no_such_keyspace"})
_, err = executor.Execute(ctx, "TestExecute", session, query, nil)
- want = "Unknown database 'no_such_keyspace' in vschema"
+ want = "VT05003: unknown database 'no_such_keyspace' in vschema"
assert.EqualError(t, err, want, query)
query = "show vitess_migrations"
_, err = executor.Execute(ctx, "TestExecute", session, query, nil)
- want = "Unknown database 'no_such_keyspace' in vschema"
+ want = "VT05003: unknown database 'no_such_keyspace' in vschema"
assert.EqualError(t, err, want, query)
query = "show vitess_migrations from ks like '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90'"
_, err = executor.Execute(ctx, "TestExecute", session, query, nil)
- want = "Unknown database 'ks' in vschema"
+ want = "VT05003: unknown database 'ks' in vschema"
assert.EqualError(t, err, want, query)
}
@@ -1093,7 +1117,7 @@ func TestExecutorUse(t *testing.T) {
}
_, err = executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil)
- require.EqualError(t, err, "unknown database 'UnexistentKeyspace'")
+ require.EqualError(t, err, "VT05003: unknown database 'UnexistentKeyspace' in vschema")
}
func TestExecutorComment(t *testing.T) {
@@ -1110,7 +1134,7 @@ func TestExecutorComment(t *testing.T) {
if err != nil {
t.Error(err)
}
- if !reflect.DeepEqual(gotResult, wantResult) {
+ if !gotResult.Equal(wantResult) {
t.Errorf("Exec %s: %v, want %v", stmt, gotResult, wantResult)
}
}
@@ -1261,7 +1285,7 @@ func TestExecutorDDL(t *testing.T) {
}
stmts := []string{
- "create table t1(id bigint primary key)",
+ "create table t2(id bigint primary key)",
"alter table t2 add primary key (id)",
"rename table t2 to t3",
"truncate table t2",
@@ -1285,10 +1309,10 @@ func TestExecutorDDL(t *testing.T) {
stmtType := "DDL"
_, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil)
if tc.hasNoKeyspaceErr {
- require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail")
+ require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail: %q", stmt)
stmtType = "" // For error case, plan is not generated to query log will not contain any stmtType.
} else {
- require.NoError(t, err)
+ require.NoError(t, err, "did not expect error for query: %q", stmt)
}
diff := cmp.Diff(tc.wantCnts, cnts{
@@ -1308,6 +1332,7 @@ func TestExecutorDDL(t *testing.T) {
input string
hasErr bool
}{
+ {input: "create table t1(id bigint primary key)", hasErr: false},
{input: "drop table t1", hasErr: false},
{input: "drop table t2", hasErr: true},
{input: "drop view t1", hasErr: false},
@@ -1453,9 +1478,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) {
sbc2.ExecCount.Get(),
sbclookup.ExecCount.Get(),
}
- if !reflect.DeepEqual(gotCount, wantCount) {
- t.Errorf("Exec %s: %v, want %v", stmt, gotCount, wantCount)
- }
+ require.Equal(t, wantCount, gotCount)
}
func TestExecutorAddDropVschemaTableDDL(t *testing.T) {
@@ -1653,7 +1676,7 @@ func assertCacheContains(t *testing.T, e *Executor, want []string) {
func getPlanCached(t *testing.T, e *Executor, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *logstats.LogStats) {
logStats := logstats.NewLogStats(ctx, "Test", "", "", nil)
- plan, err := e.getPlan(context.Background(), vcursor, sql, comments, bindVars, &SafeSession{
+ plan, _, err := e.getPlan(context.Background(), vcursor, sql, comments, bindVars, &SafeSession{
Session: &vtgatepb.Session{Options: &querypb.ExecuteOptions{SkipQueryPlanCache: skipQueryPlanCache}},
}, logStats)
require.NoError(t, err)
@@ -1715,16 +1738,12 @@ func TestGetPlanCacheNormalized(t *testing.T) {
query1 := "select * from music_user_map where id = 1"
_, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */)
assertCacheSize(t, r.plans, 0)
- wantSQL := "select * from music_user_map where id = :vtg1 /* comment */"
- if logStats1.SQL != wantSQL {
- t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL)
- }
+ wantSQL := "select * from music_user_map where id = :id /* comment */"
+ assert.Equal(t, wantSQL, logStats1.SQL)
_, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */)
assertCacheSize(t, r.plans, 1)
- if logStats2.SQL != wantSQL {
- t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL)
- }
+ assert.Equal(t, wantSQL, logStats2.SQL)
// Skip cache using directive
r, _, _, _ = createExecutorEnv()
@@ -1758,71 +1777,48 @@ func TestGetPlanNormalized(t *testing.T) {
query1 := "select * from music_user_map where id = 1"
query2 := "select * from music_user_map where id = 2"
- normalized := "select * from music_user_map where id = :vtg1"
+ normalized := "select * from music_user_map where id = :id"
plan1, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 1 */"), map[string]*querypb.BindVariable{}, false)
plan2, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false)
- if plan1 != plan2 {
- t.Errorf("getPlan(query1): plans must be equal: %p %p", plan1, plan2)
- }
+ assert.Equal(t, plan1, plan2)
want := []string{
"@unknown:" + normalized,
}
assertCacheContains(t, r, want)
wantSQL := normalized + " /* comment 1 */"
- if logStats1.SQL != wantSQL {
- t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL)
- }
+ assert.Equal(t, wantSQL, logStats1.SQL)
wantSQL = normalized + " /* comment 2 */"
- if logStats2.SQL != wantSQL {
- t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL)
- }
+ assert.Equal(t, wantSQL, logStats2.SQL)
plan3, logStats3 := getPlanCached(t, r, emptyvc, query2, makeComments(" /* comment 3 */"), map[string]*querypb.BindVariable{}, false)
- if plan1 != plan3 {
- t.Errorf("getPlan(query2): plans must be equal: %p %p", plan1, plan3)
- }
+ assert.Equal(t, plan1, plan3)
wantSQL = normalized + " /* comment 3 */"
- if logStats3.SQL != wantSQL {
- t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats3.SQL)
- }
+ assert.Equal(t, wantSQL, logStats3.SQL)
plan4, logStats4 := getPlanCached(t, r, emptyvc, normalized, makeComments(" /* comment 4 */"), map[string]*querypb.BindVariable{}, false)
- if plan1 != plan4 {
- t.Errorf("getPlan(normalized): plans must be equal: %p %p", plan1, plan4)
- }
+ assert.Equal(t, plan1, plan4)
wantSQL = normalized + " /* comment 4 */"
- if logStats4.SQL != wantSQL {
- t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats4.SQL)
- }
+ assert.Equal(t, wantSQL, logStats4.SQL)
var logStats5 *logstats.LogStats
plan3, logStats5 = getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 5 */"), map[string]*querypb.BindVariable{}, false)
- if plan1 == plan3 {
- t.Errorf("getPlan(query1, ks): plans must not be equal: %p %p", plan1, plan3)
- }
+ assert.Equal(t, plan1, plan3)
wantSQL = normalized + " /* comment 5 */"
- if logStats5.SQL != wantSQL {
- t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats5.SQL)
- }
+ assert.Equal(t, wantSQL, logStats5.SQL)
plan4, _ = getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 6 */"), map[string]*querypb.BindVariable{}, false)
- if plan3 != plan4 {
- t.Errorf("getPlan(query1, ks): plans must be equal: %p %p", plan3, plan4)
- }
+ assert.Equal(t, plan1, plan4)
want = []string{
KsTestUnsharded + "@unknown:" + normalized,
"@unknown:" + normalized,
}
assertCacheContains(t, r, want)
- _, err := r.getPlan(context.Background(), emptyvc, "syntax", makeComments(""), map[string]*querypb.BindVariable{}, nil, nil)
- wantErr := "syntax error at position 7 near 'syntax'"
- if err == nil || err.Error() != wantErr {
- t.Errorf("getPlan(syntax): %v, want %s", err, wantErr)
- }
+ _, _, err := r.getPlan(context.Background(), emptyvc, "syntax", makeComments(""), map[string]*querypb.BindVariable{}, nil, nil)
+ assert.EqualError(t, err, "syntax error at position 7 near 'syntax'")
assertCacheContains(t, r, want)
}
@@ -2108,30 +2104,24 @@ func TestExecutorOtherRead(t *testing.T) {
}
}
-func TestExecutorExplain(t *testing.T) {
+func TestExecutorVExplain(t *testing.T) {
executor, _, _, _ := createExecutorEnv()
executor.normalize = true
logChan := QueryLogger.Subscribe("Test")
defer QueryLogger.Unsubscribe(logChan)
bindVars := map[string]*querypb.BindVariable{}
- result, err := executorExec(executor, "explain format = vitess select * from user", bindVars)
+ result, err := executorExec(executor, "vexplain plan select * from user", bindVars)
require.NoError(t, err)
require.Equal(t,
- `[[VARCHAR("Route") VARCHAR("Scatter") VARCHAR("TestExecutor") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("select * from `+"`user`"+`")]]`,
+ `[[VARCHAR("{\n\t\"OperatorType\": \"Route\",\n\t\"Variant\": \"Scatter\",\n\t\"Keyspace\": {\n\t\t\"Name\": \"TestExecutor\",\n\t\t\"Sharded\": true\n\t},\n\t\"FieldQuery\": \"select * from `+"`user`"+` where 1 != 1\",\n\t\"Query\": \"select * from `+"`user`"+`\",\n\t\"Table\": \"`+"`user`"+`\"\n}")]]`,
fmt.Sprintf("%v", result.Rows))
- result, err = executorExec(executor, "explain format = vitess select 42", bindVars)
+ result, err = executorExec(executor, "vexplain plan select 42", bindVars)
require.NoError(t, err)
- expected :=
- `[[VARCHAR("Projection") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")] ` +
- `[VARCHAR("└─ SingleRow") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")]]`
- require.Equal(t,
- `[[VARCHAR("Projection") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")] `+
- `[VARCHAR("└─ SingleRow") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")]]`,
- expected,
- fmt.Sprintf("%v", result.Rows), fmt.Sprintf("%v", result.Rows))
+ expected := `[[VARCHAR("{\n\t\"OperatorType\": \"Projection\",\n\t\"Expressions\": [\n\t\t\"INT64(42) as 42\"\n\t],\n\t\"Inputs\": [\n\t\t{\n\t\t\t\"OperatorType\": \"SingleRow\"\n\t\t}\n\t]\n}")]]`
+ require.Equal(t, expected, fmt.Sprintf("%v", result.Rows))
}
func TestExecutorOtherAdmin(t *testing.T) {
@@ -2321,7 +2311,7 @@ func TestExecutorSavepointInTxWithReservedConn(t *testing.T) {
sbc1WantQueries := []*querypb.BoundQuery{{
Sql: "select @@sql_mode orig, '' new", BindVariables: emptyBV,
}, {
- Sql: "set @@sql_mode = ''", BindVariables: emptyBV,
+ Sql: "set sql_mode = ''", BindVariables: emptyBV,
}, {
Sql: "savepoint a", BindVariables: emptyBV,
}, {
@@ -2333,7 +2323,7 @@ func TestExecutorSavepointInTxWithReservedConn(t *testing.T) {
}}
sbc2WantQueries := []*querypb.BoundQuery{{
- Sql: "set @@sql_mode = ''", BindVariables: emptyBV,
+ Sql: "set sql_mode = ''", BindVariables: emptyBV,
}, {
Sql: "savepoint a", BindVariables: emptyBV,
}, {
@@ -2450,7 +2440,7 @@ func TestExecutorCallProc(t *testing.T) {
if tc.hasNoKeyspaceErr {
assert.EqualError(t, err, errNoKeyspace.Error())
} else if tc.unshardedOnlyErr {
- require.EqualError(t, err, "CALL is not supported for sharded database")
+ require.EqualError(t, err, "CALL is not supported for sharded keyspace")
} else {
assert.NoError(t, err)
}
@@ -2503,14 +2493,14 @@ func TestExecutorDescHash(t *testing.T) {
require.NoError(t, err)
}
-func TestExecutorVtExplain(t *testing.T) {
+func TestExecutorVExplainQueries(t *testing.T) {
executor, _, _, sbclookup := createExecutorEnv()
session := NewAutocommitSession(&vtgatepb.Session{})
sbclookup.SetResults([]*sqltypes.Result{
sqltypes.MakeTestResult(sqltypes.MakeTestFields("name|user_id", "varchar|int64"), "apa|1", "apa|2"),
})
- qr, err := executor.Execute(ctx, "TestExecutorVtExplain", session, "explain format=vtexplain select * from user where name = 'apa'", nil)
+ qr, err := executor.Execute(ctx, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil)
require.NoError(t, err)
txt := fmt.Sprintf("%v\n", qr.Rows)
lookupQuery := "select `name`, user_id from name_user_map where `name` in"
@@ -2519,7 +2509,7 @@ func TestExecutorVtExplain(t *testing.T) {
// Test the streaming side as well
var results []sqltypes.Row
session = NewAutocommitSession(&vtgatepb.Session{})
- err = executor.StreamExecute(ctx, "TestExecutorVtExplain", session, "explain format=vtexplain select * from user where name = 'apa'", nil, func(result *sqltypes.Result) error {
+ err = executor.StreamExecute(ctx, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil, func(result *sqltypes.Result) error {
results = append(results, result.Rows...)
return nil
})
@@ -2528,6 +2518,51 @@ func TestExecutorVtExplain(t *testing.T) {
require.Contains(t, txt, lookupQuery)
}
+func TestExecutorStartTxnStmt(t *testing.T) {
+ executor, _, _, _ := createExecutorEnv()
+ session := NewAutocommitSession(&vtgatepb.Session{})
+
+ tcases := []struct {
+ beginSQL string
+ expTxAccessMode []querypb.ExecuteOptions_TransactionAccessMode
+ }{{
+ beginSQL: "begin",
+ }, {
+ beginSQL: "start transaction",
+ }, {
+ beginSQL: "start transaction with consistent snapshot",
+ expTxAccessMode: []querypb.ExecuteOptions_TransactionAccessMode{querypb.ExecuteOptions_CONSISTENT_SNAPSHOT},
+ }, {
+ beginSQL: "start transaction read only",
+ expTxAccessMode: []querypb.ExecuteOptions_TransactionAccessMode{querypb.ExecuteOptions_READ_ONLY},
+ }, {
+ beginSQL: "start transaction read write",
+ expTxAccessMode: []querypb.ExecuteOptions_TransactionAccessMode{querypb.ExecuteOptions_READ_WRITE},
+ }, {
+ beginSQL: "start transaction with consistent snapshot, read only",
+ expTxAccessMode: []querypb.ExecuteOptions_TransactionAccessMode{querypb.ExecuteOptions_CONSISTENT_SNAPSHOT, querypb.ExecuteOptions_READ_ONLY},
+ }, {
+ beginSQL: "start transaction with consistent snapshot, read write",
+ expTxAccessMode: []querypb.ExecuteOptions_TransactionAccessMode{querypb.ExecuteOptions_CONSISTENT_SNAPSHOT, querypb.ExecuteOptions_READ_WRITE},
+ }, {
+ beginSQL: "start transaction read only, with consistent snapshot",
+ expTxAccessMode: []querypb.ExecuteOptions_TransactionAccessMode{querypb.ExecuteOptions_READ_ONLY, querypb.ExecuteOptions_CONSISTENT_SNAPSHOT},
+ }}
+
+ for _, tcase := range tcases {
+ t.Run(tcase.beginSQL, func(t *testing.T) {
+ _, err := executor.Execute(ctx, "TestExecutorStartTxnStmt", session, tcase.beginSQL, nil)
+ require.NoError(t, err)
+
+ assert.Equal(t, tcase.expTxAccessMode, session.GetOrCreateOptions().TransactionAccessMode)
+
+ _, err = executor.Execute(ctx, "TestExecutorStartTxnStmt", session, "rollback", nil)
+ require.NoError(t, err)
+
+ })
+ }
+}
+
func exec(executor *Executor, session *SafeSession, sql string) (*sqltypes.Result, error) {
return executor.Execute(context.Background(), "TestExecute", session, sql, nil)
}
diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go
index a1b387e2ad5..d361e25f5bf 100644
--- a/go/vt/vtgate/executor_vschema_ddl_test.go
+++ b/go/vt/vtgate/executor_vschema_ddl_test.go
@@ -236,7 +236,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) {
t.Errorf("want error %v got %v", wantErr, err)
}
- //add one vindex that has never been used by the tables
+ // add one vindex that has never been used by the tables
stmt = "alter vschema create vindex test_vindex using hash"
_, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
require.NoError(t, err)
@@ -246,7 +246,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) {
t.Errorf("updated vschema did not contain test_vindex")
}
- //drop an existing vindex that has never been used by the tables
+ // drop an existing vindex that has never been used by the tables
stmt = "alter vschema drop vindex TestExecutor.test_vindex"
_, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
require.NoError(t, err)
@@ -256,7 +256,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) {
t.Fatalf("test_vindex should not exist after droping it")
}
- //drop an existing vindex that is used by at least one table
+ // drop an existing vindex that is used by at least one table
stmt = "alter vschema drop vindex TestExecutor.keyspace_id"
_, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
wantErr = "can not drop vindex cause keyspace_id still defined on table ksid_table"
@@ -373,13 +373,13 @@ func TestExecutorAddSequenceDDL(t *testing.T) {
}
time.Sleep(10 * time.Millisecond)
- stmt = "alter vschema on test_table add auto_increment id using test_seq"
+ stmt = "alter vschema on test_table add auto_increment id using `db-name`.`test_seq`"
if _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil); err != nil {
t.Error(err)
}
time.Sleep(10 * time.Millisecond)
- wantAutoInc := &vschemapb.AutoIncrement{Column: "id", Sequence: "test_seq"}
+ wantAutoInc := &vschemapb.AutoIncrement{Column: "id", Sequence: "`db-name`.test_seq"}
gotAutoInc := executor.vm.GetCurrentSrvVschema().Keyspaces[ksSharded].Tables["test_table"].AutoIncrement
if !reflect.DeepEqual(wantAutoInc, gotAutoInc) {
@@ -392,7 +392,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) {
defer func() {
vschemaacl.AuthorizedDDLUsers = ""
}()
- executor, sbc1, sbc2, sbclookup := createExecutorEnv() //nolint
+ executor, sbc1, sbc2, sbclookup := createExecutorEnv() // nolint
ks := "TestExecutor"
session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4)
@@ -432,7 +432,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) {
_, _ = waitForVindex(t, ks, "test_hash", vschemaUpdates, executor)
_ = waitForColVindexes(t, ks, "test", []string{}, executor)
_, err = executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil)
- require.EqualError(t, err, "table 'test' does not exist in keyspace 'TestExecutor'")
+ require.EqualError(t, err, "VT05005: table 'test' does not exist in keyspace 'TestExecutor'")
// add it again using the same syntax
stmt = "alter vschema on test add vindex test_hash (id) using hash "
@@ -579,6 +579,18 @@ func TestExecutorAddDropVindexDDL(t *testing.T) {
}
utils.MustMatch(t, wantqr, qr)
+ // now make sure we can create another vindex that references a table with dashes (i.e. escaping is necessary)
+ stmt = "alter vschema on test2 add vindex test_lookup_fqn(c1,c2) using consistent_lookup_unique with owner=`test`, from=`c1,c2`, table=`test-keyspace`.`lookup-fqn`, to=`keyspace_id`"
+ _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
+ require.NoError(t, err)
+
+ _, vindex = waitForVindex(t, ks, "test_lookup_fqn", vschemaUpdates, executor)
+ require.Equal(t, "consistent_lookup_unique", vindex.Type)
+ require.Equal(t, "test", vindex.Owner)
+ require.Equal(t, "c1,c2", vindex.Params["from"])
+ require.Equal(t, "`test-keyspace`.`lookup-fqn`", vindex.Params["table"])
+ require.Equal(t, "keyspace_id", vindex.Params["to"])
+
stmt = "alter vschema on test2 add vindex nonexistent (c1,c2)"
_, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
require.EqualError(t, err, "vindex nonexistent does not exist in keyspace TestExecutor")
@@ -601,11 +613,11 @@ func TestExecutorAddDropVindexDDL(t *testing.T) {
stmt = "alter vschema on nonexistent drop vindex test_lookup"
_, err = executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: "InvalidKeyspace"}), stmt, nil)
- require.EqualError(t, err, "Unknown database 'InvalidKeyspace' in vschema")
+ require.EqualError(t, err, "VT05003: unknown database 'InvalidKeyspace' in vschema")
stmt = "alter vschema on nowhere.nohow drop vindex test_lookup"
_, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
- require.EqualError(t, err, "Unknown database 'nowhere' in vschema")
+ require.EqualError(t, err, "VT05003: unknown database 'nowhere' in vschema")
stmt = "alter vschema on test drop vindex test_lookup"
_, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
@@ -622,7 +634,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) {
}
func TestPlanExecutorVindexDDLACL(t *testing.T) {
- //t.Skip("not yet planned")
+ // t.Skip("not yet planned")
executor, _, _, _ := createExecutorEnv()
ks := "TestExecutor"
session := NewSafeSession(&vtgatepb.Session{TargetString: ks})
diff --git a/go/vt/vtgate/grpcvtgateconn/conn.go b/go/vt/vtgate/grpcvtgateconn/conn.go
index 1f29d1c8c47..0fb76dfefe4 100644
--- a/go/vt/vtgate/grpcvtgateconn/conn.go
+++ b/go/vt/vtgate/grpcvtgateconn/conn.go
@@ -53,7 +53,6 @@ func init() {
"vtclient",
"vtcombo",
"vtctl",
- "vtctld",
"vttestserver",
} {
servenv.OnParseFor(cmd, registerFlags)
diff --git a/go/vt/vtgate/grpcvtgateservice/server.go b/go/vt/vtgate/grpcvtgateservice/server.go
index edf9659c283..0ebe829ac4d 100644
--- a/go/vt/vtgate/grpcvtgateservice/server.go
+++ b/go/vt/vtgate/grpcvtgateservice/server.go
@@ -46,13 +46,15 @@ const (
)
var (
- useEffective bool
- useEffectiveGroups bool
+ useEffective bool
+ useEffectiveGroups bool
+ useStaticAuthenticationIdentity bool
)
func registerFlags(fs *pflag.FlagSet) {
fs.BoolVar(&useEffective, "grpc_use_effective_callerid", false, "If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal.")
fs.BoolVar(&useEffectiveGroups, "grpc-use-effective-groups", false, "If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.")
+ fs.BoolVar(&useStaticAuthenticationIdentity, "grpc-use-static-authentication-callerid", false, "If set, will set the immediate caller id to the username authenticated by the static auth plugin.")
}
func init() {
@@ -66,13 +68,13 @@ type VTGate struct {
server vtgateservice.VTGateService
}
-// immediateCallerID tries to extract the common name as well as the (domain) subject
+// immediateCallerIDFromCert tries to extract the common name as well as the (domain) subject
// alternative names of the certificate that was used to connect to vtgate.
// If it fails for any reason, it will return "".
// That immediate caller id is then inserted into a Context,
// and will be used when talking to vttablet.
// vttablet in turn can use table ACLs to validate access is authorized.
-func immediateCallerID(ctx context.Context) (string, []string) {
+func immediateCallerIDFromCert(ctx context.Context) (string, []string) {
p, ok := peer.FromContext(ctx)
if !ok {
return "", nil
@@ -94,16 +96,35 @@ func immediateCallerID(ctx context.Context) (string, []string) {
return cert.Subject.CommonName, cert.DNSNames
}
+// immediateCallerIdFromStaticAuthentication extracts the username of the current
+// static authentication context and returns that to the caller.
+func immediateCallerIdFromStaticAuthentication(ctx context.Context) (string, []string) {
+ if immediate := servenv.StaticAuthUsernameFromContext(ctx); immediate != "" {
+ return immediate, nil
+ }
+
+ return "", nil
+}
+
// withCallerIDContext creates a context that extracts what we need
// from the incoming call and can be forwarded for use when talking to vttablet.
func withCallerIDContext(ctx context.Context, effectiveCallerID *vtrpcpb.CallerID) context.Context {
- immediate, securityGroups := immediateCallerID(ctx)
+ // The client cert common name (if using mTLS)
+ immediate, securityGroups := immediateCallerIDFromCert(ctx)
+
+ // The effective caller id (if --grpc_use_effective_callerid=true)
if immediate == "" && useEffective && effectiveCallerID != nil {
immediate = effectiveCallerID.Principal
if useEffectiveGroups && len(effectiveCallerID.Groups) > 0 {
securityGroups = effectiveCallerID.Groups
}
}
+
+ // The static auth username (if --grpc-use-static-authentication-callerid=true)
+ if immediate == "" && useStaticAuthenticationIdentity {
+ immediate, securityGroups = immediateCallerIdFromStaticAuthentication(ctx)
+ }
+
if immediate == "" {
immediate = unsecureClient
}
diff --git a/go/vt/vtgate/legacy_scatter_conn_test.go b/go/vt/vtgate/legacy_scatter_conn_test.go
index dc1f4e0f53a..7cbdab3af69 100644
--- a/go/vt/vtgate/legacy_scatter_conn_test.go
+++ b/go/vt/vtgate/legacy_scatter_conn_test.go
@@ -96,7 +96,7 @@ func TestLegacyExecuteFailOnAutocommit(t *testing.T) {
},
Autocommit: false,
}
- _, errs := sc.ExecuteMultiShard(ctx, rss, queries, NewSafeSession(session), true /*autocommit*/, false)
+ _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(session), true /*autocommit*/, false)
err := vterrors.Aggregate(errs)
require.Error(t, err)
require.Contains(t, err.Error(), "in autocommit mode, transactionID should be zero but was: 123")
@@ -120,7 +120,7 @@ func TestScatterConnExecuteMulti(t *testing.T) {
}
}
- qr, errs := sc.ExecuteMultiShard(ctx, rss, queries, NewSafeSession(nil), false /*autocommit*/, false)
+ qr, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(nil), false /*autocommit*/, false)
return qr, vterrors.Aggregate(errs)
})
}
@@ -135,7 +135,7 @@ func TestScatterConnStreamExecuteMulti(t *testing.T) {
bvs := make([]map[string]*querypb.BindVariable, len(rss))
qr := new(sqltypes.Result)
var mu sync.Mutex
- errors := sc.StreamExecuteMulti(ctx, "query", rss, bvs, NewSafeSession(&vtgatepb.Session{InTransaction: true}), true /* autocommit */, func(r *sqltypes.Result) error {
+ errors := sc.StreamExecuteMulti(ctx, nil, "query", rss, bvs, NewSafeSession(&vtgatepb.Session{InTransaction: true}), true /* autocommit */, func(r *sqltypes.Result) error {
mu.Lock()
defer mu.Unlock()
qr.AppendResult(r)
@@ -303,7 +303,7 @@ func TestMaxMemoryRows(t *testing.T) {
sbc0.SetResults([]*sqltypes.Result{tworows, tworows})
sbc1.SetResults([]*sqltypes.Result{tworows, tworows})
- _, errs := sc.ExecuteMultiShard(ctx, rss, queries, session, false, test.ignoreMaxMemoryRows)
+ _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, session, false, test.ignoreMaxMemoryRows)
if test.ignoreMaxMemoryRows {
require.NoError(t, err)
} else {
@@ -334,7 +334,7 @@ func TestLegaceHealthCheckFailsOnReservedConnections(t *testing.T) {
})
}
- _, errs := sc.ExecuteMultiShard(ctx, rss, queries, session, false, false)
+ _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, session, false, false)
require.Error(t, vterrors.Aggregate(errs))
}
@@ -357,7 +357,7 @@ func executeOnShardsReturnsErr(t *testing.T, res *srvtopo.Resolver, keyspace str
})
}
- _, errs := sc.ExecuteMultiShard(ctx, rss, queries, session, false, false)
+ _, errs := sc.ExecuteMultiShard(ctx, nil, rss, queries, session, false, false)
return vterrors.Aggregate(errs)
}
@@ -402,7 +402,7 @@ func TestMultiExecs(t *testing.T) {
}
session := NewSafeSession(&vtgatepb.Session{})
- _, err := sc.ExecuteMultiShard(ctx, rss, queries, session, false, false)
+ _, err := sc.ExecuteMultiShard(ctx, nil, rss, queries, session, false, false)
require.NoError(t, vterrors.Aggregate(err))
if len(sbc0.Queries) == 0 || len(sbc1.Queries) == 0 {
t.Fatalf("didn't get expected query")
@@ -446,7 +446,7 @@ func TestMultiExecs(t *testing.T) {
"bv1": sqltypes.Int64BindVariable(1),
},
}
- _ = sc.StreamExecuteMulti(ctx, "query", rss, bvs, session, false /* autocommit */, func(*sqltypes.Result) error {
+ _ = sc.StreamExecuteMulti(ctx, nil, "query", rss, bvs, session, false /* autocommit */, func(*sqltypes.Result) error {
return nil
})
if !reflect.DeepEqual(sbc0.Queries[0].BindVariables, wantVars0) {
@@ -477,27 +477,27 @@ func TestScatterConnSingleDB(t *testing.T) {
// TransactionMode_SINGLE in session
session := NewSafeSession(&vtgatepb.Session{InTransaction: true, TransactionMode: vtgatepb.TransactionMode_SINGLE})
queries := []*querypb.BoundQuery{{Sql: "query1"}}
- _, errors := sc.ExecuteMultiShard(ctx, rss0, queries, session, false, false)
+ _, errors := sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false)
require.Empty(t, errors)
- _, errors = sc.ExecuteMultiShard(ctx, rss1, queries, session, false, false)
+ _, errors = sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false)
require.Error(t, errors[0])
assert.Contains(t, errors[0].Error(), want)
// TransactionMode_SINGLE in txconn
sc.txConn.mode = vtgatepb.TransactionMode_SINGLE
session = NewSafeSession(&vtgatepb.Session{InTransaction: true})
- _, errors = sc.ExecuteMultiShard(ctx, rss0, queries, session, false, false)
+ _, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false)
require.Empty(t, errors)
- _, errors = sc.ExecuteMultiShard(ctx, rss1, queries, session, false, false)
+ _, errors = sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false)
require.Error(t, errors[0])
assert.Contains(t, errors[0].Error(), want)
// TransactionMode_MULTI in txconn. Should not fail.
sc.txConn.mode = vtgatepb.TransactionMode_MULTI
session = NewSafeSession(&vtgatepb.Session{InTransaction: true})
- _, errors = sc.ExecuteMultiShard(ctx, rss0, queries, session, false, false)
+ _, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false)
require.Empty(t, errors)
- _, errors = sc.ExecuteMultiShard(ctx, rss1, queries, session, false, false)
+ _, errors = sc.ExecuteMultiShard(ctx, nil, rss1, queries, session, false, false)
require.Empty(t, errors)
}
diff --git a/go/vt/vtgate/logstats/logstats.go b/go/vt/vtgate/logstats/logstats.go
index d5696e1beb8..1d598e5c5e2 100644
--- a/go/vt/vtgate/logstats/logstats.go
+++ b/go/vt/vtgate/logstats/logstats.go
@@ -17,6 +17,7 @@ limitations under the License.
package logstats
import (
+ "context"
"encoding/json"
"fmt"
"html/template"
@@ -24,8 +25,6 @@ import (
"net/url"
"time"
- "context"
-
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/streamlog"
"vitess.io/vitess/go/tb"
@@ -57,10 +56,6 @@ type LogStats struct {
SessionUUID string
CachedPlan bool
ActiveKeyspace string // ActiveKeyspace is the selected keyspace `use ks`
-
- // These two fields are deprecated and will be removed in the Vitess V16 release
- Keyspace string
- Table string
}
// NewLogStats constructs a new LogStats with supplied Method and ctx
@@ -156,9 +151,9 @@ func (stats *LogStats) Logf(w io.Writer, params url.Values) error {
var fmtString string
switch streamlog.GetQueryLogFormat() {
case streamlog.QueryLogFormatText:
- fmtString = "%v\t%v\t%v\t'%v'\t'%v'\t%v\t%v\t%.6f\t%.6f\t%.6f\t%.6f\t%v\t%q\t%v\t%v\t%v\t%q\t%q\t%q\t%q\t%q\t%v\t%v\t%q\n"
+ fmtString = "%v\t%v\t%v\t'%v'\t'%v'\t%v\t%v\t%.6f\t%.6f\t%.6f\t%.6f\t%v\t%q\t%v\t%v\t%v\t%q\t%q\t%q\t%v\t%v\t%q\n"
case streamlog.QueryLogFormatJSON:
- fmtString = "{\"Method\": %q, \"RemoteAddr\": %q, \"Username\": %q, \"ImmediateCaller\": %q, \"Effective Caller\": %q, \"Start\": \"%v\", \"End\": \"%v\", \"TotalTime\": %.6f, \"PlanTime\": %v, \"ExecuteTime\": %v, \"CommitTime\": %v, \"StmtType\": %q, \"SQL\": %q, \"BindVars\": %v, \"ShardQueries\": %v, \"RowsAffected\": %v, \"Error\": %q, \"Keyspace\": %q, \"Table\": %q, \"TabletType\": %q, \"SessionUUID\": %q, \"Cached Plan\": %v, \"TablesUsed\": %v, \"ActiveKeyspace\": %q}\n"
+ fmtString = "{\"Method\": %q, \"RemoteAddr\": %q, \"Username\": %q, \"ImmediateCaller\": %q, \"Effective Caller\": %q, \"Start\": \"%v\", \"End\": \"%v\", \"TotalTime\": %.6f, \"PlanTime\": %v, \"ExecuteTime\": %v, \"CommitTime\": %v, \"StmtType\": %q, \"SQL\": %q, \"BindVars\": %v, \"ShardQueries\": %v, \"RowsAffected\": %v, \"Error\": %q, \"TabletType\": %q, \"SessionUUID\": %q, \"Cached Plan\": %v, \"TablesUsed\": %v, \"ActiveKeyspace\": %q}\n"
}
tables := stats.TablesUsed
@@ -189,8 +184,6 @@ func (stats *LogStats) Logf(w io.Writer, params url.Values) error {
stats.ShardQueries,
stats.RowsAffected,
stats.ErrorStr(),
- stats.Keyspace,
- stats.Table,
stats.TabletType,
stats.SessionUUID,
stats.CachedPlan,
diff --git a/go/vt/vtgate/logstats/logstats_test.go b/go/vt/vtgate/logstats/logstats_test.go
index 120113d1279..17b250d3ded 100644
--- a/go/vt/vtgate/logstats/logstats_test.go
+++ b/go/vt/vtgate/logstats/logstats_test.go
@@ -61,8 +61,6 @@ func TestLogStatsFormat(t *testing.T) {
logStats := NewLogStats(context.Background(), "test", "sql1", "suuid", nil)
logStats.StartTime = time.Date(2017, time.January, 1, 1, 2, 3, 0, time.UTC)
logStats.EndTime = time.Date(2017, time.January, 1, 1, 2, 4, 1234, time.UTC)
- logStats.Keyspace = "ks"
- logStats.Table = "table"
logStats.TablesUsed = []string{"ks1.tbl1", "ks2.tbl2"}
logStats.TabletType = "PRIMARY"
logStats.ActiveKeyspace = "db"
@@ -80,42 +78,42 @@ func TestLogStatsFormat(t *testing.T) {
{ // 0
redact: false,
format: "text",
- expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"ks\"\t\"table\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
+ expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
bindVars: intBindVar,
}, { // 1
redact: true,
format: "text",
- expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\t\"[REDACTED]\"\t0\t0\t\"\"\t\"ks\"\t\"table\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
+ expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\t\"[REDACTED]\"\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
bindVars: intBindVar,
}, { // 2
redact: false,
format: "json",
- expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":{\"intVal\":{\"type\":\"INT64\",\"value\":1}},\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Keyspace\":\"ks\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"Table\":\"table\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
+ expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":{\"intVal\":{\"type\":\"INT64\",\"value\":1}},\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
bindVars: intBindVar,
}, { // 3
redact: true,
format: "json",
- expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":\"[REDACTED]\",\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Keyspace\":\"ks\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"Table\":\"table\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
+ expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":\"[REDACTED]\",\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
bindVars: intBindVar,
}, { // 4
redact: false,
format: "text",
- expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\tmap[strVal:type:VARCHAR value:\"abc\"]\t0\t0\t\"\"\t\"ks\"\t\"table\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
+ expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\tmap[strVal:type:VARCHAR value:\"abc\"]\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
bindVars: stringBindVar,
}, { // 5
redact: true,
format: "text",
- expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\t\"[REDACTED]\"\t0\t0\t\"\"\t\"ks\"\t\"table\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
+ expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\t\"[REDACTED]\"\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n",
bindVars: stringBindVar,
}, { // 6
redact: false,
format: "json",
- expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":{\"strVal\":{\"type\":\"VARCHAR\",\"value\":\"abc\"}},\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Keyspace\":\"ks\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"Table\":\"table\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
+ expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":{\"strVal\":{\"type\":\"VARCHAR\",\"value\":\"abc\"}},\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
bindVars: stringBindVar,
}, { // 7
redact: true,
format: "json",
- expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":\"[REDACTED]\",\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Keyspace\":\"ks\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"Table\":\"table\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
+ expected: "{\"ActiveKeyspace\":\"db\",\"BindVars\":\"[REDACTED]\",\"Cached Plan\":false,\"CommitTime\":0,\"Effective Caller\":\"\",\"End\":\"2017-01-01 01:02:04.000001\",\"Error\":\"\",\"ExecuteTime\":0,\"ImmediateCaller\":\"\",\"Method\":\"test\",\"PlanTime\":0,\"RemoteAddr\":\"\",\"RowsAffected\":0,\"SQL\":\"sql1\",\"SessionUUID\":\"suuid\",\"ShardQueries\":0,\"Start\":\"2017-01-01 01:02:03.000000\",\"StmtType\":\"\",\"TablesUsed\":[\"ks1.tbl1\",\"ks2.tbl2\"],\"TabletType\":\"PRIMARY\",\"TotalTime\":1.000001,\"Username\":\"\"}",
bindVars: stringBindVar,
},
}
@@ -158,12 +156,12 @@ func TestLogStatsFilter(t *testing.T) {
params := map[string][]string{"full": {}}
got := testFormat(t, logStats, params)
- want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
+ want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
assert.Equal(t, want, got)
streamlog.SetQueryLogFilterTag("LOG_THIS_QUERY")
got = testFormat(t, logStats, params)
- want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
+ want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
assert.Equal(t, want, got)
streamlog.SetQueryLogFilterTag("NOT_THIS_QUERY")
@@ -181,12 +179,12 @@ func TestLogStatsRowThreshold(t *testing.T) {
params := map[string][]string{"full": {}}
got := testFormat(t, logStats, params)
- want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
+ want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
assert.Equal(t, want, got)
streamlog.SetQueryLogRowThreshold(0)
got = testFormat(t, logStats, params)
- want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
+ want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n"
assert.Equal(t, want, got)
streamlog.SetQueryLogRowThreshold(1)
got = testFormat(t, logStats, params)
diff --git a/go/vt/vtgate/mysql_protocol_test.go b/go/vt/vtgate/mysql_protocol_test.go
index 789a82857c8..dcd12de4bb4 100644
--- a/go/vt/vtgate/mysql_protocol_test.go
+++ b/go/vt/vtgate/mysql_protocol_test.go
@@ -117,7 +117,7 @@ func TestMySQLProtocolExecuteUseStatement(t *testing.T) {
// No such keyspace this will fail
_, err = c.ExecuteFetch("use InvalidKeyspace", 0, false)
require.Error(t, err)
- assert.Contains(t, err.Error(), "unknown database 'InvalidKeyspace' (errno 1049) (sqlstate 42000)")
+ assert.Contains(t, err.Error(), "VT05003: unknown database 'InvalidKeyspace' in vschema (errno 1049) (sqlstate 42000)")
// That doesn't reset the vitess_target
qr, err = c.ExecuteFetch("show vitess_target", 1, false)
@@ -135,7 +135,7 @@ func TestMySQLProtocolExecuteUseStatement(t *testing.T) {
func TestMysqlProtocolInvalidDB(t *testing.T) {
_, err := mysqlConnect(&mysql.ConnParams{DbName: "invalidDB"})
- require.EqualError(t, err, "unknown database 'invalidDB' (errno 1049) (sqlstate 42000)")
+ require.EqualError(t, err, "VT05003: unknown database 'invalidDB' in vschema (errno 1049) (sqlstate 42000)")
}
func TestMySQLProtocolClientFoundRows(t *testing.T) {
diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go
index 4643cea6b29..91a451ece4d 100644
--- a/go/vt/vtgate/plan_execute.go
+++ b/go/vt/vtgate/plan_execute.go
@@ -61,7 +61,7 @@ func (e *Executor) newExecute(
}
// 2: Create a plan for the query
- plan, err := e.getPlan(ctx, vcursor, query, comments, bindVars, safeSession, logStats)
+ plan, stmt, err := e.getPlan(ctx, vcursor, query, comments, bindVars, safeSession, logStats)
execStart := e.logPlanningFinished(logStats, plan)
if err != nil {
@@ -78,7 +78,7 @@ func (e *Executor) newExecute(
safeSession.RecordWarning(warning)
}
- result, err := e.handleTransactions(ctx, safeSession, plan, logStats, vcursor)
+ result, err := e.handleTransactions(ctx, safeSession, plan, logStats, vcursor, stmt)
if err != nil {
return err
}
@@ -104,12 +104,19 @@ func (e *Executor) newExecute(
}
// handleTransactions deals with transactional queries: begin, commit, rollback and savepoint management
-func (e *Executor) handleTransactions(ctx context.Context, safeSession *SafeSession, plan *engine.Plan, logStats *logstats.LogStats, vcursor *vcursorImpl) (*sqltypes.Result, error) {
+func (e *Executor) handleTransactions(
+ ctx context.Context,
+ safeSession *SafeSession,
+ plan *engine.Plan,
+ logStats *logstats.LogStats,
+ vcursor *vcursorImpl,
+ stmt sqlparser.Statement,
+) (*sqltypes.Result, error) {
// We need to explicitly handle errors, and begin/commit/rollback, since these control transactions. Everything else
// will fall through and be handled through planning
switch plan.Type {
case sqlparser.StmtBegin:
- qr, err := e.handleBegin(ctx, safeSession, logStats)
+ qr, err := e.handleBegin(ctx, safeSession, logStats, stmt)
return qr, err
case sqlparser.StmtCommit:
qr, err := e.handleCommit(ctx, safeSession, logStats)
@@ -141,7 +148,7 @@ func (e *Executor) handleTransactions(ctx context.Context, safeSession *SafeSess
func (e *Executor) startTxIfNecessary(ctx context.Context, safeSession *SafeSession) error {
if !safeSession.Autocommit && !safeSession.InTransaction() {
- if err := e.txConn.Begin(ctx, safeSession); err != nil {
+ if err := e.txConn.Begin(ctx, safeSession, nil); err != nil {
return err
}
}
@@ -152,7 +159,7 @@ func (e *Executor) insideTransaction(ctx context.Context, safeSession *SafeSessi
mustCommit := false
if safeSession.Autocommit && !safeSession.InTransaction() {
mustCommit = true
- if err := e.txConn.Begin(ctx, safeSession); err != nil {
+ if err := e.txConn.Begin(ctx, safeSession, nil); err != nil {
return err
}
// The defer acts as a failsafe. If commit was successful,
@@ -250,9 +257,7 @@ func (e *Executor) rollbackPartialExec(ctx context.Context, safeSession *SafeSes
func (e *Executor) setLogStats(logStats *logstats.LogStats, plan *engine.Plan, vcursor *vcursorImpl, execStart time.Time, err error, qr *sqltypes.Result) {
logStats.StmtType = plan.Type.String()
- logStats.Keyspace = plan.Instructions.GetKeyspaceName()
logStats.ActiveKeyspace = vcursor.keyspace
- logStats.Table = plan.Instructions.GetTableName()
logStats.TablesUsed = plan.TablesUsed
logStats.TabletType = vcursor.TabletType().String()
errCount := e.logExecutionEnd(logStats, execStart, plan, err, qr)
diff --git a/go/vt/vtgate/planbuilder/abstract/concatenate.go b/go/vt/vtgate/planbuilder/abstract/concatenate.go
deleted file mode 100644
index d75e739906e..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/concatenate.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-// Concatenate represents a UNION ALL/DISTINCT.
-type Concatenate struct {
- Distinct bool
- SelectStmts []*sqlparser.Select
- Sources []LogicalOperator
- OrderBy sqlparser.OrderBy
- Limit *sqlparser.Limit
-}
-
-var _ LogicalOperator = (*Concatenate)(nil)
-
-func (*Concatenate) iLogical() {}
-
-// TableID implements the Operator interface
-func (c *Concatenate) TableID() semantics.TableSet {
- var tableSet semantics.TableSet
- for _, source := range c.Sources {
- tableSet.MergeInPlace(source.TableID())
- }
- return tableSet
-}
-
-// PushPredicate implements the Operator interface
-func (c *Concatenate) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- newSources := make([]LogicalOperator, 0, len(c.Sources))
- for index, source := range c.Sources {
- if len(c.SelectStmts[index].SelectExprs) != 1 {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't push predicates on concatenate")
- }
- if _, isStarExpr := c.SelectStmts[index].SelectExprs[0].(*sqlparser.StarExpr); !isStarExpr {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't push predicates on concatenate")
- }
-
- newSrc, err := source.PushPredicate(expr, semTable)
- if err != nil {
- return nil, err
- }
- newSources = append(newSources, newSrc)
- }
- c.Sources = newSources
- return c, nil
-}
-
-// UnsolvedPredicates implements the Operator interface
-func (c *Concatenate) UnsolvedPredicates(*semantics.SemTable) []sqlparser.Expr {
- return nil
-}
-
-// CheckValid implements the Operator interface
-func (c *Concatenate) CheckValid() error {
- for _, source := range c.Sources {
- err := source.CheckValid()
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Compact implements the Operator interface
-func (c *Concatenate) Compact(*semantics.SemTable) (LogicalOperator, error) {
- var newSources []LogicalOperator
- var newSels []*sqlparser.Select
- for i, source := range c.Sources {
- other, isConcat := source.(*Concatenate)
- if !isConcat {
- newSources = append(newSources, source)
- newSels = append(newSels, c.SelectStmts[i])
- continue
- }
- switch {
- case other.Limit == nil && len(other.OrderBy) == 0 && !other.Distinct:
- fallthrough
- case c.Distinct && other.Limit == nil:
- // if the current UNION is a DISTINCT, we can safely ignore everything from children UNIONs, except LIMIT
- newSources = append(newSources, other.Sources...)
- newSels = append(newSels, other.SelectStmts...)
-
- default:
- newSources = append(newSources, other)
- newSels = append(newSels, nil)
- }
- }
- c.Sources = newSources
- c.SelectStmts = newSels
- return c, nil
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/delete.go b/go/vt/vtgate/planbuilder/abstract/delete.go
deleted file mode 100644
index 8aa9445d643..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/delete.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-type Delete struct {
- Table *QueryTable
- TableInfo semantics.TableInfo
- AST *sqlparser.Delete
-}
-
-var _ LogicalOperator = (*Delete)(nil)
-
-// TableID implements the LogicalOperator interface
-func (d *Delete) TableID() semantics.TableSet {
- return d.Table.ID
-}
-
-// UnsolvedPredicates implements the LogicalOperator interface
-func (d *Delete) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return nil
-}
-
-// CheckValid implements the LogicalOperator interface
-func (d *Delete) CheckValid() error {
- return nil
-}
-
-// iLogical implements the LogicalOperator interface
-func (d *Delete) iLogical() {}
-
-// PushPredicate implements the LogicalOperator interface
-func (d *Delete) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "can't accept predicates")
-}
-
-// Compact implements the LogicalOperator interface
-func (d *Delete) Compact(semTable *semantics.SemTable) (LogicalOperator, error) {
- return d, nil
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/derived.go b/go/vt/vtgate/planbuilder/abstract/derived.go
deleted file mode 100644
index 508d576f37d..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/derived.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-// Derived represents a derived table in the query
-type Derived struct {
- Sel sqlparser.SelectStatement
- Inner LogicalOperator
- Alias string
- ColumnAliases sqlparser.Columns
-}
-
-var _ LogicalOperator = (*Derived)(nil)
-
-func (*Derived) iLogical() {}
-
-// TableID implements the Operator interface
-func (d *Derived) TableID() semantics.TableSet {
- return d.Inner.TableID()
-}
-
-// PushPredicate implements the Operator interface
-func (d *Derived) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- tableInfo, err := semTable.TableInfoForExpr(expr)
- if err != nil {
- if err == semantics.ErrMultipleTables {
- return nil, semantics.ProjError{Inner: vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: unable to split predicates to derived table: %s", sqlparser.String(expr))}
- }
- return nil, err
- }
-
- newExpr, err := semantics.RewriteDerivedTableExpression(expr, tableInfo)
- if err != nil {
- return nil, err
- }
- newSrc, err := d.Inner.PushPredicate(newExpr, semTable)
- d.Inner = newSrc
- return d, err
-}
-
-// UnsolvedPredicates implements the Operator interface
-func (d *Derived) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return d.Inner.UnsolvedPredicates(semTable)
-}
-
-// CheckValid implements the Operator interface
-func (d *Derived) CheckValid() error {
- return d.Inner.CheckValid()
-}
-
-// Compact implements the Operator interface
-func (d *Derived) Compact(*semantics.SemTable) (LogicalOperator, error) {
- return d, nil
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/filter.go b/go/vt/vtgate/planbuilder/abstract/filter.go
deleted file mode 100644
index 46b564e7b9f..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/filter.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-type Filter struct {
- Source LogicalOperator
- Predicates []sqlparser.Expr
-}
-
-var _ LogicalOperator = (*Filter)(nil)
-
-// iLogical implements the LogicalOperator interface
-func (f *Filter) iLogical() {}
-
-// TableID implements the LogicalOperator interface
-func (f *Filter) TableID() semantics.TableSet {
- return f.Source.TableID()
-}
-
-// UnsolvedPredicates implements the LogicalOperator interface
-func (f *Filter) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return f.Source.UnsolvedPredicates(semTable)
-}
-
-// CheckValid implements the LogicalOperator interface
-func (f *Filter) CheckValid() error {
- return f.Source.CheckValid()
-}
-
-// PushPredicate implements the LogicalOperator interface
-func (f *Filter) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- op, err := f.Source.PushPredicate(expr, semTable)
- if err != nil {
- return nil, err
- }
-
- if filter, isFilter := op.(*Filter); isFilter {
- filter.Predicates = append(f.Predicates, filter.Predicates...)
- return filter, err
- }
-
- return &Filter{
- Source: op,
- Predicates: f.Predicates,
- }, nil
-}
-
-// Compact implements the LogicalOperator interface
-func (f *Filter) Compact(semTable *semantics.SemTable) (LogicalOperator, error) {
- if len(f.Predicates) == 0 {
- return f.Source, nil
- }
-
- return f, nil
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/join.go b/go/vt/vtgate/planbuilder/abstract/join.go
deleted file mode 100644
index b15c0b780dc..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/join.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-// Join represents a join. If we have a predicate, this is an inner join. If no predicate exists, it is a cross join
-type Join struct {
- LHS, RHS LogicalOperator
- Predicate sqlparser.Expr
- LeftJoin bool
-}
-
-var _ LogicalOperator = (*Join)(nil)
-
-// iLogical implements the LogicalOperator interface
-func (*Join) iLogical() {}
-
-// PushPredicate implements the Operator interface
-func (j *Join) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- deps := semTable.RecursiveDeps(expr)
- switch {
- case deps.IsSolvedBy(j.LHS.TableID()):
- lhs, err := j.LHS.PushPredicate(expr, semTable)
- if err != nil {
- return nil, err
- }
- j.LHS = lhs
- return j, nil
-
- case deps.IsSolvedBy(j.RHS.TableID()):
- j.tryConvertToInnerJoin(expr, semTable)
-
- if !j.LeftJoin {
- rhs, err := j.RHS.PushPredicate(expr, semTable)
- if err != nil {
- return nil, err
- }
- j.RHS = rhs
- return j, err
- }
-
- op := &Filter{
- Source: j,
- Predicates: []sqlparser.Expr{expr},
- }
- return op, nil
-
- case deps.IsSolvedBy(j.LHS.TableID().Merge(j.RHS.TableID())):
- j.tryConvertToInnerJoin(expr, semTable)
-
- if !j.LeftJoin {
- j.Predicate = sqlparser.AndExpressions(j.Predicate, expr)
- return j, nil
- }
-
- op := &Filter{
- Source: j,
- Predicates: []sqlparser.Expr{expr},
- }
- return op, nil
- }
-
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Cannot push predicate: %s", sqlparser.String(expr))
-}
-
-// When a predicate uses information from an outer table, we can convert from an outer join to an inner join
-// if the predicate is "null-intolerant".
-//
-// Null-intolerant in this context means that the predicate will not be true if the table columns are null.
-//
-// Since an outer join is an inner join with the addition of all the rows from the left-hand side that
-// matched no rows on the right-hand, if we are later going to remove all the rows where the right-hand
-// side did not match, we might as well turn the join into an inner join.
-//
-// This is based on the paper "Canonical Abstraction for Outerjoin Optimization" by J Rao et al
-func (j *Join) tryConvertToInnerJoin(expr sqlparser.Expr, semTable *semantics.SemTable) {
- if !j.LeftJoin {
- return
- }
-
- switch expr := expr.(type) {
- case *sqlparser.ComparisonExpr:
- if expr.Operator == sqlparser.NullSafeEqualOp {
- return
- }
-
- if sqlparser.IsColName(expr.Left) && semTable.RecursiveDeps(expr.Left).IsSolvedBy(j.RHS.TableID()) ||
- sqlparser.IsColName(expr.Right) && semTable.RecursiveDeps(expr.Right).IsSolvedBy(j.RHS.TableID()) {
- j.LeftJoin = false
- }
-
- case *sqlparser.IsExpr:
- if expr.Right != sqlparser.IsNotNullOp {
- return
- }
-
- if sqlparser.IsColName(expr.Left) && semTable.RecursiveDeps(expr.Left).IsSolvedBy(j.RHS.TableID()) {
- j.LeftJoin = false
- }
- }
-}
-
-// TableID implements the Operator interface
-func (j *Join) TableID() semantics.TableSet {
- return j.RHS.TableID().Merge(j.LHS.TableID())
-}
-
-// UnsolvedPredicates implements the Operator interface
-func (j *Join) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- ts := j.TableID()
- var result []sqlparser.Expr
- for _, expr := range j.LHS.UnsolvedPredicates(semTable) {
- deps := semTable.DirectDeps(expr)
- if !deps.IsSolvedBy(ts) {
- result = append(result, expr)
- }
- }
- for _, expr := range j.RHS.UnsolvedPredicates(semTable) {
- deps := semTable.DirectDeps(expr)
- if !deps.IsSolvedBy(ts) {
- result = append(result, expr)
- }
- }
- return result
-}
-
-// CheckValid implements the Operator interface
-func (j *Join) CheckValid() error {
- err := j.LHS.CheckValid()
- if err != nil {
- return err
- }
-
- return j.RHS.CheckValid()
-}
-
-// Compact implements the Operator interface
-func (j *Join) Compact(semTable *semantics.SemTable) (LogicalOperator, error) {
- if j.LeftJoin {
- // we can't merge outer joins into a single QG
- return j, nil
- }
-
- lqg, lok := j.LHS.(*QueryGraph)
- rqg, rok := j.RHS.(*QueryGraph)
- if !lok || !rok {
- return j, nil
- }
-
- op := &QueryGraph{
- Tables: append(lqg.Tables, rqg.Tables...),
- innerJoins: append(lqg.innerJoins, rqg.innerJoins...),
- NoDeps: sqlparser.AndExpressions(lqg.NoDeps, rqg.NoDeps),
- }
- err := op.collectPredicate(j.Predicate, semTable)
- if err != nil {
- return nil, err
- }
- return op, nil
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/operator.go b/go/vt/vtgate/planbuilder/abstract/operator.go
deleted file mode 100644
index 8d36dc1c2d2..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/operator.go
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/semantics"
- "vitess.io/vitess/go/vt/vtgate/vindexes"
-)
-
-type (
- // Operator forms the tree of operators, representing the declarative query provided.
- Operator interface {
- // TableID returns a TableSet of the tables contained within
- TableID() semantics.TableSet
-
- // UnsolvedPredicates returns any predicates that have dependencies on the given Operator and
- // on the outside of it (a parent Select expression, any other table not used by Operator, etc).
- UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr
-
- // CheckValid checks if we have a valid operator tree, and returns an error if something is wrong
- CheckValid() error
- }
-
- LogicalOperator interface {
- Operator
- iLogical()
-
- // PushPredicate pushes a predicate to the closest possible operator
- PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error)
-
- // Compact will optimise the operator tree into a smaller but equivalent version
- Compact(semTable *semantics.SemTable) (LogicalOperator, error)
- }
-
- PhysicalOperator interface {
- Operator
- IPhysical()
- // Cost is simply the number of routes in the operator tree
- Cost() int
- // Clone creates a copy of the operator that can be updated without changing the original
- Clone() PhysicalOperator
- }
-
- // IntroducesTable is used to make it possible to gather information about the table an operator introduces
- IntroducesTable interface {
- GetQTable() *QueryTable
- GetVTable() *vindexes.Table
- }
-)
-
-func getOperatorFromTableExpr(tableExpr sqlparser.TableExpr, semTable *semantics.SemTable) (LogicalOperator, error) {
- switch tableExpr := tableExpr.(type) {
- case *sqlparser.AliasedTableExpr:
- switch tbl := tableExpr.Expr.(type) {
- case sqlparser.TableName:
- tableID := semTable.TableSetFor(tableExpr)
- tableInfo, err := semTable.TableInfoFor(tableID)
- if err != nil {
- return nil, err
- }
-
- if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex {
- return &Vindex{Table: VindexTable{
- TableID: tableID,
- Alias: tableExpr,
- Table: tbl,
- VTable: vt.Table.GetVindexTable(),
- }, Vindex: vt.Vindex}, nil
- }
- qg := newQueryGraph()
- isInfSchema := tableInfo.IsInfSchema()
- qt := &QueryTable{Alias: tableExpr, Table: tbl, ID: tableID, IsInfSchema: isInfSchema}
- qg.Tables = append(qg.Tables, qt)
- return qg, nil
- case *sqlparser.DerivedTable:
- inner, err := CreateLogicalOperatorFromAST(tbl.Select, semTable)
- if err != nil {
- return nil, err
- }
- return &Derived{Alias: tableExpr.As.String(), Inner: inner, Sel: tbl.Select, ColumnAliases: tableExpr.Columns}, nil
- default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unable to use: %T", tbl)
- }
- case *sqlparser.JoinTableExpr:
- switch tableExpr.Join {
- case sqlparser.NormalJoinType:
- lhs, err := getOperatorFromTableExpr(tableExpr.LeftExpr, semTable)
- if err != nil {
- return nil, err
- }
- rhs, err := getOperatorFromTableExpr(tableExpr.RightExpr, semTable)
- if err != nil {
- return nil, err
- }
- op := createJoin(lhs, rhs)
- if tableExpr.Condition.On != nil {
- op, err = op.PushPredicate(sqlparser.RemoveKeyspaceFromColName(tableExpr.Condition.On), semTable)
- if err != nil {
- return nil, err
- }
- }
- return op, nil
- case sqlparser.LeftJoinType, sqlparser.RightJoinType:
- lhs, err := getOperatorFromTableExpr(tableExpr.LeftExpr, semTable)
- if err != nil {
- return nil, err
- }
- rhs, err := getOperatorFromTableExpr(tableExpr.RightExpr, semTable)
- if err != nil {
- return nil, err
- }
- if tableExpr.Join == sqlparser.RightJoinType {
- lhs, rhs = rhs, lhs
- }
- return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: sqlparser.RemoveKeyspaceFromColName(tableExpr.Condition.On)}, nil
- default:
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", tableExpr.Join.ToString())
- }
- case *sqlparser.ParenTableExpr:
- return crossJoin(tableExpr.Exprs, semTable)
- default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unable to use: %T table type", tableExpr)
- }
-}
-
-func crossJoin(exprs sqlparser.TableExprs, semTable *semantics.SemTable) (LogicalOperator, error) {
- var output LogicalOperator
- for _, tableExpr := range exprs {
- op, err := getOperatorFromTableExpr(tableExpr, semTable)
- if err != nil {
- return nil, err
- }
- if output == nil {
- output = op
- } else {
- output = createJoin(output, op)
- }
- }
- return output, nil
-}
-
-func getSelect(s sqlparser.SelectStatement) *sqlparser.Select {
- switch s := s.(type) {
- case *sqlparser.Select:
- return s
- default:
- return nil
- }
-}
-
-// CreateLogicalOperatorFromAST creates an operator tree that represents the input SELECT or UNION query
-func CreateLogicalOperatorFromAST(selStmt sqlparser.Statement, semTable *semantics.SemTable) (op LogicalOperator, err error) {
- switch node := selStmt.(type) {
- case *sqlparser.Select:
- op, err = createOperatorFromSelect(node, semTable)
- case *sqlparser.Union:
- op, err = createOperatorFromUnion(node, semTable)
- case *sqlparser.Update:
- op, err = createOperatorFromUpdate(node, semTable)
- case *sqlparser.Delete:
- op, err = createOperatorFromDelete(node, semTable)
- default:
- err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%T: operator not yet supported", selStmt)
- }
- if err != nil {
- return nil, err
- }
- return op.Compact(semTable)
-}
-
-func createOperatorFromUnion(node *sqlparser.Union, semTable *semantics.SemTable) (LogicalOperator, error) {
- opLHS, err := CreateLogicalOperatorFromAST(node.Left, semTable)
- if err != nil {
- return nil, err
- }
-
- _, isRHSUnion := node.Right.(*sqlparser.Union)
- if isRHSUnion {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "nesting of unions at the right-hand side is not yet supported")
- }
- opRHS, err := CreateLogicalOperatorFromAST(node.Right, semTable)
- if err != nil {
- return nil, err
- }
- return &Concatenate{
- Distinct: node.Distinct,
- SelectStmts: []*sqlparser.Select{getSelect(node.Left), getSelect(node.Right)},
- Sources: []LogicalOperator{opLHS, opRHS},
- OrderBy: node.OrderBy,
- Limit: node.Limit,
- }, nil
-}
-
-// createOperatorFromSelect creates an operator tree that represents the input SELECT query
-func createOperatorFromSelect(sel *sqlparser.Select, semTable *semantics.SemTable) (LogicalOperator, error) {
- subq, err := createSubqueryFromStatement(sel, semTable)
- if err != nil {
- return nil, err
- }
- op, err := crossJoin(sel.From, semTable)
- if err != nil {
- return nil, err
- }
- if sel.Where != nil {
- exprs := sqlparser.SplitAndExpression(nil, sel.Where.Expr)
- for _, expr := range exprs {
- op, err = op.PushPredicate(sqlparser.RemoveKeyspaceFromColName(expr), semTable)
- if err != nil {
- return nil, err
- }
- addColumnEquality(semTable, expr)
- }
- }
- if subq == nil {
- return op, nil
- }
- subq.Outer = op
- return subq, nil
-}
-
-func createOperatorFromUpdate(updStmt *sqlparser.Update, semTable *semantics.SemTable) (LogicalOperator, error) {
- tableInfo, qt, err := createQueryTableForDML(updStmt.TableExprs[0], semTable, updStmt.Where)
- if err != nil {
- return nil, err
- }
-
- assignments := make(map[string]sqlparser.Expr)
- for _, set := range updStmt.Exprs {
- assignments[set.Name.Name.String()] = set.Expr
- }
-
- u := &Update{
- Table: qt,
- Assignments: assignments,
- AST: updStmt,
- TableInfo: tableInfo,
- }
-
- subq, err := createSubqueryFromStatement(updStmt, semTable)
- if err != nil {
- return nil, err
- }
- if subq == nil {
- return u, nil
- }
- subq.Outer = u
- return subq, nil
-}
-
-func createQueryTableForDML(tableExpr sqlparser.TableExpr, semTable *semantics.SemTable, whereClause *sqlparser.Where) (semantics.TableInfo, *QueryTable, error) {
- alTbl, ok := tableExpr.(*sqlparser.AliasedTableExpr)
- if !ok {
- return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected AliasedTableExpr")
- }
- tblName, ok := alTbl.Expr.(sqlparser.TableName)
- if !ok {
- return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected TableName")
- }
-
- tableID := semTable.TableSetFor(alTbl)
- tableInfo, err := semTable.TableInfoFor(tableID)
- if err != nil {
- return nil, nil, err
- }
-
- if tableInfo.IsInfSchema() {
- return nil, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't update information schema tables")
- }
-
- var predicates []sqlparser.Expr
- if whereClause != nil {
- predicates = sqlparser.SplitAndExpression(nil, whereClause.Expr)
- }
- qt := &QueryTable{
- ID: tableID,
- Alias: alTbl,
- Table: tblName,
- Predicates: predicates,
- IsInfSchema: false,
- }
- return tableInfo, qt, nil
-}
-
-func createOperatorFromDelete(deleteStmt *sqlparser.Delete, semTable *semantics.SemTable) (LogicalOperator, error) {
- tableInfo, qt, err := createQueryTableForDML(deleteStmt.TableExprs[0], semTable, deleteStmt.Where)
- if err != nil {
- return nil, err
- }
-
- u := &Delete{
- Table: qt,
- AST: deleteStmt,
- TableInfo: tableInfo,
- }
-
- subq, err := createSubqueryFromStatement(deleteStmt, semTable)
- if err != nil {
- return nil, err
- }
- if subq == nil {
- return u, nil
- }
- subq.Outer = u
- return subq, nil
-}
-
-func createSubqueryFromStatement(stmt sqlparser.Statement, semTable *semantics.SemTable) (*SubQuery, error) {
- if len(semTable.SubqueryMap[stmt]) == 0 {
- return nil, nil
- }
- subq := &SubQuery{}
- for _, sq := range semTable.SubqueryMap[stmt] {
- opInner, err := CreateLogicalOperatorFromAST(sq.Subquery.Select, semTable)
- if err != nil {
- return nil, err
- }
- subq.Inner = append(subq.Inner, &SubQueryInner{
- ExtractedSubquery: sq,
- Inner: opInner,
- })
- }
- return subq, nil
-}
-
-func addColumnEquality(semTable *semantics.SemTable, expr sqlparser.Expr) {
- switch expr := expr.(type) {
- case *sqlparser.ComparisonExpr:
- if expr.Operator != sqlparser.EqualOp {
- return
- }
-
- if left, isCol := expr.Left.(*sqlparser.ColName); isCol {
- semTable.AddColumnEquality(left, expr.Right)
- }
- if right, isCol := expr.Right.(*sqlparser.ColName); isCol {
- semTable.AddColumnEquality(right, expr.Left)
- }
- }
-}
-
-func createJoin(LHS, RHS LogicalOperator) LogicalOperator {
- lqg, lok := LHS.(*QueryGraph)
- rqg, rok := RHS.(*QueryGraph)
- if lok && rok {
- op := &QueryGraph{
- Tables: append(lqg.Tables, rqg.Tables...),
- innerJoins: append(lqg.innerJoins, rqg.innerJoins...),
- NoDeps: sqlparser.AndExpressions(lqg.NoDeps, rqg.NoDeps),
- }
- return op
- }
- return &Join{LHS: LHS, RHS: RHS}
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/operator_test_data.txt b/go/vt/vtgate/planbuilder/abstract/operator_test_data.txt
deleted file mode 100644
index 8e162a84ca1..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/operator_test_data.txt
+++ /dev/null
@@ -1,478 +0,0 @@
-(select id from unsharded union all select id from unsharded_auto order by id) union select id from user
-Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- },
- QueryGraph: {
- Tables:
- TableSet{2}:`user`
- }
-}
-
-select id from unsharded union select id from unsharded_auto
-Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- }
-}
-
-select id from unsharded union all select id from unsharded_auto
-Concatenate {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- }
-}
-
-(select id from unsharded union all select id from unsharded_auto limit 10) union select id from x order by id
-Concatenate(distinct) {
- Concatenate {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- },
- limit 10
- },
- QueryGraph: {
- Tables:
- TableSet{2}:x
- },
- order by id asc
-}
-
-(select id from unsharded union all select id from unsharded_auto) union all select id from x
-Concatenate {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- },
- QueryGraph: {
- Tables:
- TableSet{2}:x
- }
-}
-
-(select id from unsharded union select id from unsharded_auto) union select id from x
-Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- },
- QueryGraph: {
- Tables:
- TableSet{2}:x
- }
-}
-
-(select id from unsharded union select id from unsharded_auto) union all select id from x
-Concatenate {
- Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- }
- },
- QueryGraph: {
- Tables:
- TableSet{2}:x
- }
-}
-
-select * from t
-QueryGraph: {
-Tables:
- TableSet{0}:t
-}
-
-select t.c from t,y,z where t.c = y.c and (t.a = z.a or t.a = y.a) and 1 < 2
-QueryGraph: {
-Tables:
- TableSet{0}:t
- TableSet{1}:y
- TableSet{2}:z
-JoinPredicates:
- TableSet{0,1,2} - t.a = z.a or t.a = y.a
- TableSet{0,1} - t.c = y.c
-ForAll: 1 < 2
-}
-
-select t.c from t join y on t.id = y.t_id join z on t.id = z.t_id where t.name = 'foo' and y.col = 42 and z.baz = 101
-QueryGraph: {
-Tables:
- TableSet{0}:t where t.`name` = 'foo'
- TableSet{1}:y where y.col = 42
- TableSet{2}:z where z.baz = 101
-JoinPredicates:
- TableSet{0,1} - t.id = y.t_id
- TableSet{0,2} - t.id = z.t_id
-}
-
-select t.c from t,y,z where t.name = 'foo' and y.col = 42 and z.baz = 101 and t.id = y.t_id and t.id = z.t_id
-QueryGraph: {
-Tables:
- TableSet{0}:t where t.`name` = 'foo'
- TableSet{1}:y where y.col = 42
- TableSet{2}:z where z.baz = 101
-JoinPredicates:
- TableSet{0,1} - t.id = y.t_id
- TableSet{0,2} - t.id = z.t_id
-}
-
-select 1 from t where '1' = 1 and 12 = '12'
-QueryGraph: {
-Tables:
- TableSet{0}:t
-ForAll: '1' = 1 and 12 = '12'
-}
-
-select 1 from t left join s on t.id = s.id
-OuterJoin: {
- Inner: QueryGraph: {
- Tables:
- TableSet{0}:t
- }
- Outer: QueryGraph: {
- Tables:
- TableSet{1}:s
- }
- Predicate: t.id = s.id
-}
-
-select 1 from t join s on t.id = s.id and t.name = s.name
-QueryGraph: {
-Tables:
- TableSet{0}:t
- TableSet{1}:s
-JoinPredicates:
- TableSet{0,1} - t.id = s.id and t.`name` = s.`name`
-}
-
-select 1 from t left join s on t.id = s.id where t.name = 'Mister'
-OuterJoin: {
- Inner: QueryGraph: {
- Tables:
- TableSet{0}:t where t.`name` = 'Mister'
- }
- Outer: QueryGraph: {
- Tables:
- TableSet{1}:s
- }
- Predicate: t.id = s.id
-}
-
-select 1 from t right join s on t.id = s.id
-OuterJoin: {
- Inner: QueryGraph: {
- Tables:
- TableSet{1}:s
- }
- Outer: QueryGraph: {
- Tables:
- TableSet{0}:t
- }
- Predicate: t.id = s.id
-}
-
-select 1 from (a left join b on a.id = b.id) join (c left join d on c.id = d.id) on a.id = c.id
-Join: {
- LHS: OuterJoin: {
- Inner: QueryGraph: {
- Tables:
- TableSet{0}:a
- }
- Outer: QueryGraph: {
- Tables:
- TableSet{1}:b
- }
- Predicate: a.id = b.id
- }
- RHS: OuterJoin: {
- Inner: QueryGraph: {
- Tables:
- TableSet{2}:c
- }
- Outer: QueryGraph: {
- Tables:
- TableSet{3}:d
- }
- Predicate: c.id = d.id
- }
- Predicate: a.id = c.id
-}
-
-select 1 from (select 42 as id from tbl) as t
-Derived t: {
- Query: select 42 as id from tbl
- Inner: QueryGraph: {
- Tables:
- TableSet{0}:tbl
- }
-}
-
-select 1 from (select id from tbl limit 10) as t join (select foo, count(*) from usr group by foo) as s on t.id = s.foo
-Join: {
- LHS: Derived t: {
- Query: select id from tbl limit 10
- Inner: QueryGraph: {
- Tables:
- TableSet{0}:tbl
- }
- }
- RHS: Derived s: {
- Query: select foo, count(*) from usr group by foo
- Inner: QueryGraph: {
- Tables:
- TableSet{2}:usr
- }
- }
- Predicate: t.id = s.foo
-}
-
-select (select 1) from t where exists (select 1) and id in (select 1)
-SubQuery: {
- SubQueries: [
- {
- Type: PulloutValue
- Query: QueryGraph: {
- Tables:
- TableSet{1}:dual
- }
- }
- {
- Type: PulloutExists
- Query: QueryGraph: {
- Tables:
- TableSet{2}:dual
- }
- }
- {
- Type: PulloutIn
- Query: QueryGraph: {
- Tables:
- TableSet{3}:dual
- }
- }]
- Outer: QueryGraph: {
- Tables:
- TableSet{0}:t where id in (select 1 from dual)
- ForAll: exists (select 1 from dual)
- }
-}
-
-select u.id from user u where u.id = (select id from user_extra where id = u.id)
-SubQuery: {
- SubQueries: [
- {
- Type: PulloutValue
- Query: QueryGraph: {
- Tables:
- TableSet{1}:user_extra
- JoinPredicates:
- TableSet{0,1} - id = u.id
- }
- }]
- Outer: QueryGraph: {
- Tables:
- TableSet{0}:`user` AS u where u.id = (select id from user_extra where id = u.id)
- }
-}
-
-select id from user_index where id = :id
-Vindex: {
- Name: user_index
- Value: :id
-}
-
-select ui.id from user_index as ui join user as u where ui.id = 1 and ui.id = u.id
-Join: {
- LHS: Vindex: {
- Name: user_index
- Value: 1
- }
- RHS: QueryGraph: {
- Tables:
- TableSet{1}:`user` AS u
- }
- Predicate: ui.id = u.id
-}
-
-select u.id from (select id from user_index where id = 2) as u
-Derived u: {
- Query: select id from user_index where id = 2
- Inner: Vindex: {
- Name: user_index
- Value: 2
- }
-}
-
-select 1 from a union select 2 from b
-Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:a
- },
- QueryGraph: {
- Tables:
- TableSet{1}:b
- }
-}
-
-select 1 from a union select 2 from b union select 3 from c
-Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:a
- },
- QueryGraph: {
- Tables:
- TableSet{1}:b
- },
- QueryGraph: {
- Tables:
- TableSet{2}:c
- }
-}
-
-select 1 from a union select 2 from b union select 3 from c union all select 4 from d
-Concatenate {
- Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:a
- },
- QueryGraph: {
- Tables:
- TableSet{1}:b
- },
- QueryGraph: {
- Tables:
- TableSet{2}:c
- }
- },
- QueryGraph: {
- Tables:
- TableSet{3}:d
- }
-}
-
-select id from unsharded union select id from unsharded_auto order by id
-Concatenate(distinct) {
- QueryGraph: {
- Tables:
- TableSet{0}:unsharded
- },
- QueryGraph: {
- Tables:
- TableSet{1}:unsharded_auto
- },
- order by id asc
-}
-
-select id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)
-SubQuery: {
- SubQueries: [
- {
- Type: PulloutExists
- Query: QueryGraph: {
- Tables:
- TableSet{1}:user_extra where user_id = 3
- JoinPredicates:
- TableSet{0,1} - user_id < `user`.id
- }
- }]
- Outer: QueryGraph: {
- Tables:
- TableSet{0}:`user` where exists (select user_id from user_extra where user_id = 3 and user_id < `user`.id)
- }
-}
-
-# we should remove the keyspace from predicates
-select ks.tbl.col from ks.tbl where ks.tbl.id = 1
-QueryGraph: {
-Tables:
- TableSet{0}:ks.tbl where tbl.id = 1
-}
-
-select 1 from ks.t join ks.y on ks.t.id = ks.y.t_id
-QueryGraph: {
-Tables:
- TableSet{0}:ks.t
- TableSet{1}:ks.y
-JoinPredicates:
- TableSet{0,1} - t.id = y.t_id
-}
-
-select 1 from ks.t left join ks.y on ks.t.id = ks.y.t_id
-OuterJoin: {
- Inner: QueryGraph: {
- Tables:
- TableSet{0}:ks.t
- }
- Outer: QueryGraph: {
- Tables:
- TableSet{1}:ks.y
- }
- Predicate: t.id = y.t_id
-}
-
-update tbl set col1 = apa
-Update {
- table: TableSet{0}:tbl
-assignments:
- col1 = apa
-}
-
-update tbl set col1 = 'apa', col2 = 1337 where id = 12 and name = 'gangal'
-Update {
- table: TableSet{0}:tbl where id = 12 and `name` = 'gangal'
-assignments:
- col1 = 'apa'
- col2 = 1337
-}
-
-update user set u = 1 where id = (select id from user_extra where id = 42)
-SubQuery: {
- SubQueries: [
- {
- Type: PulloutValue
- Query: QueryGraph: {
- Tables:
- TableSet{1}:user_extra where id = 42
- }
- }]
- Outer: Update {
- table: TableSet{0}:`user` where id = (select id from user_extra where id = 42)
- assignments:
- u = 1
- }
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/subquery.go b/go/vt/vtgate/planbuilder/abstract/subquery.go
deleted file mode 100644
index 4811e937e3e..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/subquery.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-// SubQuery stores the information about subquery
-type SubQuery struct {
- Inner []*SubQueryInner
- Outer LogicalOperator
-}
-
-var _ LogicalOperator = (*SubQuery)(nil)
-
-func (*SubQuery) iLogical() {}
-
-// SubQueryInner stores the subquery information for a select statement
-type SubQueryInner struct {
- // Inner is the Operator inside the parenthesis of the subquery.
- // i.e: select (select 1 union select 1), the Inner here would be
- // of type Concatenate since we have a Union.
- Inner LogicalOperator
-
- // ExtractedSubquery contains all information we need about this subquery
- ExtractedSubquery *sqlparser.ExtractedSubquery
-}
-
-// TableID implements the Operator interface
-func (s *SubQuery) TableID() semantics.TableSet {
- ts := s.Outer.TableID()
- for _, inner := range s.Inner {
- ts = ts.Merge(inner.Inner.TableID())
- }
- return ts
-}
-
-// PushPredicate implements the Operator interface
-func (s *SubQuery) PushPredicate(sqlparser.Expr, *semantics.SemTable) (LogicalOperator, error) {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] should not try to push predicate on subquery")
-}
-
-// UnsolvedPredicates implements the Operator interface
-func (s *SubQuery) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- ts := s.TableID()
- var result []sqlparser.Expr
-
- for _, expr := range s.Outer.UnsolvedPredicates(semTable) {
- deps := semTable.DirectDeps(expr)
- if !deps.IsSolvedBy(ts) {
- result = append(result, expr)
- }
- }
- for _, inner := range s.Inner {
- for _, expr := range inner.Inner.UnsolvedPredicates(semTable) {
- deps := semTable.DirectDeps(expr)
- if !deps.IsSolvedBy(ts) {
- result = append(result, expr)
- }
- }
- }
- return result
-}
-
-// CheckValid implements the Operator interface
-func (s *SubQuery) CheckValid() error {
- for _, inner := range s.Inner {
- err := inner.Inner.CheckValid()
- if err != nil {
- return err
- }
- }
- return s.Outer.CheckValid()
-}
-
-// Compact implements the Operator interface
-func (s *SubQuery) Compact(*semantics.SemTable) (LogicalOperator, error) {
- return s, nil
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/update.go b/go/vt/vtgate/planbuilder/abstract/update.go
deleted file mode 100644
index 018dc1c3b36..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/update.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-type Update struct {
- Table *QueryTable
- TableInfo semantics.TableInfo
- Assignments map[string]sqlparser.Expr
- AST *sqlparser.Update
-}
-
-var _ LogicalOperator = (*Update)(nil)
-
-// TableID implements the LogicalOperator interface
-func (u *Update) TableID() semantics.TableSet {
- return u.Table.ID
-}
-
-// UnsolvedPredicates implements the LogicalOperator interface
-func (u *Update) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return nil
-}
-
-// CheckValid implements the LogicalOperator interface
-func (u *Update) CheckValid() error {
- return nil
-}
-
-// iLogical implements the LogicalOperator interface
-func (u *Update) iLogical() {}
-
-// PushPredicate implements the LogicalOperator interface
-func (u *Update) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "can't accept predicates")
-}
-
-// Compact implements the LogicalOperator interface
-func (u *Update) Compact(semTable *semantics.SemTable) (LogicalOperator, error) {
- return u, nil
-}
diff --git a/go/vt/vtgate/planbuilder/abstract/vindex.go b/go/vt/vtgate/planbuilder/abstract/vindex.go
deleted file mode 100644
index 49b453e25e2..00000000000
--- a/go/vt/vtgate/planbuilder/abstract/vindex.go
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package abstract
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/engine"
- "vitess.io/vitess/go/vt/vtgate/semantics"
- "vitess.io/vitess/go/vt/vtgate/vindexes"
-)
-
-type (
- // Vindex stores the information about the vindex query
- Vindex struct {
- OpCode engine.VindexOpcode
- Table VindexTable
- Vindex vindexes.Vindex
- Value sqlparser.Expr
- }
-
- // VindexTable contains information about the vindex table we want to query
- VindexTable struct {
- TableID semantics.TableSet
- Alias *sqlparser.AliasedTableExpr
- Table sqlparser.TableName
- Predicates []sqlparser.Expr
- VTable *vindexes.Table
- }
-)
-
-var _ LogicalOperator = (*Vindex)(nil)
-
-func (*Vindex) iLogical() {}
-
-// TableID implements the Operator interface
-func (v *Vindex) TableID() semantics.TableSet {
- return v.Table.TableID
-}
-
-const vindexUnsupported = "unsupported: where clause for vindex function must be of the form id = or id in(,...)"
-
-// PushPredicate implements the Operator interface
-func (v *Vindex) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- for _, e := range sqlparser.SplitAndExpression(nil, expr) {
- deps := semTable.RecursiveDeps(e)
- if deps.NumberOfTables() > 1 {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, vindexUnsupported+" (multiple tables involved)")
- }
- // check if we already have a predicate
- if v.OpCode != engine.VindexNone {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, vindexUnsupported+" (multiple filters)")
- }
-
- // check LHS
- comparison, ok := e.(*sqlparser.ComparisonExpr)
- if !ok {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, vindexUnsupported+" (not a comparison)")
- }
- if comparison.Operator != sqlparser.EqualOp && comparison.Operator != sqlparser.InOp {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, vindexUnsupported+" (not equality)")
- }
- colname, ok := comparison.Left.(*sqlparser.ColName)
- if !ok {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, vindexUnsupported+" (lhs is not a column)")
- }
- if !colname.Name.EqualString("id") {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, vindexUnsupported+" (lhs is not id)")
- }
-
- // check RHS
- var err error
- if sqlparser.IsValue(comparison.Right) || sqlparser.IsSimpleTuple(comparison.Right) {
- v.Value = comparison.Right
- } else {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, vindexUnsupported+" (rhs is not a value)")
- }
- if err != nil {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, vindexUnsupported+": %v", err)
- }
- v.OpCode = engine.VindexMap
- v.Table.Predicates = append(v.Table.Predicates, e)
- }
- return v, nil
-}
-
-// UnsolvedPredicates implements the Operator interface
-func (v *Vindex) UnsolvedPredicates(*semantics.SemTable) []sqlparser.Expr {
- return nil
-}
-
-// CheckValid implements the Operator interface
-func (v *Vindex) CheckValid() error {
- if len(v.Table.Predicates) == 0 {
- return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: where clause for vindex function must be of the form id = or id in(,...) (where clause missing)")
- }
-
- return nil
-}
-
-// Compact implements the Operator interface
-func (v *Vindex) Compact(*semantics.SemTable) (LogicalOperator, error) {
- return v, nil
-}
diff --git a/go/vt/vtgate/planbuilder/aggregation_pushing.go b/go/vt/vtgate/planbuilder/aggregation_pushing.go
index e719c72fdfa..15367f9e3e8 100644
--- a/go/vt/vtgate/planbuilder/aggregation_pushing.go
+++ b/go/vt/vtgate/planbuilder/aggregation_pushing.go
@@ -17,13 +17,13 @@ limitations under the License.
package planbuilder
import (
+ "fmt"
"strconv"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
)
@@ -34,8 +34,8 @@ import (
func (hp *horizonPlanning) pushAggregation(
ctx *plancontext.PlanningContext,
plan logicalPlan,
- grouping []abstract.GroupBy,
- aggregations []abstract.Aggr,
+ grouping []operators.GroupBy,
+ aggregations []operators.Aggr,
ignoreOutputOrder bool,
) (output logicalPlan,
groupingOffsets []offsets,
@@ -83,7 +83,7 @@ func (hp *horizonPlanning) pushAggregation(
var offset int
aggrExpr, ok := aggr.Original.Expr.(sqlparser.AggrFunc)
if !ok {
- return nil, nil, nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: unexpected expression: %v", aggr.Original)
+ return nil, nil, nil, false, vterrors.VT13001(fmt.Sprintf("unexpected expression: %v", aggr.Original))
}
switch aggrExpr.(type) {
@@ -91,7 +91,7 @@ func (hp *horizonPlanning) pushAggregation(
offset = 0
default:
if len(aggrExpr.GetArgs()) != 1 {
- return nil, nil, nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: unexpected expression: %v", aggrExpr)
+ return nil, nil, nil, false, vterrors.VT13001(fmt.Sprintf("unexpected expression: %v", aggrExpr))
}
offset, _, err = pushProjection(ctx, &sqlparser.AliasedExpr{Expr: aggrExpr.GetArg() /*As: expr.As*/}, plan.input, true, true, false)
}
@@ -105,7 +105,7 @@ func (hp *horizonPlanning) pushAggregation(
return
default:
- err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "using aggregation on top of a %T plan is not yet supported", plan)
+ err = vterrors.VT12001(fmt.Sprintf("using aggregation on top of a %T plan", plan))
return
}
}
@@ -113,8 +113,8 @@ func (hp *horizonPlanning) pushAggregation(
func pushAggrOnRoute(
ctx *plancontext.PlanningContext,
plan *routeGen4,
- aggregations []abstract.Aggr,
- grouping []abstract.GroupBy,
+ aggregations []operators.Aggr,
+ grouping []operators.GroupBy,
ignoreOutputOrder bool,
) (
groupingOffsets []offsets,
@@ -125,7 +125,7 @@ func pushAggrOnRoute(
columnOrderMatters := !ignoreOutputOrder
sel, isSel := plan.Select.(*sqlparser.Select)
if !isSel {
- return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't plan aggregation on union")
+ return nil, nil, nil, vterrors.VT12001("plan aggregation on union")
}
var groupingCols []int
@@ -146,7 +146,7 @@ func pushAggrOnRoute(
} else {
// if we haven't already pushed the aggregations, now is the time
for _, aggregation := range aggregations {
- param := addAggregationToSelect(sel, aggregation)
+ param := addAggregationToSelect(ctx, sel, aggregation)
vtgateAggregation = append(vtgateAggregation, []offsets{param})
}
}
@@ -193,7 +193,7 @@ func pushAggrsAndGroupingInOrder(
for it.next() {
groupBy, aggregation := it.current()
if aggregation != nil {
- param := addAggregationToSelect(sel, *aggregation)
+ param := addAggregationToSelect(ctx, sel, *aggregation)
vtgateAggregation = append(vtgateAggregation, []offsets{param})
continue
}
@@ -210,14 +210,14 @@ func pushAggrsAndGroupingInOrder(
}
// addAggregationToSelect adds the aggregation to the SELECT statement and returns the AggregateParams to be used outside
-func addAggregationToSelect(sel *sqlparser.Select, aggregation abstract.Aggr) offsets {
+func addAggregationToSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select, aggregation operators.Aggr) offsets {
// TODO: removing duplicated aggregation expression should also be done at the join level
for i, expr := range sel.SelectExprs {
aliasedExpr, isAliasedExpr := expr.(*sqlparser.AliasedExpr)
if !isAliasedExpr {
continue
}
- if sqlparser.EqualsExpr(aliasedExpr.Expr, aggregation.Original.Expr) {
+ if ctx.SemTable.EqualsExpr(aliasedExpr.Expr, aggregation.Original.Expr) {
return newOffset(i)
}
}
@@ -226,10 +226,10 @@ func addAggregationToSelect(sel *sqlparser.Select, aggregation abstract.Aggr) of
return newOffset(len(sel.SelectExprs) - 1)
}
-func countStarAggr() *abstract.Aggr {
+func countStarAggr() *operators.Aggr {
f := &sqlparser.CountStar{}
- return &abstract.Aggr{
+ return &operators.Aggr{
Original: &sqlparser.AliasedExpr{Expr: f},
OpCode: engine.AggregateCountStar,
Alias: "count(*)",
@@ -251,8 +251,8 @@ vtgate level, we can offload most of the work to MySQL, and at the vtgate just s
func (hp *horizonPlanning) pushAggrOnJoin(
ctx *plancontext.PlanningContext,
join *joinGen4,
- grouping []abstract.GroupBy,
- aggregations []abstract.Aggr,
+ grouping []operators.GroupBy,
+ aggregations []operators.Aggr,
) ([]offsets, [][]offsets, error) {
// First we separate aggregations according to which side the dependencies are coming from
lhsAggrs, rhsAggrs, err := splitAggregationsToLeftAndRight(ctx, aggregations, join)
@@ -287,7 +287,7 @@ func (hp *horizonPlanning) pushAggrOnJoin(
return nil, nil, err
}
l = sqlparser.NewIntLiteral(strconv.Itoa(offset + 1))
- rhsGrouping = append(rhsGrouping, abstract.GroupBy{Inner: l})
+ rhsGrouping = append(rhsGrouping, operators.GroupBy{Inner: l})
}
// Next we push the aggregations to both sides
@@ -356,8 +356,8 @@ That way we get the aggregation grouped by the column we need to use to decide i
func (hp *horizonPlanning) pushAggrOnSemiJoin(
ctx *plancontext.PlanningContext,
join *semiJoin,
- grouping []abstract.GroupBy,
- aggregations []abstract.Aggr,
+ grouping []operators.GroupBy,
+ aggregations []operators.Aggr,
ignoreOutputOrder bool,
) ([]offsets, [][]offsets, bool, error) {
// We need to group by the columns used in the join condition.
@@ -391,12 +391,12 @@ func (hp *horizonPlanning) pushAggrOnSemiJoin(
func (hp *horizonPlanning) filteredPushAggregation(
ctx *plancontext.PlanningContext,
plan logicalPlan,
- grouping []abstract.GroupBy,
- aggregations []*abstract.Aggr,
+ grouping []operators.GroupBy,
+ aggregations []*operators.Aggr,
ignoreOutputOrder bool,
) (out logicalPlan, groupingOffsets []offsets, outputAggrs [][]offsets, pushed bool, err error) {
used := make([]bool, len(aggregations))
- var aggrs []abstract.Aggr
+ var aggrs []operators.Aggr
for idx, aggr := range aggregations {
if aggr != nil {
@@ -429,12 +429,16 @@ func isMinOrMax(in engine.AggregateOpcode) bool {
}
}
+func isRandom(in engine.AggregateOpcode) bool {
+ return in == engine.AggregateRandom
+}
+
func splitAggregationsToLeftAndRight(
ctx *plancontext.PlanningContext,
- aggregations []abstract.Aggr,
+ aggregations []operators.Aggr,
join *joinGen4,
-) ([]*abstract.Aggr, []*abstract.Aggr, error) {
- var lhsAggrs, rhsAggrs []*abstract.Aggr
+) ([]*operators.Aggr, []*operators.Aggr, error) {
+ var lhsAggrs, rhsAggrs []*operators.Aggr
for _, aggr := range aggregations {
newAggr := aggr
if _, ok := aggr.Original.Expr.(*sqlparser.CountStar); ok {
@@ -442,9 +446,9 @@ func splitAggregationsToLeftAndRight(
rhsAggrs = append(rhsAggrs, &newAggr)
} else {
deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr)
- var other *abstract.Aggr
- // if we are sending down min/max, we don't have to multiply the results with anything
- if !isMinOrMax(aggr.OpCode) {
+ var other *operators.Aggr
+ // if we are sending down min/max/random, we don't have to multiply the results with anything
+ if !isMinOrMax(aggr.OpCode) && !isRandom(aggr.OpCode) {
other = countStarAggr()
}
switch {
@@ -455,7 +459,7 @@ func splitAggregationsToLeftAndRight(
rhsAggrs = append(rhsAggrs, &newAggr)
lhsAggrs = append(lhsAggrs, other)
default:
- return nil, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "aggregation on columns from different sources not supported yet")
+ return nil, nil, vterrors.VT12001("aggregation on columns from different sources")
}
}
}
@@ -465,9 +469,9 @@ func splitAggregationsToLeftAndRight(
func splitGroupingsToLeftAndRight(
ctx *plancontext.PlanningContext,
join *joinGen4,
- grouping, lhsGrouping []abstract.GroupBy,
-) ([]abstract.GroupBy, []abstract.GroupBy, []int, error) {
- var rhsGrouping []abstract.GroupBy
+ grouping, lhsGrouping []operators.GroupBy,
+) ([]operators.GroupBy, []operators.GroupBy, []int, error) {
+ var rhsGrouping []operators.GroupBy
lhsTS := join.Left.ContainsTables()
rhsTS := join.Right.ContainsTables()
@@ -484,7 +488,7 @@ func splitGroupingsToLeftAndRight(
groupingOffsets = append(groupingOffsets, len(rhsGrouping)+1)
rhsGrouping = append(rhsGrouping, groupBy)
default:
- return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "grouping on columns from different sources not supported yet")
+ return nil, nil, nil, vterrors.VT12001("grouping on columns from different sources")
}
}
return lhsGrouping, rhsGrouping, groupingOffsets, nil
@@ -493,16 +497,16 @@ func splitGroupingsToLeftAndRight(
type (
reorgFunc = func(groupByOffsets []offsets, aggrOffsets [][]offsets) ([]offsets, [][]offsets)
sortedIterator struct {
- grouping []abstract.GroupBy
- aggregations []abstract.Aggr
- valueGB *abstract.GroupBy
- valueA *abstract.Aggr
+ grouping []operators.GroupBy
+ aggregations []operators.Aggr
+ valueGB *operators.GroupBy
+ valueA *operators.Aggr
groupbyIdx int
aggrIdx int
}
)
-func (it *sortedIterator) current() (*abstract.GroupBy, *abstract.Aggr) {
+func (it *sortedIterator) current() (*operators.GroupBy, *operators.Aggr) {
return it.valueGB, it.valueA
}
@@ -510,7 +514,7 @@ func (it *sortedIterator) next() bool {
if it.aggrIdx < len(it.aggregations) && it.groupbyIdx < len(it.grouping) {
aggregation := it.aggregations[it.aggrIdx]
groupBy := it.grouping[it.groupbyIdx]
- if abstract.CompareRefInt(aggregation.Index, groupBy.InnerIndex) {
+ if operators.CompareRefInt(aggregation.Index, groupBy.InnerIndex) {
it.aggrIdx++
it.valueA, it.valueGB = &aggregation, nil
return true
@@ -539,13 +543,13 @@ func passThrough(groupByOffsets []offsets, aggrOffsets [][]offsets) ([]offsets,
return groupByOffsets, aggrOffsets
}
-func sortOffsets(grouping []abstract.GroupBy, aggregations []abstract.Aggr) ([]abstract.GroupBy, reorgFunc, *sortedIterator) {
- originalGrouping := make([]abstract.GroupBy, len(grouping))
- originalAggr := make([]abstract.Aggr, len(aggregations))
+func sortOffsets(grouping []operators.GroupBy, aggregations []operators.Aggr) ([]operators.GroupBy, reorgFunc, *sortedIterator) {
+ originalGrouping := make([]operators.GroupBy, len(grouping))
+ originalAggr := make([]operators.Aggr, len(aggregations))
copy(originalAggr, aggregations)
copy(originalGrouping, grouping)
- abstract.SortAggregations(aggregations)
- abstract.SortGrouping(grouping)
+ operators.SortAggregations(aggregations)
+ operators.SortGrouping(grouping)
reorg := func(groupByOffsets []offsets, aggrOffsets [][]offsets) ([]offsets, [][]offsets) {
orderedGroupingOffsets := make([]offsets, 0, len(originalGrouping))
diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go
index ce299ae0f38..1181c9b2c8b 100644
--- a/go/vt/vtgate/planbuilder/builder.go
+++ b/go/vt/vtgate/planbuilder/builder.go
@@ -32,8 +32,6 @@ import (
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/semantics"
"vitess.io/vitess/go/vt/vtgate/vindexes"
-
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
const (
@@ -83,7 +81,7 @@ func tablesFromSemantics(semTable *semantics.SemTable) []string {
if vindexTable == nil {
continue
}
- tables[vindexTable.ToString()] = nil
+ tables[vindexTable.String()] = nil
}
names := make([]string, 0, len(tables))
@@ -101,7 +99,7 @@ func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*e
if err != nil {
return nil, err
}
- result, err := sqlparser.RewriteAST(stmt, keyspace, sqlparser.SQLSelectLimitUnset, "", nil)
+ result, err := sqlparser.RewriteAST(stmt, keyspace, sqlparser.SQLSelectLimitUnset, "", nil, vschema)
if err != nil {
return nil, err
}
@@ -242,12 +240,16 @@ func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars *
return buildShowMigrationLogsPlan(query, vschema, enableOnlineDDL)
case *sqlparser.ShowThrottledApps:
return buildShowThrottledAppsPlan(query, vschema)
+ case *sqlparser.ShowThrottlerStatus:
+ return buildShowThrottlerStatusPlan(query, vschema)
case *sqlparser.AlterVschema:
return buildVSchemaDDLPlan(stmt, vschema)
case *sqlparser.Use:
return buildUsePlan(stmt)
case sqlparser.Explain:
return buildExplainPlan(stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
+ case *sqlparser.VExplainStmt:
+ return buildVExplainPlan(stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
case *sqlparser.OtherRead, *sqlparser.OtherAdmin:
return buildOtherReadAndAdmin(query, vschema)
case *sqlparser.Set:
@@ -256,8 +258,6 @@ func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars *
return buildLoadPlan(query, vschema)
case sqlparser.DBDDLStatement:
return buildRoutePlan(stmt, reservedVars, vschema, buildDBDDLPlan)
- case *sqlparser.SetTransaction:
- return buildRoutePlan(stmt, reservedVars, vschema, buildSetTxPlan)
case *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, *sqlparser.Savepoint, *sqlparser.SRollback, *sqlparser.Release:
// Empty by design. Not executed by a plan
return nil, nil
@@ -281,7 +281,7 @@ func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars *
return newPlanResult(engine.NewRowsPrimitive(nil, nil)), nil
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected statement type: %T", stmt)
+ return nil, vterrors.VT13001(fmt.Sprintf("unexpected statement type: %T", stmt))
}
func buildDBDDLPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) {
@@ -302,31 +302,24 @@ func buildDBDDLPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vschema
return newPlanResult(engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0))), nil
}
if !ksExists {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.DbDropExists, "Can't drop database '%s'; database doesn't exists", ksName)
+ return nil, vterrors.VT05001(ksName)
}
return newPlanResult(engine.NewDBDDL(ksName, false, queryTimeout(dbDDL.Comments.Directives()))), nil
case *sqlparser.AlterDatabase:
if !ksExists {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Can't alter database '%s'; unknown database", ksName)
+ return nil, vterrors.VT05002(ksName)
}
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "alter database is not supported")
+ return nil, vterrors.VT12001("ALTER DATABASE")
case *sqlparser.CreateDatabase:
if dbDDL.IfNotExists && ksExists {
return newPlanResult(engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0))), nil
}
if !dbDDL.IfNotExists && ksExists {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_ALREADY_EXISTS, vterrors.DbCreateExists, "Can't create database '%s'; database exists", ksName)
+ return nil, vterrors.VT06001(ksName)
}
return newPlanResult(engine.NewDBDDL(ksName, true, queryTimeout(dbDDL.Comments.Directives()))), nil
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] database ddl not recognized: %s", sqlparser.String(dbDDLstmt))
-}
-
-func buildSetTxPlan(_ sqlparser.Statement, _ *sqlparser.ReservedVars, _ plancontext.VSchema) (*planResult, error) {
- // TODO: This is a NOP, modeled off of tx_isolation and tx_read_only.
- // It's incredibly dangerous that it's a NOP, this will be fixed when it will be implemented.
- // This is currently the refactor of existing setup.
- return newPlanResult(engine.NewRowsPrimitive(nil, nil)), nil
+ return nil, vterrors.VT13001(fmt.Sprintf("database DDL not recognized: %s", sqlparser.String(dbDDLstmt)))
}
func buildLoadPlan(query string, vschema plancontext.VSchema) (*planResult, error) {
@@ -337,7 +330,7 @@ func buildLoadPlan(query string, vschema plancontext.VSchema) (*planResult, erro
destination := vschema.Destination()
if destination == nil {
- if err := vschema.ErrorIfShardedF(keyspace, "LOAD", "LOAD is not supported on sharded database"); err != nil {
+ if err := vschema.ErrorIfShardedF(keyspace, "LOAD", "LOAD is not supported on sharded keyspace"); err != nil {
return nil, err
}
destination = key.DestinationAnyShard{}
diff --git a/go/vt/vtgate/planbuilder/bypass.go b/go/vt/vtgate/planbuilder/bypass.go
index 3703dc84710..a5490e2231e 100644
--- a/go/vt/vtgate/planbuilder/bypass.go
+++ b/go/vt/vtgate/planbuilder/bypass.go
@@ -18,7 +18,6 @@ package planbuilder
import (
"vitess.io/vitess/go/vt/key"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -35,7 +34,7 @@ func buildPlanForBypass(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vsc
switch dest := vschema.Destination().(type) {
case key.DestinationExactKeyRange:
if _, ok := stmt.(*sqlparser.Insert); ok {
- return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", vschema.TargetString())
+ return nil, vterrors.VT03023(vschema.TargetString())
}
case key.DestinationShard:
if !vschema.IsShardRoutingEnabled() {
diff --git a/go/vt/vtgate/planbuilder/call_proc.go b/go/vt/vtgate/planbuilder/call_proc.go
index bb5c7a6bdb8..13fe5cc60e4 100644
--- a/go/vt/vtgate/planbuilder/call_proc.go
+++ b/go/vt/vtgate/planbuilder/call_proc.go
@@ -50,4 +50,4 @@ func buildCallProcPlan(stmt *sqlparser.CallProc, vschema plancontext.VSchema) (*
}), nil
}
-const errNotAllowWhenSharded = "CALL is not supported for sharded database"
+const errNotAllowWhenSharded = "CALL is not supported for sharded keyspace"
diff --git a/go/vt/vtgate/planbuilder/collations_test.go b/go/vt/vtgate/planbuilder/collations_test.go
index e86cf6aaff5..2a7ffebf91c 100644
--- a/go/vt/vtgate/planbuilder/collations_test.go
+++ b/go/vt/vtgate/planbuilder/collations_test.go
@@ -40,7 +40,7 @@ type collationTestCase struct {
func (tc *collationTestCase) run(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", false),
+ v: loadSchema(t, "vschemas/schema.json", false),
sysVarEnabled: true,
version: Gen4,
}
diff --git a/go/vt/vtgate/planbuilder/concatenate.go b/go/vt/vtgate/planbuilder/concatenate.go
index 92e31d3de7d..70b867b1146 100644
--- a/go/vt/vtgate/planbuilder/concatenate.go
+++ b/go/vt/vtgate/planbuilder/concatenate.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -76,7 +75,7 @@ func (c *concatenate) Primitive() engine.Primitive {
// Rewrite implements the logicalPlan interface
func (c *concatenate) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 2 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "concatenate: wrong number of inputs")
+ return vterrors.VT13001("concatenate: wrong number of inputs")
}
c.lhs = inputs[0]
c.rhs = inputs[1]
diff --git a/go/vt/vtgate/planbuilder/concatenateGen4.go b/go/vt/vtgate/planbuilder/concatenateGen4.go
index 152e4150961..fa12d24cf73 100644
--- a/go/vt/vtgate/planbuilder/concatenateGen4.go
+++ b/go/vt/vtgate/planbuilder/concatenateGen4.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -94,7 +93,7 @@ func (c *concatenateGen4) Primitive() engine.Primitive {
// Rewrite implements the logicalPlan interface
func (c *concatenateGen4) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != len(c.sources) {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "concatenateGen4: wrong number of inputs")
+ return vterrors.VT13001("concatenateGen4: wrong number of inputs")
}
c.sources = inputs
return nil
@@ -104,7 +103,7 @@ func (c *concatenateGen4) Rewrite(inputs ...logicalPlan) error {
func (c *concatenateGen4) ContainsTables() semantics.TableSet {
var tableSet semantics.TableSet
for _, source := range c.sources {
- tableSet.MergeInPlace(source.ContainsTables())
+ tableSet = tableSet.Merge(source.ContainsTables())
}
return tableSet
}
diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go
index e09ffb6f0f6..797f3a2e52c 100644
--- a/go/vt/vtgate/planbuilder/ddl.go
+++ b/go/vt/vtgate/planbuilder/ddl.go
@@ -1,8 +1,9 @@
package planbuilder
import (
+ "fmt"
+
"vitess.io/vitess/go/vt/key"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -45,7 +46,7 @@ func (fk *fkContraint) FkWalk(node sqlparser.SQLNode) (kontinue bool, err error)
}
// buildGeneralDDLPlan builds a general DDL plan, which can be either normal DDL or online DDL.
-// The two behave compeltely differently, and have two very different primitives.
+// The two behave completely differently, and have two very different primitives.
// We want to be able to dynamically choose between normal/online plans according to Session settings.
// However, due to caching of plans, we're unable to make that choice right now. In this function we don't have
// a session context. It's only when we Execute() the primitive that we have that context.
@@ -107,35 +108,30 @@ func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, reservedVars
var err error
switch ddl := ddlStatement.(type) {
- case *sqlparser.AlterTable, *sqlparser.TruncateTable:
+ case *sqlparser.AlterTable, *sqlparser.CreateTable, *sqlparser.TruncateTable:
err = checkFKError(vschema, ddlStatement)
if err != nil {
return nil, nil, err
}
- // For Alter Table and other statements, the table must already exist
- // We should find the target of the query from this tables location
+ // For ALTER TABLE and TRUNCATE TABLE, the table must already exist
+ //
+ // For CREATE TABLE, the table may (in the case of --declarative)
+ // already exist.
+ //
+ // We should find the target of the query from this tables location.
destination, keyspace, err = findTableDestinationAndKeyspace(vschema, ddlStatement)
case *sqlparser.CreateView:
destination, keyspace, err = buildCreateView(vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL)
case *sqlparser.AlterView:
destination, keyspace, err = buildAlterView(vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL)
- case *sqlparser.CreateTable:
- err = checkFKError(vschema, ddlStatement)
- if err != nil {
- return nil, nil, err
- }
- destination, keyspace, _, err = vschema.TargetDestination(ddlStatement.GetTable().Qualifier.String())
- if err != nil {
- return nil, nil, err
- }
- // Remove the keyspace name as the database name might be different.
- ddlStatement.SetTable("", ddlStatement.GetTable().Name.String())
- case *sqlparser.DropView, *sqlparser.DropTable:
- destination, keyspace, err = buildDropViewOrTable(vschema, ddlStatement)
+ case *sqlparser.DropView:
+ destination, keyspace, err = buildDropView(vschema, ddlStatement)
+ case *sqlparser.DropTable:
+ destination, keyspace, err = buildDropTable(vschema, ddlStatement)
case *sqlparser.RenameTable:
destination, keyspace, err = buildRenameTable(vschema, ddl)
default:
- return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected ddl statement type: %T", ddlStatement)
+ return nil, nil, vterrors.VT13001(fmt.Sprintf("unexpected DDL statement type: %T", ddlStatement))
}
if err != nil {
@@ -156,8 +152,6 @@ func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, reservedVars
Keyspace: keyspace,
TargetDestination: destination,
Query: query,
- IsDML: false,
- SingleShardOnly: false,
}, &engine.OnlineDDL{
Keyspace: keyspace,
TargetDestination: destination,
@@ -171,7 +165,7 @@ func checkFKError(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStateme
fk := &fkContraint{}
_ = sqlparser.Walk(fk.FkWalk, ddlStatement)
if fk.found {
- return vterrors.Errorf(vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/")
+ return vterrors.VT10001()
}
}
return nil
@@ -214,17 +208,24 @@ func buildAlterView(vschema plancontext.VSchema, ddl *sqlparser.AlterView, reser
if err != nil {
return nil, nil, err
}
- isRoutePlan, keyspaceName, opCode := tryToGetRoutePlan(selectPlan.primitive)
- if !isRoutePlan {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex)
+ selPlanKs := selectPlan.primitive.GetKeyspaceName()
+ if keyspace.Name != selPlanKs {
+ return nil, nil, vterrors.VT12001(ViewDifferentKeyspace)
}
- if keyspace.Name != keyspaceName {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewDifferentKeyspace)
+ if vschema.IsViewsEnabled() {
+ if keyspace == nil {
+ return nil, nil, vterrors.VT09005()
+ }
+ return destination, keyspace, nil
+ }
+ isRoutePlan, opCode := tryToGetRoutePlan(selectPlan.primitive)
+ if !isRoutePlan {
+ return nil, nil, vterrors.VT12001(ViewComplex)
}
if opCode != engine.Unsharded && opCode != engine.EqualUnique && opCode != engine.Scatter {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex)
+ return nil, nil, vterrors.VT12001(ViewComplex)
}
- _ = sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool {
+ _ = sqlparser.SafeRewrite(ddl.Select, nil, func(cursor *sqlparser.Cursor) bool {
switch tableName := cursor.Node().(type) {
case sqlparser.TableName:
cursor.Replace(sqlparser.TableName{
@@ -232,7 +233,7 @@ func buildAlterView(vschema plancontext.VSchema, ddl *sqlparser.AlterView, reser
})
}
return true
- }, nil)
+ })
return destination, keyspace, nil
}
@@ -249,17 +250,24 @@ func buildCreateView(vschema plancontext.VSchema, ddl *sqlparser.CreateView, res
if err != nil {
return nil, nil, err
}
- isRoutePlan, keyspaceName, opCode := tryToGetRoutePlan(selectPlan.primitive)
- if !isRoutePlan {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex)
+ selPlanKs := selectPlan.primitive.GetKeyspaceName()
+ if keyspace.Name != selPlanKs {
+ return nil, nil, vterrors.VT12001(ViewDifferentKeyspace)
+ }
+ if vschema.IsViewsEnabled() {
+ if keyspace == nil {
+ return nil, nil, vterrors.VT09005()
+ }
+ return destination, keyspace, nil
}
- if keyspace.Name != keyspaceName {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewDifferentKeyspace)
+ isRoutePlan, opCode := tryToGetRoutePlan(selectPlan.primitive)
+ if !isRoutePlan {
+ return nil, nil, vterrors.VT12001(ViewComplex)
}
if opCode != engine.Unsharded && opCode != engine.EqualUnique && opCode != engine.Scatter {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, ViewComplex)
+ return nil, nil, vterrors.VT12001(ViewComplex)
}
- _ = sqlparser.Rewrite(ddl.Select, func(cursor *sqlparser.Cursor) bool {
+ _ = sqlparser.SafeRewrite(ddl.Select, nil, func(cursor *sqlparser.Cursor) bool {
switch tableName := cursor.Node().(type) {
case sqlparser.TableName:
cursor.Replace(sqlparser.TableName{
@@ -267,11 +275,39 @@ func buildCreateView(vschema plancontext.VSchema, ddl *sqlparser.CreateView, res
})
}
return true
- }, nil)
+ })
return destination, keyspace, nil
}
-func buildDropViewOrTable(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStatement) (key.Destination, *vindexes.Keyspace, error) {
+func buildDropView(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStatement) (key.Destination, *vindexes.Keyspace, error) {
+ if !vschema.IsViewsEnabled() {
+ return buildDropTable(vschema, ddlStatement)
+ }
+ var ks *vindexes.Keyspace
+ viewMap := make(map[string]any)
+ for _, tbl := range ddlStatement.GetFromTables() {
+ _, ksForView, _, err := vschema.TargetDestination(tbl.Qualifier.String())
+ if err != nil {
+ return nil, nil, err
+ }
+ if ksForView == nil {
+ return nil, nil, vterrors.VT09005()
+ }
+ if ks == nil {
+ ks = ksForView
+ } else if ks.Name != ksForView.Name {
+ return nil, nil, vterrors.VT12001("cannot drop views from multiple keyspace in a single statement")
+ }
+ if _, exists := viewMap[tbl.Name.String()]; exists {
+ return nil, nil, vterrors.VT03013(tbl.Name.String())
+ }
+ viewMap[tbl.Name.String()] = nil
+ tbl.Qualifier = sqlparser.NewIdentifierCS("")
+ }
+ return key.DestinationAllShards{}, ks, nil
+}
+
+func buildDropTable(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStatement) (key.Destination, *vindexes.Keyspace, error) {
var destination key.Destination
var keyspace *vindexes.Keyspace
for i, tab := range ddlStatement.GetFromTables() {
@@ -307,7 +343,7 @@ func buildDropViewOrTable(vschema plancontext.VSchema, ddlStatement sqlparser.DD
keyspace = keyspaceTab
}
if destination != destinationTab || keyspace != keyspaceTab {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, DifferentDestinations)
+ return nil, nil, vterrors.VT12001(DifferentDestinations)
}
}
return destination, keyspace, nil
@@ -351,7 +387,7 @@ func buildRenameTable(vschema plancontext.VSchema, renameTable *sqlparser.Rename
return nil, nil, err
}
if keyspaceTo.Name != keyspaceFrom.Name {
- return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.ForbidSchemaChange, "Changing schema from '%s' to '%s' is not allowed", keyspaceFrom.Name, keyspaceTo.Name)
+ return nil, nil, vterrors.VT03002(keyspaceFrom.Name, keyspaceTo.Name)
}
tabPair.ToTable = sqlparser.TableName{
Name: tabPair.ToTable.Name,
@@ -363,19 +399,19 @@ func buildRenameTable(vschema plancontext.VSchema, renameTable *sqlparser.Rename
keyspace = keyspaceFrom
}
if destination != destinationFrom || keyspace != keyspaceFrom {
- return nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, DifferentDestinations)
+ return nil, nil, vterrors.VT12001(DifferentDestinations)
}
}
return destination, keyspace, nil
}
-func tryToGetRoutePlan(selectPlan engine.Primitive) (valid bool, keyspaceName string, opCode engine.Opcode) {
+func tryToGetRoutePlan(selectPlan engine.Primitive) (valid bool, opCode engine.Opcode) {
switch plan := selectPlan.(type) {
case *engine.Route:
- return true, plan.Keyspace.Name, plan.Opcode
+ return true, plan.Opcode
case engine.Gen4Comparer:
return tryToGetRoutePlan(plan.GetGen4Primitive())
default:
- return false, "", engine.Opcode(0)
+ return false, engine.Opcode(0)
}
}
diff --git a/go/vt/vtgate/planbuilder/delete.go b/go/vt/vtgate/planbuilder/delete.go
index f93299cf792..876d4aa73e4 100644
--- a/go/vt/vtgate/planbuilder/delete.go
+++ b/go/vt/vtgate/planbuilder/delete.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -29,7 +28,7 @@ func buildDeletePlan(string) stmtPlanner {
return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) {
del := stmt.(*sqlparser.Delete)
if del.With != nil {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: with expression in delete statement")
+ return nil, vterrors.VT12001("WITH expression in DELETE statement")
}
var err error
if len(del.TableExprs) == 1 && len(del.Targets) == 1 {
@@ -48,7 +47,7 @@ func buildDeletePlan(string) stmtPlanner {
}
if len(del.Targets) > 1 {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "multi-table delete statement in not supported in sharded database")
+ return nil, vterrors.VT12001("multi-table DELETE statement in a sharded keyspace")
}
edelTable, err := edel.GetSingleTable()
@@ -56,13 +55,13 @@ func buildDeletePlan(string) stmtPlanner {
return nil, err
}
if len(del.Targets) == 1 && del.Targets[0].Name != edelTable.Name {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.UnknownTable, "Unknown table '%s' in MULTI DELETE", del.Targets[0].Name.String())
+ return nil, vterrors.VT03003(del.Targets[0].Name.String())
}
if len(edelTable.Owned) > 0 {
aTblExpr, ok := del.TableExprs[0].(*sqlparser.AliasedTableExpr)
if !ok {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: delete on complex table expression")
+ return nil, vterrors.VT12001("deleting from a complex table expression")
}
tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: edelTable.Name}, As: aTblExpr.As}
edel.OwnedVindexQuery = generateDMLSubquery(tblExpr, del.Where, del.OrderBy, del.Limit, edelTable, ksidVindex.Columns)
@@ -79,33 +78,34 @@ func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) {
if !ok {
return del, nil
}
- if !atExpr.As.IsEmpty() && !sqlparser.EqualsIdentifierCS(del.Targets[0].Name, atExpr.As) {
+ if !atExpr.As.IsEmpty() && !sqlparser.Equals.IdentifierCS(del.Targets[0].Name, atExpr.As) {
// Unknown table in MULTI DELETE
- return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.UnknownTable, "Unknown table '%s' in MULTI DELETE", del.Targets[0].Name.String())
+ return nil, vterrors.VT03003(del.Targets[0].Name.String())
}
tbl, ok := atExpr.Expr.(sqlparser.TableName)
if !ok {
// derived table
- return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.NonUpdateableTable, "The target table %s of the DELETE is not updatable", atExpr.As.String())
+ return nil, vterrors.VT03004(atExpr.As.String())
}
- if atExpr.As.IsEmpty() && !sqlparser.EqualsIdentifierCS(del.Targets[0].Name, tbl.Name) {
+ if atExpr.As.IsEmpty() && !sqlparser.Equals.IdentifierCS(del.Targets[0].Name, tbl.Name) {
// Unknown table in MULTI DELETE
- return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.UnknownTable, "Unknown table '%s' in MULTI DELETE", del.Targets[0].Name.String())
+ return nil, vterrors.VT03003(del.Targets[0].Name.String())
}
del.TableExprs = sqlparser.TableExprs{&sqlparser.AliasedTableExpr{Expr: tbl}}
del.Targets = nil
if del.Where != nil {
- _ = sqlparser.Rewrite(del.Where, func(cursor *sqlparser.Cursor) bool {
- switch node := cursor.Node().(type) {
- case *sqlparser.ColName:
- if !node.Qualifier.IsEmpty() {
- node.Qualifier = tbl
- }
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ col, ok := node.(*sqlparser.ColName)
+ if !ok {
+ return true, nil
+ }
+ if !col.Qualifier.IsEmpty() {
+ col.Qualifier = tbl
}
- return true
- }, nil)
+ return true, nil
+ }, del.Where)
}
return del, nil
}
diff --git a/go/vt/vtgate/planbuilder/distinct.go b/go/vt/vtgate/planbuilder/distinct.go
index 695cc43905c..98e6b550b8b 100644
--- a/go/vt/vtgate/planbuilder/distinct.go
+++ b/go/vt/vtgate/planbuilder/distinct.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
)
@@ -67,7 +66,7 @@ func (d *distinct) Primitive() engine.Primitive {
// Rewrite implements the logicalPlan interface
func (d *distinct) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 1 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "distinct: wrong number of inputs")
+ return vterrors.VT13001("distinct: wrong number of inputs")
}
d.input = inputs[0]
return nil
diff --git a/go/vt/vtgate/planbuilder/dml_planner.go b/go/vt/vtgate/planbuilder/dml_planner.go
index 82c2cf56aca..9a4608b295c 100644
--- a/go/vt/vtgate/planbuilder/dml_planner.go
+++ b/go/vt/vtgate/planbuilder/dml_planner.go
@@ -17,8 +17,9 @@ limitations under the License.
package planbuilder
import (
+ "fmt"
+
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -68,7 +69,7 @@ func getDMLRouting(where *sqlparser.Where, table *vindexes.Table) (
) {
// Check that we have a primary vindex which is valid
if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() {
- return engine.Scatter, nil, nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, table.Name)
+ return engine.Scatter, nil, nil, nil, vterrors.VT09001(table.Name)
}
// ksidVindex is the primary vindex
ksidVindex := table.ColumnVindexes[0]
@@ -314,7 +315,7 @@ func buildDMLPlan(
if subqueryIsUnsharded {
vschema.WarnUnshardedOnly("subqueries can't be sharded in DML")
} else {
- return nil, nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: sharded subqueries in DML")
+ return nil, nil, nil, vterrors.VT12001("sharded subqueries in DML")
}
edml.Opcode = engine.Unsharded
// Generate query after all the analysis. Otherwise table name substitutions for
@@ -325,7 +326,7 @@ func buildDMLPlan(
}
if hasSubquery(stmt) {
- return nil, nil, nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: subqueries in sharded DML")
+ return nil, nil, nil, vterrors.VT12001("sharded subqueries in DML")
}
// Generate query after all the analysis. Otherwise table name substitutions for
@@ -340,7 +341,7 @@ func buildDMLPlan(
edml.QueryTimeout = queryTimeout(directives)
if len(pb.st.tables) != 1 {
- return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi-table %s statement is not supported in sharded database", dmlType)
+ return nil, nil, nil, vterrors.VT12001(fmt.Sprintf("multi-table %s statement in a sharded keyspace", dmlType))
}
edmlTable, err := edml.GetSingleTable()
if err != nil {
@@ -353,7 +354,7 @@ func buildDMLPlan(
if rb.eroute.TargetDestination != nil {
if rb.eroute.TargetTabletType != topodatapb.TabletType_PRIMARY {
- return nil, nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.InnodbReadOnly, "unsupported: %s statement with a replica target", dmlType)
+ return nil, nil, nil, vterrors.VT09002(dmlType)
}
edml.Opcode = engine.ByDestination
edml.TargetDestination = rb.eroute.TargetDestination
@@ -363,7 +364,7 @@ func buildDMLPlan(
edml.Opcode = routingType
if routingType == engine.Scatter {
if limit != nil {
- return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi shard %s with limit is not supported", dmlType)
+ return nil, nil, nil, vterrors.VT12001(fmt.Sprintf("multi-shard %s with LIMIT", dmlType))
}
} else {
edml.Vindex = vindex
diff --git a/go/vt/vtgate/planbuilder/explain.go b/go/vt/vtgate/planbuilder/explain.go
deleted file mode 100644
index 8f8839eca92..00000000000
--- a/go/vt/vtgate/planbuilder/explain.go
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package planbuilder
-
-import (
- "strings"
-
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- "vitess.io/vitess/go/sqltypes"
- "vitess.io/vitess/go/vt/key"
- querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/engine"
-)
-
-// Builds an explain-plan for the given Primitive
-func buildExplainPlan(stmt sqlparser.Explain, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
- switch explain := stmt.(type) {
- case *sqlparser.ExplainTab:
- return explainTabPlan(explain, vschema)
- case *sqlparser.ExplainStmt:
- switch explain.Type {
- case sqlparser.VitessType:
- return buildVitessTypePlan(explain, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
- case sqlparser.VTExplainType:
- return buildVTExplainTypePlan(explain, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
- default:
- return buildOtherReadAndAdmin(sqlparser.String(explain), vschema)
- }
- }
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected explain type: %T", stmt)
-}
-
-func explainTabPlan(explain *sqlparser.ExplainTab, vschema plancontext.VSchema) (*planResult, error) {
- _, _, ks, _, destination, err := vschema.FindTableOrVindex(explain.Table)
- if err != nil {
- return nil, err
- }
- explain.Table.Qualifier = sqlparser.NewIdentifierCS("")
-
- if destination == nil {
- destination = key.DestinationAnyShard{}
- }
-
- keyspace, err := vschema.FindKeyspace(ks)
- if err != nil {
- return nil, err
- }
- if keyspace == nil {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "Cannot find keyspace for: %s", ks)
- }
-
- return newPlanResult(&engine.Send{
- Keyspace: keyspace,
- TargetDestination: destination,
- Query: sqlparser.String(explain),
- SingleShardOnly: true,
- }, singleTable(keyspace.Name, explain.Table.Name.String())), nil
-}
-
-func buildVitessTypePlan(explain *sqlparser.ExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
- innerInstruction, err := createInstructionFor(sqlparser.String(explain.Statement), explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
- if err != nil {
- return nil, err
- }
- descriptions := treeLines(engine.PrimitiveToPlanDescription(innerInstruction.primitive))
-
- var rows [][]sqltypes.Value
- for _, line := range descriptions {
- var targetDest string
- if line.descr.TargetDestination != nil {
- targetDest = line.descr.TargetDestination.String()
- }
- keyspaceName := ""
- if line.descr.Keyspace != nil {
- keyspaceName = line.descr.Keyspace.Name
- }
-
- rows = append(rows, []sqltypes.Value{
- sqltypes.NewVarChar(line.header + line.descr.OperatorType), // operator
- sqltypes.NewVarChar(line.descr.Variant), // variant
- sqltypes.NewVarChar(keyspaceName), // keyspace
- sqltypes.NewVarChar(targetDest), // destination
- sqltypes.NewVarChar(line.descr.TargetTabletType.String()), // tabletType
- sqltypes.NewVarChar(extractQuery(line.descr.Other)), // query
- })
- }
-
- fields := []*querypb.Field{
- {Name: "operator", Type: querypb.Type_VARCHAR},
- {Name: "variant", Type: querypb.Type_VARCHAR},
- {Name: "keyspace", Type: querypb.Type_VARCHAR},
- {Name: "destination", Type: querypb.Type_VARCHAR},
- {Name: "tabletType", Type: querypb.Type_VARCHAR},
- {Name: "query", Type: querypb.Type_VARCHAR},
- }
-
- return newPlanResult(engine.NewRowsPrimitive(rows, fields)), nil
-}
-
-func buildVTExplainTypePlan(explain *sqlparser.ExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) {
- input, err := createInstructionFor(sqlparser.String(explain.Statement), explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL)
- if err != nil {
- return nil, err
- }
- switch input.primitive.(type) {
- case *engine.Insert, *engine.Delete, *engine.Update:
- directives := explain.GetParsedComments().Directives()
- if directives.IsSet(sqlparser.DirectiveVtexplainRunDMLQueries) {
- break
- }
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "explain format = vtexplain will actually run queries. `/*vt+ %s */` must be set to run DML queries in vtexplain. Example: `explain /*vt+ %s */ format = vtexplain delete from t1`", sqlparser.DirectiveVtexplainRunDMLQueries, sqlparser.DirectiveVtexplainRunDMLQueries)
- }
-
- return &planResult{primitive: &engine.VTExplain{Input: input.primitive}, tables: input.tables}, nil
-}
-
-func extractQuery(m map[string]any) string {
- queryObj, ok := m["Query"]
- if !ok {
- return ""
- }
- query, ok := queryObj.(string)
- if !ok {
- return ""
- }
-
- return query
-}
-
-type description struct {
- header string
- descr engine.PrimitiveDescription
-}
-
-func treeLines(root engine.PrimitiveDescription) []description {
- l := len(root.Inputs) - 1
- output := []description{{
- header: "",
- descr: root,
- }}
- for i, child := range root.Inputs {
- childLines := treeLines(child)
- var header string
- var lastHdr string
- if i == l {
- header = "└─" + " "
- lastHdr = strings.Repeat(" ", 3)
- } else {
- header = "├─" + " "
- lastHdr = "│" + strings.Repeat(" ", 2)
- }
-
- for x, childLine := range childLines {
- if x == 0 {
- childLine.header = header + childLine.header
- } else {
- childLine.header = lastHdr + childLine.header
- }
-
- output = append(output, childLine)
- }
- }
- return output
-}
diff --git a/go/vt/vtgate/planbuilder/explain_test.go b/go/vt/vtgate/planbuilder/explain_test.go
deleted file mode 100644
index b607e8975cd..00000000000
--- a/go/vt/vtgate/planbuilder/explain_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package planbuilder
-
-import (
- "strings"
- "testing"
-
- "vitess.io/vitess/go/test/utils"
- "vitess.io/vitess/go/vt/vtgate/engine"
-)
-
-type Descr = engine.PrimitiveDescription
-
-func TestTreeStructure(t *testing.T) {
- var classical, popRock Descr
- {
- n1, n2 := node("Light"), node("Heavy")
- n3, n4 := node("Piano"), node("Orchestra", n1, n2)
- n5, n6 := node("Male"), node("Female")
- n7, n8 := node("Opera", n5, n6), node("Chorus")
- n9, n10 := node("Instrumental", n3, n4), node("Vocal", n7, n8)
- classical = node("Classical", n9, n10)
- }
- {
- n3 := node("Heavy metal")
- n4, n5 := node("Dancing"), node("Soft")
- n6, n7 := node("Rock", n3), node("Country", n4, n5)
- n8, n9 := node("Late pop"), node("Disco")
- n10, n11 := node("Soft techno"), node("Hard techno")
- n12, n13 := node("Pop", n8, n9), node("Techno", n10, n11)
- n14, n15 := node("Organic", n6, n7), node("Electronic", n12, n13)
- popRock = node("Pop/Rock", n14, n15)
- }
- music := node("Music", classical, popRock)
-
- descriptions := treeLines(music)
-
- output := ""
- for _, d := range descriptions {
- output += d.header + d.descr.OperatorType + "\n"
- }
- want :=
- `Music
-├─ Classical
-│ ├─ Instrumental
-│ │ ├─ Piano
-│ │ └─ Orchestra
-│ │ ├─ Light
-│ │ └─ Heavy
-│ └─ Vocal
-│ ├─ Opera
-│ │ ├─ Male
-│ │ └─ Female
-│ └─ Chorus
-└─ Pop/Rock
- ├─ Organic
- │ ├─ Rock
- │ │ └─ Heavy metal
- │ └─ Country
- │ ├─ Dancing
- │ └─ Soft
- └─ Electronic
- ├─ Pop
- │ ├─ Late pop
- │ └─ Disco
- └─ Techno
- ├─ Soft techno
- └─ Hard techno
-`
-
- utils.MustMatch(t, want, output, "")
-}
-
-func node(name string, inputs ...Descr) Descr {
- return Descr{
- OperatorType: name,
- Inputs: inputs,
- }
-}
-
-func TestSingleNode(t *testing.T) {
- single := node("single")
-
- output := toString(treeLines(single))
-
- utils.MustMatch(t, "single", output, "")
-}
-
-func TestTwoNodes(t *testing.T) {
- root := node("parent", node("child1"), node("child2"))
-
- descriptions := treeLines(root)
- output := toString(descriptions)
-
- want :=
- `parent
-├─ child1
-└─ child2`
- utils.MustMatch(t, want, output, "")
-}
-
-func TestThreeNodes(t *testing.T) {
- /*
- Electronic
- ├─ Pop
- │ ├─ Late pop
- │ └─ Disco
- └─ Techno
- ├─ Soft techno
- └─ Hard techno
- */
-
- pop := node("pop", node("late pop"), node("disco"))
- techno := node("techno", node("soft techno"), node("hard techno"))
- electronic := node("electronic", pop, techno)
-
- descriptions := treeLines(electronic)
- output := toString(descriptions)
- want :=
- `electronic
-├─ pop
-│ ├─ late pop
-│ └─ disco
-└─ techno
- ├─ soft techno
- └─ hard techno`
- utils.MustMatch(t, want, output, "")
-}
-
-func toString(descriptions []description) string {
- output := ""
- for _, d := range descriptions {
- output += d.header + d.descr.OperatorType + "\n"
- }
- return strings.Trim(output, " \n\t")
-}
diff --git a/go/vt/vtgate/planbuilder/expr.go b/go/vt/vtgate/planbuilder/expr.go
index 72e45b660b5..dfbe23b1640 100644
--- a/go/vt/vtgate/planbuilder/expr.go
+++ b/go/vt/vtgate/planbuilder/expr.go
@@ -18,8 +18,8 @@ package planbuilder
import (
"bytes"
+ "fmt"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/sqlparser"
@@ -99,7 +99,7 @@ func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr, reservedVars *sqlpar
return false, err
}
default:
- return false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected SELECT type: %T", node)
+ return false, vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", node))
}
sqi := subqueryInfo{
ast: node,
@@ -136,7 +136,7 @@ func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr, reservedVars *sqlpar
continue
}
if sqi.origin != nil {
- return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard correlated subquery")
+ return nil, nil, nil, vterrors.VT12001("cross-shard correlated subquery")
}
sqName, hasValues := pb.jt.GenerateSubqueryVars()
@@ -195,7 +195,7 @@ func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr, reservedVars *sqlpar
return pullouts, highestOrigin, expr, nil
}
-var dummyErr = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "dummy")
+var dummyErr = vterrors.VT13001("dummy")
func hasSubquery(node sqlparser.SQLNode) bool {
has := false
diff --git a/go/vt/vtgate/planbuilder/filter.go b/go/vt/vtgate/planbuilder/filter.go
index bb2750de904..efc8d6089e4 100644
--- a/go/vt/vtgate/planbuilder/filter.go
+++ b/go/vt/vtgate/planbuilder/filter.go
@@ -18,7 +18,6 @@ package planbuilder
import (
"vitess.io/vitess/go/mysql/collations"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -49,7 +48,7 @@ func (s *simpleConverterLookup) ColumnLookup(col *sqlparser.ColName) (int, error
return 0, err
}
if added && !s.canPushProjection {
- return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "column should not be pushed to projection while doing a column lookup")
+ return 0, vterrors.VT13001("column should not be pushed to projection while doing a column lookup")
}
return offset, nil
}
diff --git a/go/vt/vtgate/planbuilder/filtering.go b/go/vt/vtgate/planbuilder/filtering.go
index 9c50a62cd32..0dd4c889e80 100644
--- a/go/vt/vtgate/planbuilder/filtering.go
+++ b/go/vt/vtgate/planbuilder/filtering.go
@@ -17,13 +17,12 @@ limitations under the License.
package planbuilder
import (
- "errors"
"fmt"
"vitess.io/vitess/go/vt/vtgate/evalengine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/semantics"
- "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -39,7 +38,7 @@ func planFilter(pb *primitiveBuilder, input logicalPlan, filter sqlparser.Expr,
in = node.Left
} else {
if node.ejoin.Opcode == engine.LeftJoin {
- return nil, errors.New("unsupported: cross-shard left join and where clause")
+ return nil, vterrors.VT12001("cross-shard LEFT JOIN and WHERE clause")
}
isLeft = false
in = node.Right
@@ -76,44 +75,44 @@ func planFilter(pb *primitiveBuilder, input logicalPlan, filter sqlparser.Expr,
case *vindexFunc:
return filterVindexFunc(node, filter)
case *simpleProjection:
- return nil, errors.New("unsupported: filtering on results of cross-shard subquery")
+ return nil, vterrors.VT12001("filtering on results of cross-shard subquery")
case *orderedAggregate:
- return nil, errors.New("unsupported: filtering on results of aggregates")
+ return nil, vterrors.VT12001("filtering on results of aggregates")
}
- return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "[BUG] unreachable %T.filtering", input)
+ return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.filtering", input))
}
func filterVindexFunc(node *vindexFunc, filter sqlparser.Expr) (logicalPlan, error) {
if node.eVindexFunc.Opcode != engine.VindexNone {
- return nil, errors.New("unsupported: where clause for vindex function must be of the form id = or id in(,...) (multiple filters)")
+ return nil, vterrors.VT12001(operators.VindexUnsupported + " (multiple filters)")
}
// Check LHS.
comparison, ok := filter.(*sqlparser.ComparisonExpr)
if !ok {
- return nil, errors.New("unsupported: where clause for vindex function must be of the form id = or id in(,...) (not a comparison)")
+ return nil, vterrors.VT12001(operators.VindexUnsupported + " (not a comparison)")
}
if comparison.Operator != sqlparser.EqualOp && comparison.Operator != sqlparser.InOp {
- return nil, errors.New("unsupported: where clause for vindex function must be of the form id = or id in(,...) (not equality)")
+ return nil, vterrors.VT12001(operators.VindexUnsupported + " (not equality)")
}
colname, ok := comparison.Left.(*sqlparser.ColName)
if !ok {
- return nil, errors.New("unsupported: where clause for vindex function must be of the form id = or id in(,...) (lhs is not a column)")
+ return nil, vterrors.VT12001(operators.VindexUnsupported + " (lhs is not a column)")
}
if !colname.Name.EqualString("id") {
- return nil, errors.New("unsupported: where clause for vindex function must be of the form id = or id in(,...) (lhs is not id)")
+ return nil, vterrors.VT12001(operators.VindexUnsupported + " (lhs is not id)")
}
// Check RHS.
// We have to check before calling NewPlanValue because NewPlanValue allows lists also.
if !sqlparser.IsValue(comparison.Right) && !sqlparser.IsSimpleTuple(comparison.Right) {
- return nil, errors.New("unsupported: where clause for vindex function must be of the form id = or id in(,...) (rhs is not a value)")
+ return nil, vterrors.VT12001(operators.VindexUnsupported + " (rhs is not a value)")
}
var err error
node.eVindexFunc.Value, err = evalengine.Translate(comparison.Right, semantics.EmptySemTable())
if err != nil {
- return nil, fmt.Errorf("unsupported: where clause for vindex function must be of the form id = or id in(,...): %v", err)
+ return nil, vterrors.VT12001(fmt.Sprintf("%s: %v", operators.VindexUnsupported, err))
}
node.eVindexFunc.Opcode = engine.VindexMap
diff --git a/go/vt/vtgate/planbuilder/from.go b/go/vt/vtgate/planbuilder/from.go
index 18d653778a7..59df24146fb 100644
--- a/go/vt/vtgate/planbuilder/from.go
+++ b/go/vt/vtgate/planbuilder/from.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- "errors"
"fmt"
"sort"
"strings"
@@ -25,7 +24,7 @@ import (
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/vt/vtgate/evalengine"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/sqlparser"
@@ -42,7 +41,7 @@ func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs, res
}
rb, ok := pb.plan.(*route)
if !ok {
- return nil, errors.New("unsupported: multi-shard or vindex write statement")
+ return nil, vterrors.VT12001("multi-shard or vindex write statement")
}
for _, sub := range rb.substitutions {
*sub.oldExpr = *sub.newExpr
@@ -80,7 +79,7 @@ func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, rese
if rb, ok := pb.plan.(*route); ok {
sel, ok := rb.Select.(*sqlparser.Select)
if !ok {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", sqlparser.String(rb.Select))
+ return vterrors.VT13002(sqlparser.String(rb.Select))
}
sel.From = sqlparser.TableExprs{&sqlparser.ParenTableExpr{Exprs: sel.From}}
@@ -89,9 +88,9 @@ func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, rese
case *sqlparser.JoinTableExpr:
return pb.processJoin(tableExpr, reservedVars, where)
case *sqlparser.JSONTableExpr:
- return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: json_table expressions")
+ return vterrors.VT12001("JSON_TABLE expressions")
}
- return fmt.Errorf("BUG: unexpected table expression type: %T", tableExpr)
+ return vterrors.VT13001(fmt.Sprintf("unexpected table expression type: %T", tableExpr))
}
// processAliasedTable produces a logicalPlan subtree for the given AliasedTableExpr.
@@ -103,14 +102,14 @@ func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, rese
// vindex columns.
func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTableExpr, reservedVars *sqlparser.ReservedVars) error {
if tableExpr.Columns != nil {
- return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: column aliases in derived table")
+ return vterrors.VT12001("column aliases in derived table")
}
switch expr := tableExpr.Expr.(type) {
case sqlparser.TableName:
return pb.buildTablePrimitive(tableExpr, expr)
case *sqlparser.DerivedTable:
if expr.Lateral {
- return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported: lateral derived tables")
+ return vterrors.VT12001("lateral derived tables")
}
spb := newPrimitiveBuilder(pb.vschema, pb.jt)
switch stmt := expr.Select.(type) {
@@ -123,7 +122,7 @@ func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTabl
return err
}
default:
- return fmt.Errorf("BUG: unexpected SELECT type: %T", stmt)
+ return vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", stmt))
}
subroute, ok := spb.plan.(*route)
@@ -175,7 +174,7 @@ func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTabl
// Dups are not allowed in subqueries in this situation.
for _, colVindex := range vschemaTable.ColumnVindexes {
if colVindex.Columns[0].Equal(rc.alias) {
- return fmt.Errorf("duplicate column aliases: %v", rc.alias)
+ return vterrors.VT12001(fmt.Sprintf("duplicate column aliases: %v", rc.alias))
}
}
vschemaTable.ColumnVindexes = append(vschemaTable.ColumnVindexes, &vindexes.ColumnVindex{
@@ -190,7 +189,7 @@ func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTabl
pb.plan, pb.st = rb, st
return nil
}
- return fmt.Errorf("BUG: unexpected table expression type: %T", tableExpr.Expr)
+ return vterrors.VT13001(fmt.Sprintf("unexpected table expression type: %T", tableExpr.Expr))
}
// buildTablePrimitive builds a primitive based on the table name.
@@ -224,12 +223,20 @@ func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTabl
if vindex != nil {
single, ok := vindex.(vindexes.SingleColumn)
if !ok {
- return fmt.Errorf("multi-column vindexes not supported")
+ return vterrors.VT12001("multi-column vindexes")
}
pb.plan, pb.st = newVindexFunc(alias, single)
return nil
}
+ sourceTable, err := pb.tryRedirectGen4InsertToSource(vschemaTable)
+ if err != nil {
+ return err
+ }
+ if sourceTable != nil {
+ vschemaTable = sourceTable
+ }
+
rb, st := newRoute(sel)
pb.plan, pb.st = rb, st
if err := st.AddVSchemaTable(alias, vschemaTable, rb); err != nil {
@@ -298,7 +305,7 @@ func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, reserved
case sqlparser.RightJoinType:
convertToLeftJoin(ajoin)
default:
- return fmt.Errorf("unsupported: %s", ajoin.Join.ToString())
+ return vterrors.VT12001(ajoin.Join.ToString())
}
if err := pb.processTableExpr(ajoin.LeftExpr, reservedVars, where); err != nil {
return err
@@ -310,6 +317,26 @@ func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, reserved
return pb.join(rpb, ajoin, reservedVars, where)
}
+// If the primitiveBuilder context is a Gen4 planner, the statement is an
+// INSERT, and the vschema table is a reference with a valid source reference,
+// then redirect the INSERT back to the source.
+func (pb *primitiveBuilder) tryRedirectGen4InsertToSource(vschemaTable *vindexes.Table) (*vindexes.Table, error) {
+ if pb.stmt == nil {
+ return nil, nil
+ }
+ if _, ok := pb.stmt.(*sqlparser.Insert); !ok {
+ return nil, nil
+ }
+ if pb.vschema.Planner() == querypb.ExecuteOptions_V3 {
+ return nil, nil
+ }
+ if vschemaTable.Type != vindexes.TypeReference || vschemaTable.Source == nil {
+ return nil, nil
+ }
+ vschemaTable, _, _, _, _, err := pb.vschema.FindTableOrVindex(vschemaTable.Source.TableName)
+ return vschemaTable, err
+}
+
// convertToLeftJoin converts a right join into a left join.
func convertToLeftJoin(ajoin *sqlparser.JoinTableExpr) {
newRHS := ajoin.LeftExpr
@@ -356,12 +383,12 @@ func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTab
// Merge the AST.
sel, ok := lRoute.Select.(*sqlparser.Select)
if !ok {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", sqlparser.String(lRoute.Select))
+ return vterrors.VT13002(sqlparser.String(lRoute.Select))
}
if ajoin == nil {
rhsSel, ok := rRoute.Select.(*sqlparser.Select)
if !ok {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", sqlparser.String(rRoute.Select))
+ return vterrors.VT13002(sqlparser.String(rRoute.Select))
}
sel.From = append(sel.From, rhsSel.From...)
} else {
diff --git a/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go b/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go
index 53dcb8d3153..28879258dd0 100644
--- a/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go
+++ b/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go
@@ -58,7 +58,7 @@ func gen4CompareV3Planner(query string) func(sqlparser.Statement, *sqlparser.Res
v3Primitive, v3Err := planWithPlannerVersion(statement, vars, ctxVSchema, query, V3)
// check potential errors from Gen4 and V3
- err = engine.CompareV3AndGen4Errors(v3Err, gen4Err)
+ err = engine.CompareErrors(v3Err, gen4Err, "v3", "Gen4")
if err != nil {
return nil, err
}
diff --git a/go/vt/vtgate/planbuilder/gen4_planner.go b/go/vt/vtgate/planbuilder/gen4_planner.go
index 924169fc966..6822dcff642 100644
--- a/go/vt/vtgate/planbuilder/gen4_planner.go
+++ b/go/vt/vtgate/planbuilder/gen4_planner.go
@@ -17,22 +17,18 @@ limitations under the License.
package planbuilder
import (
- "errors"
+ "fmt"
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/physical"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
-var _ stmtPlanner = gen4Planner("apa", 0)
-
func gen4Planner(query string, plannerVersion querypb.ExecuteOptions_PlannerVersion) stmtPlanner {
return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) {
switch stmt := stmt.(type) {
@@ -43,7 +39,7 @@ func gen4Planner(query string, plannerVersion querypb.ExecuteOptions_PlannerVers
case *sqlparser.Delete:
return gen4DeleteStmtPlanner(plannerVersion, stmt, reservedVars, vschema)
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%T not yet supported", stmt)
+ return nil, vterrors.VT12001(fmt.Sprintf("%T", stmt))
}
}
}
@@ -58,11 +54,11 @@ func gen4SelectStmtPlanner(
switch node := stmt.(type) {
case *sqlparser.Select:
if node.With != nil {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: with expression in select statement")
+ return nil, vterrors.VT12001("WITH expression in SELECT statement")
}
case *sqlparser.Union:
if node.With != nil {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: with expression in union statement")
+ return nil, vterrors.VT12001("WITH expression in UNION statement")
}
}
@@ -91,26 +87,26 @@ func gen4SelectStmtPlanner(
sel.SQLCalcFoundRows = false
}
- getPlan := func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, error) {
+ getPlan := func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, []string, error) {
return newBuildSelectPlan(selStatement, reservedVars, vschema, plannerVersion)
}
- plan, st, err := getPlan(stmt)
+ plan, _, tablesUsed, err := getPlan(stmt)
if err != nil {
return nil, err
}
- if shouldRetryWithCNFRewriting(plan) {
+ if shouldRetryAfterPredicateRewriting(plan) {
// by transforming the predicates to CNF, the planner will sometimes find better plans
- primitive, st := gen4CNFRewrite(stmt, getPlan)
- if primitive != nil {
- return newPlanResult(primitive, tablesFromSemantics(st)...), nil
+ plan2, _, tablesUsed := gen4PredicateRewrite(stmt, getPlan)
+ if plan2 != nil {
+ return newPlanResult(plan2.Primitive(), tablesUsed...), nil
}
}
primitive := plan.Primitive()
if !isSel {
- return newPlanResult(primitive, tablesFromSemantics(st)...), nil
+ return newPlanResult(primitive, tablesUsed...), nil
}
// this is done because engine.Route doesn't handle the empty result well
@@ -125,7 +121,7 @@ func gen4SelectStmtPlanner(
prim.SendTo.NoRoutesSpecialHandling = true
}
}
- return newPlanResult(primitive, tablesFromSemantics(st)...), nil
+ return newPlanResult(primitive, tablesUsed...), nil
}
func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select, query string, reservedVars *sqlparser.ReservedVars) (*planResult, error) {
@@ -140,33 +136,33 @@ func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select
// record any warning as planner warning.
vschema.PlannerWarning(semTable.Warning)
- plan, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, vschema, planSelectGen4)
+ plan, tablesUsed, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, vschema, planSelectGen4)
if err != nil {
return nil, err
}
- return newPlanResult(plan.Primitive(), tablesFromSemantics(semTable)...), nil
+ return newPlanResult(plan.Primitive(), tablesUsed...), nil
}
-func planSelectGen4(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, error) {
- plan, _, err := newBuildSelectPlan(sel, reservedVars, vschema, 0)
+func planSelectGen4(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error) {
+ plan, _, tablesUsed, err := newBuildSelectPlan(sel, reservedVars, vschema, 0)
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
- return nil, plan, nil
+ return nil, plan, tablesUsed, nil
}
-func gen4CNFRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, error)) (engine.Primitive, *semantics.SemTable) {
- rewritten, isSel := sqlparser.RewriteToCNF(stmt).(sqlparser.SelectStatement)
+func gen4PredicateRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, []string, error)) (logicalPlan, *semantics.SemTable, []string) {
+ rewritten, isSel := sqlparser.RewritePredicate(stmt).(sqlparser.SelectStatement)
if !isSel {
// Fail-safe code, should never happen
- return nil, nil
+ return nil, nil, nil
}
- plan2, st, err := getPlan(rewritten)
- if err == nil && !shouldRetryWithCNFRewriting(plan2) {
+ plan2, st, op, err := getPlan(rewritten)
+ if err == nil && !shouldRetryAfterPredicateRewriting(plan2) {
// we only use this new plan if it's better than the old one we got
- return plan2.Primitive(), st
+ return plan2, st, op
}
- return nil, nil
+ return nil, nil, nil
}
func newBuildSelectPlan(
@@ -174,14 +170,14 @@ func newBuildSelectPlan(
reservedVars *sqlparser.ReservedVars,
vschema plancontext.VSchema,
version querypb.ExecuteOptions_PlannerVersion,
-) (logicalPlan, *semantics.SemTable, error) {
+) (plan logicalPlan, semTable *semantics.SemTable, tablesUsed []string, err error) {
ksName := ""
if ks, _ := vschema.DefaultKeyspace(); ks != nil {
ksName = ks.Name
}
- semTable, err := semantics.Analyze(selStmt, ksName, vschema)
+ semTable, err = semantics.Analyze(selStmt, ksName, vschema)
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
// record any warning as planner warning.
vschema.PlannerWarning(semTable.Warning)
@@ -189,85 +185,78 @@ func newBuildSelectPlan(
ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version)
if ks, _ := semTable.SingleUnshardedKeyspace(); ks != nil {
- plan, err := unshardedShortcut(ctx, selStmt, ks)
- return plan, semTable, err
+ plan, tablesUsed, err = unshardedShortcut(ctx, selStmt, ks)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ plan, err = pushCommentDirectivesOnPlan(plan, selStmt)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return plan, semTable, tablesUsed, err
}
// From this point on, we know it is not an unsharded query and return the NotUnshardedErr if there is any
if semTable.NotUnshardedErr != nil {
- return nil, nil, semTable.NotUnshardedErr
+ return nil, nil, nil, semTable.NotUnshardedErr
}
err = queryRewrite(semTable, reservedVars, selStmt)
if err != nil {
- return nil, nil, err
- }
-
- logical, err := abstract.CreateLogicalOperatorFromAST(selStmt, semTable)
- if err != nil {
- return nil, nil, err
- }
- err = logical.CheckValid()
- if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
- physOp, err := physical.CreatePhysicalOperator(ctx, logical)
+ op, err := operators.PlanQuery(ctx, selStmt)
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
- plan, err := transformToLogicalPlan(ctx, physOp, true)
+ plan, err = transformToLogicalPlan(ctx, op, true)
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
- plan = optimizePlan(plan)
-
- plan, err = planHorizon(ctx, plan, selStmt, true)
- if err != nil {
- return nil, nil, err
- }
+ optimizePlan(plan)
sel, isSel := selStmt.(*sqlparser.Select)
if isSel {
- if err := setMiscFunc(plan, sel); err != nil {
- return nil, nil, err
+ if err = setMiscFunc(plan, sel); err != nil {
+ return nil, nil, nil, err
}
}
- if err := plan.WireupGen4(ctx); err != nil {
- return nil, nil, err
+ if err = plan.WireupGen4(ctx); err != nil {
+ return nil, nil, nil, err
}
plan, err = pushCommentDirectivesOnPlan(plan, selStmt)
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
- return plan, semTable, nil
+ return plan, semTable, operators.TablesUsed(op), nil
}
// optimizePlan removes unnecessary simpleProjections that have been created while planning
-func optimizePlan(plan logicalPlan) logicalPlan {
- newPlan, _ := visit(plan, func(plan logicalPlan) (bool, logicalPlan, error) {
- this, ok := plan.(*simpleProjection)
- if !ok {
- return true, plan, nil
- }
+func optimizePlan(plan logicalPlan) {
+ for _, lp := range plan.Inputs() {
+ optimizePlan(lp)
+ }
- input, ok := this.input.(*simpleProjection)
- if !ok {
- return true, plan, nil
- }
+ this, ok := plan.(*simpleProjection)
+ if !ok {
+ return
+ }
- for i, col := range this.eSimpleProj.Cols {
- this.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col]
- }
- this.input = input.input
- return true, this, nil
- })
- return newPlan
+ input, ok := this.input.(*simpleProjection)
+ if !ok {
+ return
+ }
+
+ for i, col := range this.eSimpleProj.Cols {
+ this.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col]
+ }
+ this.input = input.input
}
func gen4UpdateStmtPlanner(
@@ -277,7 +266,7 @@ func gen4UpdateStmtPlanner(
vschema plancontext.VSchema,
) (*planResult, error) {
if updStmt.With != nil {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: with expression in update statement")
+ return nil, vterrors.VT12001("WITH expression in UPDATE statement")
}
ksName := ""
@@ -303,7 +292,7 @@ func gen4UpdateStmtPlanner(
edml.Opcode = engine.Unsharded
edml.Query = generateQuery(updStmt)
upd := &engine.Update{DML: edml}
- return newPlanResult(upd, tablesFromSemantics(semTable)...), nil
+ return newPlanResult(upd, operators.QualifiedTables(ks, tables)...), nil
}
if semTable.NotUnshardedErr != nil {
@@ -315,23 +304,14 @@ func gen4UpdateStmtPlanner(
return nil, err
}
- logical, err := abstract.CreateLogicalOperatorFromAST(updStmt, semTable)
- if err != nil {
- return nil, err
- }
- err = logical.CheckValid()
- if err != nil {
- return nil, err
- }
-
ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version)
- physOp, err := physical.CreatePhysicalOperator(ctx, logical)
+ op, err := operators.PlanQuery(ctx, updStmt)
if err != nil {
return nil, err
}
- plan, err := transformToLogicalPlan(ctx, physOp, true)
+ plan, err := transformToLogicalPlan(ctx, op, true)
if err != nil {
return nil, err
}
@@ -347,7 +327,7 @@ func gen4UpdateStmtPlanner(
return nil, err
}
- return newPlanResult(plan.Primitive(), tablesFromSemantics(semTable)...), nil
+ return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil
}
func gen4DeleteStmtPlanner(
@@ -357,7 +337,7 @@ func gen4DeleteStmtPlanner(
vschema plancontext.VSchema,
) (*planResult, error) {
if deleteStmt.With != nil {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: with expression in delete statement")
+ return nil, vterrors.VT12001("WITH expression in DELETE statement")
}
var err error
@@ -391,7 +371,7 @@ func gen4DeleteStmtPlanner(
edml.Opcode = engine.Unsharded
edml.Query = generateQuery(deleteStmt)
del := &engine.Delete{DML: edml}
- return newPlanResult(del, tablesFromSemantics(semTable)...), nil
+ return newPlanResult(del, operators.QualifiedTables(ks, tables)...), nil
}
if err := checkIfDeleteSupported(deleteStmt, semTable); err != nil {
@@ -403,23 +383,13 @@ func gen4DeleteStmtPlanner(
return nil, err
}
- logical, err := abstract.CreateLogicalOperatorFromAST(deleteStmt, semTable)
- if err != nil {
- return nil, err
- }
- err = logical.CheckValid()
- if err != nil {
- return nil, err
- }
-
ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version)
-
- physOp, err := physical.CreatePhysicalOperator(ctx, logical)
+ op, err := operators.PlanQuery(ctx, deleteStmt)
if err != nil {
return nil, err
}
- plan, err := transformToLogicalPlan(ctx, physOp, true)
+ plan, err := transformToLogicalPlan(ctx, op, true)
if err != nil {
return nil, err
}
@@ -435,24 +405,24 @@ func gen4DeleteStmtPlanner(
return nil, err
}
- return newPlanResult(plan.Primitive(), tablesFromSemantics(semTable)...), nil
+ return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil
}
-func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema) (err error) {
+func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema) error {
// Rewrite routed tables
- _ = sqlparser.Rewrite(stmt, func(cursor *sqlparser.Cursor) bool {
- aliasTbl, isAlias := cursor.Node().(*sqlparser.AliasedTableExpr)
+ return sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ aliasTbl, isAlias := node.(*sqlparser.AliasedTableExpr)
if !isAlias {
- return err == nil
+ return true, nil
}
tableName, ok := aliasTbl.Expr.(sqlparser.TableName)
if !ok {
- return err == nil
+ return true, nil
}
var vschemaTable *vindexes.Table
vschemaTable, _, _, _, _, err = vschema.FindTableOrVindex(tableName)
if err != nil {
- return false
+ return false, err
}
if vschemaTable.Name.String() != tableName.Name.String() {
@@ -465,9 +435,8 @@ func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema)
aliasTbl.Expr = tableName
}
- return err == nil
- }, nil)
- return
+ return true, nil
+ }, stmt)
}
func setLockOnAllSelect(plan logicalPlan) {
@@ -546,7 +515,7 @@ func planHorizon(ctx *plancontext.PlanningContext, plan logicalPlan, in sqlparse
}
func planOrderByOnUnion(ctx *plancontext.PlanningContext, plan logicalPlan, union *sqlparser.Union) (logicalPlan, error) {
- qp, err := abstract.CreateQPFromUnion(union)
+ qp, err := operators.CreateQPFromUnion(union)
if err != nil {
return nil, err
}
@@ -592,40 +561,26 @@ func checkIfDeleteSupported(del *sqlparser.Delete, semTable *semantics.SemTable)
}
// Delete is only supported for a single TableExpr which is supposed to be an aliased expression
- multiShardErr := errors.New("unsupported: multi-shard or vindex write statement")
+ multiShardErr := vterrors.VT12001("multi-shard or vindex write statement")
if len(del.TableExprs) != 1 {
return multiShardErr
}
- aliasedTableExpr, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr)
+ _, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr)
if !isAliasedExpr {
return multiShardErr
}
if len(del.Targets) > 1 {
- return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "multi-table delete statement in not supported in sharded database")
+ return vterrors.VT12001("multi-table DELETE statement in a sharded keyspace")
}
- // Get the table information and the vindex table from it
- ti, err := semTable.TableInfoFor(semTable.TableSetFor(aliasedTableExpr))
- if err != nil {
- return err
- }
- isSharded := false
- vt := ti.GetVindexTable()
- if vt != nil && vt.Keyspace != nil {
- isSharded = vt.Keyspace.Sharded
- }
-
- err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
switch node.(type) {
case *sqlparser.Subquery, *sqlparser.DerivedTable:
// We have a subquery, so we must fail the planning.
// If this subquery and the table expression were all belonging to the same unsharded keyspace,
// we would have already created a plan for them before doing these checks.
- if isSharded {
- return false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: subqueries in sharded DML")
- }
- return false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: sharded subqueries in DML")
+ return false, vterrors.VT12001("subqueries in DML")
}
return true, nil
}, del)
diff --git a/go/vt/vtgate/planbuilder/grouping.go b/go/vt/vtgate/planbuilder/grouping.go
index d96534f5ce2..0bd10666029 100644
--- a/go/vt/vtgate/planbuilder/grouping.go
+++ b/go/vt/vtgate/planbuilder/grouping.go
@@ -17,7 +17,8 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -57,7 +58,7 @@ func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.Grou
case *sqlparser.ColName:
c := e.Metadata.(*column)
if c.Origin() == node {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongGroupField, "Can't group on '%s'", sqlparser.String(e))
+ return nil, vterrors.VT03005(sqlparser.String(e))
}
for i, rc := range node.resultColumns {
if rc.column == c {
@@ -66,7 +67,7 @@ func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.Grou
}
}
if colNumber == -1 {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: in scatter query: group by column must reference column in SELECT list")
+ return nil, vterrors.VT12001("in scatter query: GROUP BY column must reference column in SELECT list")
}
case *sqlparser.Literal:
num, err := ResultFromNumber(node.resultColumns, e, "group statement")
@@ -75,7 +76,7 @@ func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.Grou
}
colNumber = num
default:
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: in scatter query: only simple references allowed")
+ return nil, vterrors.VT12001("in scatter query: only simple references are allowed")
}
node.groupByKeys = append(node.groupByKeys, &engine.GroupByParams{KeyCol: colNumber, WeightStringCol: -1, FromGroupBy: true})
}
@@ -92,7 +93,7 @@ func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.Grou
return node, nil
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable %T.groupBy: ", input)
+ return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.groupBy: ", input))
}
// planDistinct makes the output distinct
@@ -123,5 +124,5 @@ func planDistinct(input logicalPlan) (logicalPlan, error) {
return input, nil
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable %T.distinct", input)
+ return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.distinct", input))
}
diff --git a/go/vt/vtgate/planbuilder/hash_join.go b/go/vt/vtgate/planbuilder/hash_join.go
index 26322d5e488..cef2f30bead 100644
--- a/go/vt/vtgate/planbuilder/hash_join.go
+++ b/go/vt/vtgate/planbuilder/hash_join.go
@@ -17,9 +17,10 @@ limitations under the License.
package planbuilder
import (
+ "fmt"
+
"vitess.io/vitess/go/mysql/collations"
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -80,7 +81,7 @@ func (hj *hashJoin) Inputs() []logicalPlan {
// Rewrite implements the logicalPlan interface
func (hj *hashJoin) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 2 {
- return vterrors.New(vtrpcpb.Code_INTERNAL, "wrong number of children")
+ return vterrors.VT13001(fmt.Sprintf("wrong number of children in hashJoin rewrite: %d; should be exactly 2", len(inputs)))
}
hj.Left = inputs[0]
hj.Right = inputs[1]
diff --git a/go/vt/vtgate/planbuilder/horizon_planning.go b/go/vt/vtgate/planbuilder/horizon_planning.go
index d2c17436a00..4e33f62ebe5 100644
--- a/go/vt/vtgate/planbuilder/horizon_planning.go
+++ b/go/vt/vtgate/planbuilder/horizon_planning.go
@@ -17,13 +17,14 @@ limitations under the License.
package planbuilder
import (
+ "fmt"
+
"vitess.io/vitess/go/sqltypes"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/sqlparser"
@@ -32,7 +33,7 @@ import (
type horizonPlanning struct {
sel *sqlparser.Select
- qp *abstract.QueryProjection
+ qp *operators.QueryProjection
}
func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan logicalPlan, truncateColumns bool) (logicalPlan, error) {
@@ -59,7 +60,8 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo
// a simpleProjection. We create a new Route that contains the derived table in the
// FROM clause. Meaning that, when we push expressions to the select list of this
// new Route, we do not want them to rewrite them.
- if _, isSimpleProj := plan.(*simpleProjection); isSimpleProj {
+ sp, derivedTable := plan.(*simpleProjection)
+ if derivedTable {
oldRewriteDerivedExpr := ctx.RewriteDerivedExpr
defer func() {
ctx.RewriteDerivedExpr = oldRewriteDerivedExpr
@@ -68,16 +70,17 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo
}
var err error
- hp.qp, err = abstract.CreateQPFromSelect(hp.sel)
+ hp.qp, err = operators.CreateQPFromSelect(ctx, hp.sel)
if err != nil {
return nil, err
}
needsOrdering := len(hp.qp.OrderExprs) > 0
- canShortcut := isRoute && hp.sel.Having == nil && !needsOrdering
// If we still have a HAVING clause, it's because it could not be pushed to the WHERE,
// so it probably has aggregations
+ canShortcut := isRoute && hp.sel.Having == nil && !needsOrdering
+
switch {
case hp.qp.NeedsAggregation() || hp.sel.Having != nil:
plan, err = hp.planAggregations(ctx, plan)
@@ -91,6 +94,26 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo
if err != nil {
return nil, err
}
+ case derivedTable:
+ pusher := func(ae *sqlparser.AliasedExpr) (int, error) {
+ offset, _, err := pushProjection(ctx, ae, sp.input, true, true, false)
+ return offset, err
+ }
+ needsVtGate, projections, colNames, err := hp.qp.NeedsProjecting(ctx, pusher)
+ if err != nil {
+ return nil, err
+ }
+ if !needsVtGate {
+ break
+ }
+
+ // there were some expressions we could not push down entirely,
+ // so replace the simpleProjection with a real projection
+ plan = &projection{
+ source: sp.input,
+ columns: projections,
+ columnNames: colNames,
+ }
default:
err = pushProjections(ctx, plan, hp.qp.SelectExprs)
if err != nil {
@@ -123,7 +146,7 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo
return plan, nil
}
-func pushProjections(ctx *plancontext.PlanningContext, plan logicalPlan, selectExprs []abstract.SelectExpr) error {
+func pushProjections(ctx *plancontext.PlanningContext, plan logicalPlan, selectExprs []operators.SelectExpr) error {
for _, e := range selectExprs {
aliasExpr, err := e.GetAliasedExpr()
if err != nil {
@@ -171,7 +194,6 @@ func (hp *horizonPlanning) truncateColumnsIfNeeded(ctx *plancontext.PlanningCont
}
func checkIfAlreadyExists(expr *sqlparser.AliasedExpr, node sqlparser.SelectStatement, semTable *semantics.SemTable) int {
- exprDep := semTable.RecursiveDeps(expr.Expr)
// Here to find if the expr already exists in the SelectStatement, we have 3 cases
// input is a Select -> In this case we want to search in the select
// input is a Union -> In this case we want to search in the First Select of the Union
@@ -200,21 +222,7 @@ func checkIfAlreadyExists(expr *sqlparser.AliasedExpr, node sqlparser.SelectStat
continue
}
- selectExprCol, isSelectExprCol := selectExpr.Expr.(*sqlparser.ColName)
- selectExprDep := semTable.RecursiveDeps(selectExpr.Expr)
-
- // Check that the two expressions have the same dependencies
- if !selectExprDep.Equals(exprDep) {
- continue
- }
-
- if isSelectExprCol && isExprCol && exprCol.Name.Equal(selectExprCol.Name) {
- // the expressions are ColName, we compare their name
- return i
- }
-
- if sqlparser.EqualsExpr(selectExpr.Expr, expr.Expr) {
- // the expressions are not ColName, so we just compare the expressions
+ if semTable.EqualsExpr(expr.Expr, selectExpr.Expr) {
return i
}
}
@@ -251,15 +259,15 @@ func (hp *horizonPlanning) planAggregations(ctx *plancontext.PlanningContext, pl
func (hp *horizonPlanning) planAggrUsingOA(
ctx *plancontext.PlanningContext,
plan logicalPlan,
- grouping []abstract.GroupBy,
+ grouping []operators.GroupBy,
) (logicalPlan, error) {
oa := &orderedAggregate{
groupByKeys: make([]*engine.GroupByParams, 0, len(grouping)),
}
- var order []abstract.OrderBy
+ var order []operators.OrderBy
if hp.qp.CanPushDownSorting {
- hp.qp.AlignGroupByAndOrderBy()
+ hp.qp.AlignGroupByAndOrderBy(ctx)
// the grouping order might have changed, so we reload the grouping expressions
grouping = hp.qp.GetGrouping()
order = hp.qp.OrderExprs
@@ -280,14 +288,14 @@ func (hp *horizonPlanning) planAggrUsingOA(
}
if hp.sel.Having != nil {
- rewriter := hp.qp.AggrRewriter()
- sqlparser.Rewrite(hp.sel.Having.Expr, rewriter.Rewrite(), nil)
+ rewriter := hp.qp.AggrRewriter(ctx)
+ sqlparser.SafeRewrite(hp.sel.Having.Expr, rewriter.RewriteDown(), rewriter.RewriteUp())
if rewriter.Err != nil {
return nil, rewriter.Err
}
}
- aggregationExprs, err := hp.qp.AggregationExpressions()
+ aggregationExprs, err := hp.qp.AggregationExpressions(ctx)
if err != nil {
return nil, err
}
@@ -359,7 +367,7 @@ func (hp *horizonPlanning) planAggrUsingOA(
return hp.planHaving(ctx, oa)
}
-func passGroupingColumns(proj *projection, groupings []offsets, grouping []abstract.GroupBy) (projGrpOffsets []offsets, err error) {
+func passGroupingColumns(proj *projection, groupings []offsets, grouping []operators.GroupBy) (projGrpOffsets []offsets, err error) {
for idx, grp := range groupings {
origGrp := grouping[idx]
var offs offsets
@@ -379,7 +387,7 @@ func passGroupingColumns(proj *projection, groupings []offsets, grouping []abstr
return projGrpOffsets, nil
}
-func generateAggregateParams(aggrs []abstract.Aggr, aggrParamOffsets [][]offsets, proj *projection, pushed bool) ([]*engine.AggregateParams, error) {
+func generateAggregateParams(aggrs []operators.Aggr, aggrParamOffsets [][]offsets, proj *projection, pushed bool) ([]*engine.AggregateParams, error) {
aggrParams := make([]*engine.AggregateParams, len(aggrs))
for idx, paramOffset := range aggrParamOffsets {
aggr := aggrs[idx]
@@ -395,7 +403,13 @@ func generateAggregateParams(aggrs []abstract.Aggr, aggrParamOffsets [][]offsets
aggrExpr = &sqlparser.BinaryExpr{
Operator: sqlparser.MultOp,
Left: aggrExpr,
- Right: curr,
+ Right: &sqlparser.FuncExpr{
+ Name: sqlparser.NewIdentifierCI("coalesce"),
+ Exprs: sqlparser.SelectExprs{
+ &sqlparser.AliasedExpr{Expr: curr},
+ &sqlparser.AliasedExpr{Expr: sqlparser.NewIntLiteral("1")},
+ },
+ },
}
}
}
@@ -435,7 +449,7 @@ func addColumnsToOA(
ctx *plancontext.PlanningContext,
oa *orderedAggregate,
// these are the group by expressions that where added because we have unique aggregations
- distinctGroupBy []abstract.GroupBy,
+ distinctGroupBy []operators.GroupBy,
// these are the aggregate params we already have for non-distinct aggregations
aggrParams []*engine.AggregateParams,
// distinctOffsets mark out where we need to use the distinctGroupBy offsets
@@ -444,7 +458,7 @@ func addColumnsToOA(
// these are the offsets for the group by params
groupings []offsets,
// aggregationExprs are all the original aggregation expressions the query requested
- aggregationExprs []abstract.Aggr,
+ aggregationExprs []operators.Aggr,
) {
if len(distinctGroupBy) == 0 {
// no distinct aggregations
@@ -494,8 +508,8 @@ func addColumnsToOA(
// handleDistinctAggr takes in a slice of aggregations and returns GroupBy elements that replace
// the distinct aggregations in the input, along with a slice of offsets and the non-distinct aggregations left,
// so we can later reify the original aggregations
-func (hp *horizonPlanning) handleDistinctAggr(ctx *plancontext.PlanningContext, exprs []abstract.Aggr) (
- distincts []abstract.GroupBy, offsets []int, aggrs []abstract.Aggr, err error) {
+func (hp *horizonPlanning) handleDistinctAggr(ctx *plancontext.PlanningContext, exprs []operators.Aggr) (
+ distincts []operators.GroupBy, offsets []int, aggrs []operators.Aggr, err error) {
var distinctExpr sqlparser.Expr
for i, expr := range exprs {
if !expr.Distinct {
@@ -514,12 +528,12 @@ func (hp *horizonPlanning) handleDistinctAggr(ctx *plancontext.PlanningContext,
if distinctExpr == nil {
distinctExpr = innerWS
} else {
- if !sqlparser.EqualsExpr(distinctExpr, innerWS) {
- err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: only one distinct aggregation allowed in a select: %s", sqlparser.String(expr.Original))
+ if !ctx.SemTable.EqualsExpr(distinctExpr, innerWS) {
+ err = vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(expr.Original)))
return nil, nil, nil, err
}
}
- distincts = append(distincts, abstract.GroupBy{
+ distincts = append(distincts, operators.GroupBy{
Inner: inner,
WeightStrExpr: innerWS,
InnerIndex: expr.Index,
@@ -559,15 +573,15 @@ func newOffset(col int) offsets {
return offsets{col: col, wsCol: -1}
}
-func (hp *horizonPlanning) createGroupingsForColumns(columns []*sqlparser.ColName) ([]abstract.GroupBy, error) {
- var lhsGrouping []abstract.GroupBy
+func (hp *horizonPlanning) createGroupingsForColumns(columns []*sqlparser.ColName) ([]operators.GroupBy, error) {
+ var lhsGrouping []operators.GroupBy
for _, lhsColumn := range columns {
expr, wsExpr, err := hp.qp.GetSimplifiedExpr(lhsColumn)
if err != nil {
return nil, err
}
- lhsGrouping = append(lhsGrouping, abstract.GroupBy{
+ lhsGrouping = append(lhsGrouping, operators.GroupBy{
Inner: expr,
WeightStrExpr: wsExpr,
})
@@ -575,7 +589,7 @@ func (hp *horizonPlanning) createGroupingsForColumns(columns []*sqlparser.ColNam
return lhsGrouping, nil
}
-func hasUniqueVindex(semTable *semantics.SemTable, groupByExprs []abstract.GroupBy) bool {
+func hasUniqueVindex(semTable *semantics.SemTable, groupByExprs []operators.GroupBy) bool {
for _, groupByExpr := range groupByExprs {
if exprHasUniqueVindex(semTable, groupByExpr.WeightStrExpr) {
return true
@@ -584,7 +598,7 @@ func hasUniqueVindex(semTable *semantics.SemTable, groupByExprs []abstract.Group
return false
}
-func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderExprs []abstract.OrderBy, plan logicalPlan) (logicalPlan, error) {
+func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan logicalPlan) (logicalPlan, error) {
switch plan := plan.(type) {
case *routeGen4:
newPlan, err := planOrderByForRoute(ctx, orderExprs, plan, hp.qp.HasStar)
@@ -608,7 +622,7 @@ func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderEx
return newPlan, nil
case *orderedAggregate:
// remove ORDER BY NULL from the list of order by expressions since we will be doing the ordering on vtgate level so NULL is not useful
- var orderExprsWithoutNils []abstract.OrderBy
+ var orderExprsWithoutNils []operators.OrderBy
for _, expr := range orderExprs {
if sqlparser.IsNull(expr.Inner.Expr) {
continue
@@ -655,10 +669,10 @@ func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderEx
}
return plan, nil
}
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "ordering on complex query %T", plan)
+ return nil, vterrors.VT13001(fmt.Sprintf("ORDER BY in complex query %T", plan))
}
-func isSpecialOrderBy(o abstract.OrderBy) bool {
+func isSpecialOrderBy(o operators.OrderBy) bool {
if sqlparser.IsNull(o.Inner.Expr) {
return true
}
@@ -666,9 +680,9 @@ func isSpecialOrderBy(o abstract.OrderBy) bool {
return isFunction && f.Name.Lowered() == "rand"
}
-func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []abstract.OrderBy, plan *routeGen4, hasStar bool) (logicalPlan, error) {
+func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan *routeGen4, hasStar bool) (logicalPlan, error) {
for _, order := range orderExprs {
- err := checkOrderExprCanBePlannedInScatter(plan, order, hasStar)
+ err := checkOrderExprCanBePlannedInScatter(ctx, plan, order, hasStar)
if err != nil {
return nil, err
}
@@ -697,7 +711,7 @@ func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []abstract
// checkOrderExprCanBePlannedInScatter verifies that the given order by expression can be planned.
// It checks if the expression exists in the plan's select list when the query is a scatter.
-func checkOrderExprCanBePlannedInScatter(plan *routeGen4, order abstract.OrderBy, hasStar bool) error {
+func checkOrderExprCanBePlannedInScatter(ctx *plancontext.PlanningContext, plan *routeGen4, order operators.OrderBy, hasStar bool) error {
if !hasStar {
return nil
}
@@ -705,13 +719,13 @@ func checkOrderExprCanBePlannedInScatter(plan *routeGen4, order abstract.OrderBy
found := false
for _, expr := range sel.SelectExprs {
aliasedExpr, isAliasedExpr := expr.(*sqlparser.AliasedExpr)
- if isAliasedExpr && sqlparser.EqualsExpr(aliasedExpr.Expr, order.Inner.Expr) {
+ if isAliasedExpr && ctx.SemTable.EqualsExpr(aliasedExpr.Expr, order.Inner.Expr) {
found = true
break
}
}
if !found {
- return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: in scatter query: order by must reference a column in the select list: "+sqlparser.String(order.Inner))
+ return vterrors.VT12001(fmt.Sprintf("in scatter query: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order.Inner)))
}
return nil
}
@@ -734,7 +748,7 @@ func wrapAndPushExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr, weig
expr = unary.Expr
}
if !sqlparser.IsColName(expr) {
- return 0, 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
+ return 0, 0, vterrors.VT13001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
}
qt := ctx.SemTable.TypeFor(expr)
@@ -758,7 +772,7 @@ func weightStringFor(expr sqlparser.Expr) sqlparser.Expr {
return &sqlparser.WeightStringFuncExpr{Expr: expr}
}
-func (hp *horizonPlanning) planOrderByForHashJoin(ctx *plancontext.PlanningContext, orderExprs []abstract.OrderBy, plan *hashJoin) (logicalPlan, error) {
+func (hp *horizonPlanning) planOrderByForHashJoin(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan *hashJoin) (logicalPlan, error) {
if len(orderExprs) == 1 && isSpecialOrderBy(orderExprs[0]) {
rhs, err := hp.planOrderBy(ctx, orderExprs, plan.Right)
if err != nil {
@@ -782,7 +796,7 @@ func (hp *horizonPlanning) planOrderByForHashJoin(ctx *plancontext.PlanningConte
return sortPlan, nil
}
-func (hp *horizonPlanning) planOrderByForJoin(ctx *plancontext.PlanningContext, orderExprs []abstract.OrderBy, plan *joinGen4) (logicalPlan, error) {
+func (hp *horizonPlanning) planOrderByForJoin(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan *joinGen4) (logicalPlan, error) {
if len(orderExprs) == 1 && isSpecialOrderBy(orderExprs[0]) {
lhs, err := hp.planOrderBy(ctx, orderExprs, plan.Left)
if err != nil {
@@ -813,7 +827,7 @@ func (hp *horizonPlanning) planOrderByForJoin(ctx *plancontext.PlanningContext,
return sortPlan, nil
}
-func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *orderedAggregate, orderExprs []abstract.OrderBy) (logicalPlan, error) {
+func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *orderedAggregate, orderExprs []operators.OrderBy) (logicalPlan, error) {
primitive := &engine.MemorySort{}
ms := &memorySort{
resultsBuilder: resultsBuilder{
@@ -825,9 +839,9 @@ func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *o
}
for _, order := range orderExprs {
- offset, woffset, found := findExprInOrderedAggr(plan, order)
+ offset, woffset, found := findExprInOrderedAggr(ctx, plan, order)
if !found {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected to find the order by expression (%s) in orderedAggregate", sqlparser.String(order.Inner))
+ return nil, vterrors.VT13001(fmt.Sprintf("expected to find ORDER BY expression (%s) in orderedAggregate", sqlparser.String(order.Inner)))
}
collationID := ctx.SemTable.CollationForExpr(order.WeightStrExpr)
@@ -842,23 +856,23 @@ func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *o
return ms, nil
}
-func findExprInOrderedAggr(plan *orderedAggregate, order abstract.OrderBy) (keyCol int, weightStringCol int, found bool) {
+func findExprInOrderedAggr(ctx *plancontext.PlanningContext, plan *orderedAggregate, order operators.OrderBy) (keyCol int, weightStringCol int, found bool) {
for _, key := range plan.groupByKeys {
- if sqlparser.EqualsExpr(order.WeightStrExpr, key.Expr) ||
- sqlparser.EqualsExpr(order.Inner.Expr, key.Expr) {
+ if ctx.SemTable.EqualsExpr(order.WeightStrExpr, key.Expr) ||
+ ctx.SemTable.EqualsExpr(order.Inner.Expr, key.Expr) {
return key.KeyCol, key.WeightStringCol, true
}
}
for _, aggregate := range plan.aggregates {
- if sqlparser.EqualsExpr(order.WeightStrExpr, aggregate.Original.Expr) ||
- sqlparser.EqualsExpr(order.Inner.Expr, aggregate.Original.Expr) {
+ if ctx.SemTable.EqualsExpr(order.WeightStrExpr, aggregate.Original.Expr) ||
+ ctx.SemTable.EqualsExpr(order.Inner.Expr, aggregate.Original.Expr) {
return aggregate.Col, -1, true
}
}
return 0, 0, false
}
-func (hp *horizonPlanning) createMemorySortPlan(ctx *plancontext.PlanningContext, plan logicalPlan, orderExprs []abstract.OrderBy, useWeightStr bool) (logicalPlan, error) {
+func (hp *horizonPlanning) createMemorySortPlan(ctx *plancontext.PlanningContext, plan logicalPlan, orderExprs []operators.OrderBy, useWeightStr bool) (logicalPlan, error) {
primitive := &engine.MemorySort{}
ms := &memorySort{
resultsBuilder: resultsBuilder{
@@ -889,7 +903,7 @@ func (hp *horizonPlanning) createMemorySortPlan(ctx *plancontext.PlanningContext
return ms, nil
}
-func orderExprsDependsOnTableSet(orderExprs []abstract.OrderBy, semTable *semantics.SemTable, ts semantics.TableSet) bool {
+func orderExprsDependsOnTableSet(orderExprs []operators.OrderBy, semTable *semantics.SemTable, ts semantics.TableSet) bool {
for _, expr := range orderExprs {
exprDependencies := semTable.RecursiveDeps(expr.Inner.Expr)
if !exprDependencies.IsSolvedBy(ts) {
@@ -918,7 +932,7 @@ func (hp *horizonPlanning) planDistinct(ctx *plancontext.PlanningContext, plan l
case *orderedAggregate:
return hp.planDistinctOA(ctx.SemTable, p)
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown plan type for DISTINCT %T", plan)
+ return nil, vterrors.VT13001(fmt.Sprintf("unknown plan type for DISTINCT %T", plan))
}
}
@@ -936,7 +950,7 @@ func (hp *horizonPlanning) planDistinctOA(semTable *semantics.SemTable, currPlan
}
found := false
for _, grpParam := range currPlan.groupByKeys {
- if sqlparser.EqualsExpr(expr, grpParam.Expr) {
+ if semTable.EqualsExpr(expr, grpParam.Expr) {
found = true
oa.groupByKeys = append(oa.groupByKeys, grpParam)
break
@@ -946,21 +960,21 @@ func (hp *horizonPlanning) planDistinctOA(semTable *semantics.SemTable, currPlan
continue
}
for _, aggrParam := range currPlan.aggregates {
- if sqlparser.EqualsExpr(expr, aggrParam.Expr) {
+ if semTable.EqualsExpr(expr, aggrParam.Expr) {
found = true
oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{KeyCol: aggrParam.Col, WeightStringCol: -1, CollationID: semTable.CollationForExpr(expr)})
break
}
}
if !found {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unable to plan distinct query as the column is not projected: %s", sqlparser.String(sExpr.Col))
+ return nil, vterrors.VT13001(fmt.Sprintf("unable to plan DISTINCT query as the column is not projected: %s", sqlparser.String(sExpr.Col)))
}
}
return oa, nil
}
func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan logicalPlan) (logicalPlan, error) {
- var orderExprs []abstract.OrderBy
+ var orderExprs []operators.OrderBy
var groupByKeys []*engine.GroupByParams
for index, sExpr := range hp.qp.SelectExprs {
aliasExpr, err := sExpr.GetAliasedExpr()
@@ -968,7 +982,7 @@ func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan lo
return nil, err
}
if isAmbiguousOrderBy(index, aliasExpr.As, hp.qp.SelectExprs) {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "generating order by clause: ambiguous symbol reference: %s", sqlparser.String(aliasExpr.As))
+ return nil, vterrors.VT13001(fmt.Sprintf("generating ORDER BY clause: ambiguous symbol reference: %s", sqlparser.String(aliasExpr.As)))
}
var inner sqlparser.Expr
if aliasExpr.As.IsEmpty() {
@@ -988,7 +1002,7 @@ func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan lo
grpParam.WeightStringCol = wOffset
groupByKeys = append(groupByKeys, grpParam)
- orderExprs = append(orderExprs, abstract.OrderBy{
+ orderExprs = append(orderExprs, operators.OrderBy{
Inner: &sqlparser.Order{Expr: inner},
WeightStrExpr: aliasExpr.Expr},
)
@@ -1007,7 +1021,7 @@ func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan lo
return oa, nil
}
-func isAmbiguousOrderBy(index int, col sqlparser.IdentifierCI, exprs []abstract.SelectExpr) bool {
+func isAmbiguousOrderBy(index int, col sqlparser.IdentifierCI, exprs []operators.SelectExpr) bool {
if col.String() == "" {
return false
}
@@ -1033,7 +1047,7 @@ func isAmbiguousOrderBy(index int, col sqlparser.IdentifierCI, exprs []abstract.
return false
}
-func selectHasUniqueVindex(semTable *semantics.SemTable, sel []abstract.SelectExpr) bool {
+func selectHasUniqueVindex(semTable *semantics.SemTable, sel []operators.SelectExpr) bool {
for _, expr := range sel {
exp, err := expr.GetExpr()
if err != nil {
@@ -1063,11 +1077,11 @@ func pushHaving(ctx *plancontext.PlanningContext, expr sqlparser.Expr, plan logi
case *pulloutSubquery:
return pushHaving(ctx, expr, node.underlying)
case *simpleProjection:
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: filtering on results of cross-shard derived table")
+ return nil, vterrors.VT13001("filtering on results of cross-shard derived table")
case *orderedAggregate:
return newFilter(ctx, plan, expr)
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable %T.filtering", plan)
+ return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.filtering", plan))
}
func isJoin(plan logicalPlan) bool {
@@ -1110,13 +1124,13 @@ func planSingleShardRoutePlan(sel sqlparser.SelectStatement, rb *routeGen4) erro
if err != nil {
return err
}
- sqlparser.Rewrite(rb.Select, func(cursor *sqlparser.Cursor) bool {
- if aliasedExpr, ok := cursor.Node().(sqlparser.SelectExpr); ok {
+ return sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok {
removeKeyspaceFromSelectExpr(aliasedExpr)
}
- return true
- }, nil)
- return nil
+ return true, nil
+ }, rb.Select)
+
}
func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) {
@@ -1135,7 +1149,7 @@ func stripDownQuery(from, to sqlparser.SelectStatement) error {
case *sqlparser.Select:
toNode, ok := to.(*sqlparser.Select)
if !ok {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "AST did not match")
+ return vterrors.VT13001("AST did not match")
}
toNode.Distinct = node.Distinct
toNode.GroupBy = node.GroupBy
@@ -1149,7 +1163,7 @@ func stripDownQuery(from, to sqlparser.SelectStatement) error {
case *sqlparser.Union:
toNode, ok := to.(*sqlparser.Union)
if !ok {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "AST did not match")
+ return vterrors.VT13001("AST did not match")
}
err = stripDownQuery(node.Left, toNode.Left)
if err != nil {
@@ -1161,12 +1175,12 @@ func stripDownQuery(from, to sqlparser.SelectStatement) error {
}
toNode.OrderBy = node.OrderBy
default:
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: this should not happen - we have covered all implementations of SelectStatement %T", from)
+ return vterrors.VT13001(fmt.Sprintf("this should not happen - we have covered all implementations of SelectStatement %T", from))
}
return nil
}
-func planGroupByGen4(ctx *plancontext.PlanningContext, groupExpr abstract.GroupBy, plan logicalPlan, wsAdded bool) error {
+func planGroupByGen4(ctx *plancontext.PlanningContext, groupExpr operators.GroupBy, plan logicalPlan, wsAdded bool) error {
switch node := plan.(type) {
case *routeGen4:
sel := node.Select.(*sqlparser.Select)
@@ -1181,13 +1195,13 @@ func planGroupByGen4(ctx *plancontext.PlanningContext, groupExpr abstract.GroupB
case *pulloutSubquery:
return planGroupByGen4(ctx, groupExpr, node.underlying, wsAdded)
case *semiJoin:
- return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: group by in a query having a correlated subquery")
+ return vterrors.VT13001("GROUP BY in a query having a correlated subquery")
default:
- return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: group by on: %T", plan)
+ return vterrors.VT13001(fmt.Sprintf("GROUP BY on: %T", plan))
}
}
-func getLengthOfProjection(groupingOffsets []offsets, aggregations []abstract.Aggr) int {
+func getLengthOfProjection(groupingOffsets []offsets, aggregations []operators.Aggr) int {
length := 0
for _, groupBy := range groupingOffsets {
if groupBy.wsCol != -1 {
diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go
index ec9dc670651..d74d8fcebcc 100644
--- a/go/vt/vtgate/planbuilder/insert.go
+++ b/go/vt/vtgate/planbuilder/insert.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- "errors"
"fmt"
"strconv"
"strings"
@@ -27,7 +26,6 @@ import (
"vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vtgate/semantics"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -36,8 +34,8 @@ import (
// buildInsertPlan builds the route for an INSERT statement.
func buildInsertPlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) {
+ pb := newStmtAwarePrimitiveBuilder(vschema, newJointab(reservedVars), stmt)
ins := stmt.(*sqlparser.Insert)
- pb := newPrimitiveBuilder(vschema, newJointab(reservedVars))
exprs := sqlparser.TableExprs{&sqlparser.AliasedTableExpr{Expr: ins.Table}}
rb, err := pb.processDMLTable(exprs, reservedVars, nil)
if err != nil {
@@ -46,12 +44,12 @@ func buildInsertPlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedV
// The table might have been routed to a different one.
ins.Table = exprs[0].(*sqlparser.AliasedTableExpr).Expr.(sqlparser.TableName)
if rb.eroute.TargetDestination != nil {
- return nil, errors.New("unsupported: INSERT with a target destination")
+ return nil, vterrors.VT12001("INSERT with a target destination")
}
if len(pb.st.tables) != 1 {
// Unreachable.
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi-table insert statement in not supported in sharded keyspace")
+ return nil, vterrors.VT12001("multi-table INSERT statement in a sharded keyspace")
}
var vschemaTable *vindexes.Table
for _, tval := range pb.st.tables {
@@ -62,7 +60,7 @@ func buildInsertPlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedV
return buildInsertUnshardedPlan(ins, vschemaTable, reservedVars, vschema)
}
if ins.Action == sqlparser.ReplaceAct {
- return nil, errors.New("unsupported: REPLACE INTO with sharded schema")
+ return nil, vterrors.VT12001("REPLACE INTO with sharded keyspace")
}
return buildInsertShardedPlan(ins, vschemaTable, reservedVars, vschema)
}
@@ -73,13 +71,15 @@ func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, rese
table,
table.Keyspace,
)
+ applyCommentDirectives(ins, eins)
+
var rows sqlparser.Values
tc := &tableCollector{}
tc.addVindexTable(table)
switch insertValues := ins.Rows.(type) {
case *sqlparser.Select, *sqlparser.Union:
if eins.Table.AutoIncrement != nil {
- return nil, errors.New("unsupported: auto-inc and select in insert")
+ return nil, vterrors.VT12001("auto-increment and SELECT in INSERT")
}
plan, err := subquerySelectPlan(ins, vschema, reservedVars, false)
if err != nil {
@@ -96,7 +96,7 @@ func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, rese
case sqlparser.Values:
rows = insertValues
default:
- return nil, fmt.Errorf("BUG: unexpected construct in insert: %T", insertValues)
+ return nil, vterrors.VT13001(fmt.Sprintf("unexpected construct in INSERT: %T", insertValues))
}
if eins.Table.AutoIncrement == nil {
eins.Query = generateQuery(ins)
@@ -108,12 +108,12 @@ func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, rese
if table.ColumnListAuthoritative {
populateInsertColumnlist(ins, table)
} else {
- return nil, errors.New("column list required for tables with auto-inc columns")
+ return nil, vterrors.VT13001("column list required for tables with auto-inc columns")
}
}
for _, row := range rows {
if len(ins.Columns) != len(row) {
- return nil, errors.New("column list doesn't match values")
+ return nil, vterrors.VT13001("column list does not match values")
}
}
if err := modifyForAutoinc(ins, eins); err != nil {
@@ -135,7 +135,7 @@ func buildInsertShardedPlan(ins *sqlparser.Insert, table *vindexes.Table, reserv
eins.Ignore = bool(ins.Ignore)
if ins.OnDup != nil {
if isVindexChanging(sqlparser.UpdateExprs(ins.OnDup), eins.Table.ColumnVindexes) {
- return nil, errors.New("unsupported: DML cannot change vindex column")
+ return nil, vterrors.VT12001("DML cannot update vindex column")
}
eins.Ignore = true
}
@@ -156,7 +156,7 @@ func buildInsertShardedPlan(ins *sqlparser.Insert, table *vindexes.Table, reserv
for _, value := range rows {
if len(ins.Columns) != len(value) {
- return nil, errors.New("column list doesn't match values")
+ return nil, vterrors.VT13001("column list does not match values")
}
}
@@ -205,7 +205,7 @@ func buildInsertSelectPlan(ins *sqlparser.Insert, table *vindexes.Table, reserve
// check if column list is provided if not, then vschema should be able to provide the column list.
if len(ins.Columns) == 0 {
if !table.ColumnListAuthoritative {
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "insert should contain column list or the table should have authoritative columns in vschema")
+ return nil, vterrors.VT09004()
}
populateInsertColumnlist(ins, table)
}
@@ -272,7 +272,7 @@ func getStatementAndPlanner(
configuredPlanner, err = getConfiguredPlanner(vschema, buildUnionPlan, stmt, "")
selectStmt = stmt
default:
- err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: insert plan with %T", ins.Rows)
+ err = vterrors.VT12001(fmt.Sprintf("INSERT plan with %T", ins.Rows))
}
if err != nil {
@@ -284,7 +284,7 @@ func getStatementAndPlanner(
func checkColumnCounts(ins *sqlparser.Insert, selectStmt sqlparser.SelectStatement) error {
if len(ins.Columns) < selectStmt.GetColumnCount() {
- return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueCountOnRow, "Column count doesn't match value count at row 1")
+ return vterrors.VT03006()
}
if len(ins.Columns) > selectStmt.GetColumnCount() {
sel := sqlparser.GetFirstSelect(selectStmt)
@@ -295,7 +295,7 @@ func checkColumnCounts(ins *sqlparser.Insert, selectStmt sqlparser.SelectStateme
}
}
if !hasStarExpr {
- return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueCountOnRow, "Column count doesn't match value count at row 1")
+ return vterrors.VT03006()
}
}
return nil
@@ -326,7 +326,7 @@ func extractColVindexOffsets(ins *sqlparser.Insert, colVindexes []*vindexes.Colu
colNum := findColumn(ins, col)
// sharding column values should be provided in the insert.
if colNum == -1 && idx == 0 {
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "insert query does not have sharding column '%v' in the column list", col)
+ return nil, vterrors.VT09003(col)
}
vv[idx] = append(vv[idx], colNum)
}
@@ -415,7 +415,7 @@ func modifyForAutoinc(ins *sqlparser.Insert, eins *engine.Insert) error {
eins.Generate.Values = evalengine.NewTupleExpr(autoIncValues...)
return nil
}
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected construct in insert: %T", ins.Rows)
+ return vterrors.VT13001(fmt.Sprintf("unexpected construct in INSERT: %T", ins.Rows))
}
// findOrAddColumn finds the position of a column in the insert. If it's
diff --git a/go/vt/vtgate/planbuilder/join.go b/go/vt/vtgate/planbuilder/join.go
index 84738152bf3..0fc9b5f2ce3 100644
--- a/go/vt/vtgate/planbuilder/join.go
+++ b/go/vt/vtgate/planbuilder/join.go
@@ -17,7 +17,8 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/sqlparser"
@@ -97,7 +98,7 @@ func newJoin(lpb, rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, reserve
return err
}
case ajoin.Condition.Using != nil:
- return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: join with USING(column_list) clause for complex queries")
+ return vterrors.VT12001("JOIN with USING(column_list) clause for complex queries")
}
}
lpb.plan = &join{
@@ -227,7 +228,7 @@ func (jb *join) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weight
// Rewrite implements the logicalPlan interface
func (jb *join) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 2 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "join: wrong number of inputs")
+ return vterrors.VT13001(fmt.Sprintf("join: wrong number of inputs, got: %d, expect: 2", len(inputs)))
}
jb.Left = inputs[0]
jb.Right = inputs[1]
diff --git a/go/vt/vtgate/planbuilder/joinGen4.go b/go/vt/vtgate/planbuilder/joinGen4.go
index a038f401090..04a408b1fb4 100644
--- a/go/vt/vtgate/planbuilder/joinGen4.go
+++ b/go/vt/vtgate/planbuilder/joinGen4.go
@@ -17,7 +17,8 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -79,7 +80,7 @@ func (j *joinGen4) Inputs() []logicalPlan {
// Rewrite implements the logicalPlan interface
func (j *joinGen4) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 2 {
- return vterrors.New(vtrpcpb.Code_INTERNAL, "wrong number of children")
+ return vterrors.VT13001(fmt.Sprintf("wrong number of children in joinGen4 rewrite, got: %d, expect: 2", len(inputs)))
}
j.Left = inputs[0]
j.Right = inputs[1]
diff --git a/go/vt/vtgate/planbuilder/logical_plan.go b/go/vt/vtgate/planbuilder/logical_plan.go
index b74ff5b35d7..363c012daf8 100644
--- a/go/vt/vtgate/planbuilder/logical_plan.go
+++ b/go/vt/vtgate/planbuilder/logical_plan.go
@@ -17,7 +17,8 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -238,7 +239,7 @@ func (bc *logicalPlanCommon) SupplyWeightString(colNumber int, alsoAddToGroupBy
// Rewrite implements the logicalPlan interface
func (bc *logicalPlanCommon) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 1 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "builderCommon: wrong number of inputs")
+ return vterrors.VT13001(fmt.Sprintf("builderCommon: wrong number of inputs, got: %d, expect: 1", len(inputs)))
}
bc.input = inputs[0]
return nil
diff --git a/go/vt/vtgate/planbuilder/memory_sort.go b/go/vt/vtgate/planbuilder/memory_sort.go
index 9d1b04bba14..20dd125ecd0 100644
--- a/go/vt/vtgate/planbuilder/memory_sort.go
+++ b/go/vt/vtgate/planbuilder/memory_sort.go
@@ -17,12 +17,13 @@ limitations under the License.
package planbuilder
import (
- "errors"
"fmt"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/vt/vterrors"
+
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -70,22 +71,22 @@ func newMemorySort(plan logicalPlan, orderBy v3OrderBy) (*memorySort, error) {
case *sqlparser.CastExpr:
colName, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
- return nil, fmt.Errorf("unsupported: memory sort: complex order by expression: %s", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
colNumber = findColNumber(ms, colName)
case *sqlparser.ConvertExpr:
colName, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
- return nil, fmt.Errorf("unsupported: memory sort: complex order by expression: %s", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
colNumber = findColNumber(ms, colName)
default:
- return nil, fmt.Errorf("unsupported: memory sort: complex order by expression: %s", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
// If column is not found, then the order by is referencing
// a column that's not on the select list.
if colNumber == -1 {
- return nil, fmt.Errorf("unsupported: memory sort: order by must reference a column in the select list: %s", sqlparser.String(order))
+ return nil, vterrors.VT12001(fmt.Sprintf("memory sort: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order)))
}
// TODO(king-11) need to pass in collation here
ob := engine.OrderByParams{
@@ -109,7 +110,7 @@ func (ms *memorySort) Primitive() engine.Primitive {
// SetLimit implements the logicalPlan interface
func (ms *memorySort) SetLimit(limit *sqlparser.Limit) error {
- return errors.New("memorySort.Limit: unreachable")
+ return vterrors.VT13001("memorySort.Limit: unreachable")
}
// Wireup implements the logicalPlan interface
diff --git a/go/vt/vtgate/planbuilder/migration.go b/go/vt/vtgate/planbuilder/migration.go
index 3bf3c1b0657..468c86d3ffb 100644
--- a/go/vt/vtgate/planbuilder/migration.go
+++ b/go/vt/vtgate/planbuilder/migration.go
@@ -19,7 +19,6 @@ package planbuilder
import (
"vitess.io/vitess/go/vt/key"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
@@ -36,11 +35,11 @@ func buildAlterMigrationPlan(query string, vschema plancontext.VSchema, enableOn
return nil, err
}
if ks == nil {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)")
+ return nil, vterrors.VT09005()
}
if tabletType != topodatapb.TabletType_PRIMARY {
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "ALTER VITESS_MIGRATION works only on primary tablet")
+ return nil, vterrors.VT09006("ALTER")
}
if dest == nil {
@@ -64,11 +63,11 @@ func buildRevertMigrationPlan(query string, stmt *sqlparser.RevertMigration, vsc
return nil, err
}
if ks == nil {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)")
+ return nil, vterrors.VT09005()
}
if tabletType != topodatapb.TabletType_PRIMARY {
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "REVERT VITESS_MIGRATION works only on primary tablet")
+ return nil, vterrors.VT09006("REVERT")
}
if dest == nil {
@@ -93,11 +92,11 @@ func buildShowMigrationLogsPlan(query string, vschema plancontext.VSchema, enabl
return nil, err
}
if ks == nil {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)")
+ return nil, vterrors.VT09005()
}
if tabletType != topodatapb.TabletType_PRIMARY {
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "SHOW VITESS_MIGRATION works only on primary tablet")
+ return nil, vterrors.VT09006("SHOW")
}
if dest == nil {
diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go
index 5f6d0e5da96..ddceddc9ea0 100644
--- a/go/vt/vtgate/planbuilder/operator_transformers.go
+++ b/go/vt/vtgate/planbuilder/operator_transformers.go
@@ -17,23 +17,25 @@ limitations under the License.
package planbuilder
import (
+ "fmt"
"sort"
"strconv"
"strings"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/physical"
-
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/vt/vtgate/evalengine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -41,23 +43,23 @@ import (
"vitess.io/vitess/go/vt/vterrors"
)
-func transformToLogicalPlan(ctx *plancontext.PlanningContext, op abstract.PhysicalOperator, isRoot bool) (logicalPlan, error) {
+func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator, isRoot bool) (logicalPlan, error) {
switch op := op.(type) {
- case *physical.Route:
+ case *operators.Route:
return transformRoutePlan(ctx, op)
- case *physical.ApplyJoin:
+ case *operators.ApplyJoin:
return transformApplyJoinPlan(ctx, op)
- case *physical.Union:
+ case *operators.Union:
return transformUnionPlan(ctx, op, isRoot)
- case *physical.Vindex:
+ case *operators.Vindex:
return transformVindexPlan(ctx, op)
- case *physical.SubQueryOp:
+ case *operators.SubQueryOp:
return transformSubQueryPlan(ctx, op)
- case *physical.CorrelatedSubQueryOp:
+ case *operators.CorrelatedSubQueryOp:
return transformCorrelatedSubQueryPlan(ctx, op)
- case *physical.Derived:
+ case *operators.Derived:
return transformDerivedPlan(ctx, op)
- case *physical.Filter:
+ case *operators.Filter:
plan, err := transformToLogicalPlan(ctx, op.Source, false)
if err != nil {
return nil, err
@@ -67,7 +69,7 @@ func transformToLogicalPlan(ctx *plancontext.PlanningContext, op abstract.Physic
ctx: ctx,
plan: plan,
}
- ast := sqlparser.AndExpressions(op.Predicates...)
+ ast := ctx.SemTable.AndExpressions(op.Predicates...)
predicate, err := evalengine.Translate(ast, scl)
if err != nil {
return nil, err
@@ -80,12 +82,54 @@ func transformToLogicalPlan(ctx *plancontext.PlanningContext, op abstract.Physic
ASTPredicate: ast,
},
}, nil
+ case *operators.Horizon:
+ return transformHorizon(ctx, op, isRoot)
+ }
+
+ return nil, vterrors.VT13001(fmt.Sprintf("unknown type encountered: %T (transformToLogicalPlan)", op))
+}
+
+func transformHorizon(ctx *plancontext.PlanningContext, op *operators.Horizon, isRoot bool) (logicalPlan, error) {
+ source, err := transformToLogicalPlan(ctx, op.Source, isRoot)
+ if err != nil {
+ return nil, err
}
+ switch node := op.Select.(type) {
+ case *sqlparser.Select:
+ hp := horizonPlanning{
+ sel: node,
+ }
+
+ replaceSubQuery(ctx, node)
+ plan, err := hp.planHorizon(ctx, source, true)
+ if err != nil {
+ return nil, err
+ }
+ return planLimit(node.Limit, plan)
+ case *sqlparser.Union:
+ var err error
+ rb, isRoute := source.(*routeGen4)
+ if !isRoute && ctx.SemTable.NotSingleRouteErr != nil {
+ return nil, ctx.SemTable.NotSingleRouteErr
+ }
+ var plan logicalPlan
+ if isRoute && rb.isSingleShard() {
+ err = planSingleShardRoutePlan(node, rb)
+ plan = rb
+ } else {
+ plan, err = planOrderByOnUnion(ctx, source, node)
+ }
+ if err != nil {
+ return nil, err
+ }
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unknown type encountered: %T (transformToLogicalPlan)", op)
+ return planLimit(node.Limit, plan)
+ default:
+ panic("only SELECT and UNION implement the SelectStatement interface")
+ }
}
-func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *physical.ApplyJoin) (logicalPlan, error) {
+func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *operators.ApplyJoin) (logicalPlan, error) {
lhs, err := transformToLogicalPlan(ctx, n.LHS, false)
if err != nil {
return nil, err
@@ -109,13 +153,7 @@ func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *physical.ApplyJ
}, nil
}
-func transformRoutePlan(ctx *plancontext.PlanningContext, op *physical.Route) (logicalPlan, error) {
- switch src := op.Source.(type) {
- case *physical.Update:
- return transformUpdatePlan(ctx, op, src)
- case *physical.Delete:
- return transformDeletePlan(ctx, op, src)
- }
+func routeToEngineRoute(ctx *plancontext.PlanningContext, op *operators.Route) (*engine.Route, error) {
tableNames, err := getAllTableNames(op)
if err != nil {
return nil, err
@@ -126,29 +164,46 @@ func transformRoutePlan(ctx *plancontext.PlanningContext, op *physical.Route) (l
vindex = op.Selected.FoundVindex
values = op.Selected.Values
}
+ return &engine.Route{
+ TableName: strings.Join(tableNames, ", "),
+ RoutingParameters: &engine.RoutingParameters{
+ Opcode: op.RouteOpCode,
+ Keyspace: op.Keyspace,
+ Vindex: vindex,
+ Values: values,
+ SysTableTableName: op.SysTableTableName,
+ SysTableTableSchema: op.SysTableTableSchema,
+ },
+ }, nil
+}
+
+func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) (logicalPlan, error) {
+ switch src := op.Source.(type) {
+ case *operators.Update:
+ return transformUpdatePlan(ctx, op, src)
+ case *operators.Delete:
+ return transformDeletePlan(ctx, op, src)
+ }
condition := getVindexPredicate(ctx, op)
- sel := toSQL(ctx, op.Source)
+ sel, err := operators.ToSQL(ctx, op.Source)
+ if err != nil {
+ return nil, err
+ }
replaceSubQuery(ctx, sel)
+ eroute, err := routeToEngineRoute(ctx, op)
+ if err != nil {
+ return nil, err
+ }
return &routeGen4{
- eroute: &engine.Route{
- TableName: strings.Join(tableNames, ", "),
- RoutingParameters: &engine.RoutingParameters{
- Opcode: op.RouteOpCode,
- Keyspace: op.Keyspace,
- Vindex: vindex,
- Values: values,
- SysTableTableName: op.SysTableTableName,
- SysTableTableSchema: op.SysTableTableSchema,
- },
- },
+ eroute: eroute,
Select: sel,
- tables: op.TableID(),
+ tables: operators.TableID(op),
condition: condition,
}, nil
}
-func transformUpdatePlan(ctx *plancontext.PlanningContext, op *physical.Route, upd *physical.Update) (logicalPlan, error) {
+func transformUpdatePlan(ctx *plancontext.PlanningContext, op *operators.Route, upd *operators.Update) (logicalPlan, error) {
var vindex vindexes.Vindex
var values []evalengine.Expr
if op.Selected != nil {
@@ -192,7 +247,7 @@ func transformUpdatePlan(ctx *plancontext.PlanningContext, op *physical.Route, u
return &primitiveWrapper{prim: e}, nil
}
-func transformDeletePlan(ctx *plancontext.PlanningContext, op *physical.Route, del *physical.Delete) (logicalPlan, error) {
+func transformDeletePlan(ctx *plancontext.PlanningContext, op *operators.Route, del *operators.Delete) (logicalPlan, error) {
var vindex vindexes.Vindex
var values []evalengine.Expr
if op.Selected != nil {
@@ -240,15 +295,15 @@ func replaceSubQuery(ctx *plancontext.PlanningContext, sel sqlparser.Statement)
return
}
sqr := &subQReplacer{subqueryToReplace: extractedSubqueries}
- sqlparser.Rewrite(sel, sqr.replacer, nil)
+ sqlparser.SafeRewrite(sel, nil, sqr.replacer)
for sqr.replaced {
// to handle subqueries inside subqueries, we need to do this again and again until no replacements are left
sqr.replaced = false
- sqlparser.Rewrite(sel, sqr.replacer, nil)
+ sqlparser.SafeRewrite(sel, nil, sqr.replacer)
}
}
-func getVindexPredicate(ctx *plancontext.PlanningContext, op *physical.Route) sqlparser.Expr {
+func getVindexPredicate(ctx *plancontext.PlanningContext, op *operators.Route) sqlparser.Expr {
var condition sqlparser.Expr
if op.Selected != nil {
if len(op.Selected.ValueExprs) > 0 {
@@ -281,10 +336,10 @@ func getVindexPredicate(ctx *plancontext.PlanningContext, op *physical.Route) sq
return condition
}
-func getAllTableNames(op *physical.Route) ([]string, error) {
+func getAllTableNames(op *operators.Route) ([]string, error) {
tableNameMap := map[string]any{}
- err := physical.VisitOperators(op, func(op abstract.PhysicalOperator) (bool, error) {
- tbl, isTbl := op.(*physical.Table)
+ err := rewrite.Visit(op, func(op ops.Operator) error {
+ tbl, isTbl := op.(*operators.Table)
var name string
if isTbl {
if tbl.QTable.IsInfSchema {
@@ -294,7 +349,7 @@ func getAllTableNames(op *physical.Route) ([]string, error) {
}
tableNameMap[name] = nil
}
- return true, nil
+ return nil
})
if err != nil {
return nil, err
@@ -307,7 +362,7 @@ func getAllTableNames(op *physical.Route) ([]string, error) {
return tableNames, nil
}
-func transformUnionPlan(ctx *plancontext.PlanningContext, op *physical.Union, isRoot bool) (logicalPlan, error) {
+func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union, isRoot bool) (logicalPlan, error) {
var sources []logicalPlan
var err error
if op.Distinct {
@@ -337,7 +392,7 @@ func transformUnionPlan(ctx *plancontext.PlanningContext, op *physical.Union, is
result = src
} else {
if len(op.Ordering) > 0 {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't do ORDER BY on top of UNION")
+ return nil, vterrors.VT12001("ORDER BY on top of UNION")
}
result = &concatenateGen4{sources: sources}
}
@@ -356,7 +411,7 @@ func transformUnionPlan(ctx *plancontext.PlanningContext, op *physical.Union, is
func getWeightStringForSelectExpr(selectExpr sqlparser.SelectExpr) (*sqlparser.AliasedExpr, error) {
expr, isAliased := selectExpr.(*sqlparser.AliasedExpr)
if !isAliased {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "cannot convert select expression to an aliased expression")
+ return nil, vterrors.VT12001("get weight string expression for non-aliased expression")
}
return &sqlparser.AliasedExpr{Expr: weightStringFor(expr.Expr)}, nil
}
@@ -409,36 +464,33 @@ func pushWeightStringForDistinct(ctx *plancontext.PlanningContext, plan logicalP
}
node.noNeedToTypeCheck = append(node.noNeedToTypeCheck, newOffset)
case *joinGen4:
- lhsSolves := node.Left.ContainsTables()
- rhsSolves := node.Right.ContainsTables()
- expr := node.OutputColumns()[offset]
- aliasedExpr, isAliased := expr.(*sqlparser.AliasedExpr)
- if !isAliased {
- return 0, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "cannot convert select expression to an aliased expression")
- }
- deps := ctx.SemTable.RecursiveDeps(aliasedExpr.Expr)
+ joinOffset := node.Cols[offset]
switch {
- case deps.IsSolvedBy(lhsSolves):
- offset, err = pushWeightStringForDistinct(ctx, node.Left, offset)
- node.Cols = append(node.Cols, -(offset + 1))
- case deps.IsSolvedBy(rhsSolves):
- offset, err = pushWeightStringForDistinct(ctx, node.Right, offset)
- node.Cols = append(node.Cols, offset+1)
+ case joinOffset < 0:
+ offset, err = pushWeightStringForDistinct(ctx, node.Left, -(joinOffset + 1))
+ offset = -(offset + 1)
+ case joinOffset > 0:
+ offset, err = pushWeightStringForDistinct(ctx, node.Right, joinOffset-1)
+ offset = offset + 1
default:
- return 0, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "cannot push distinct weight string to both sides of the join")
+ return 0, vterrors.VT13001("wrong column offset in join plan to push DISTINCT WEIGHT_STRING")
+ }
+ if err != nil {
+ return 0, err
}
- newOffset = len(node.Cols) - 1
+ newOffset = len(node.Cols)
+ node.Cols = append(node.Cols, offset)
default:
- return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "bug: not supported pushWeightStringForDistinct on %T", plan)
+ return 0, vterrors.VT13001(fmt.Sprintf("pushWeightStringForDistinct on %T", plan))
}
return
}
-func transformAndMerge(ctx *plancontext.PlanningContext, op *physical.Union) (sources []logicalPlan, err error) {
- for i, source := range op.Sources {
+func transformAndMerge(ctx *plancontext.PlanningContext, op *operators.Union) (sources []logicalPlan, err error) {
+ for _, source := range op.Sources {
// first we go over all the operator inputs and turn them into logical plans,
// including horizon planning
- plan, err := createLogicalPlan(ctx, source, op.SelectStmts[i])
+ plan, err := transformToLogicalPlan(ctx, source, false)
if err != nil {
return nil, err
}
@@ -480,10 +532,10 @@ func transformAndMerge(ctx *plancontext.PlanningContext, op *physical.Union) (so
return sources, nil
}
-func transformAndMergeInOrder(ctx *plancontext.PlanningContext, op *physical.Union) (sources []logicalPlan, err error) {
+func transformAndMergeInOrder(ctx *plancontext.PlanningContext, op *operators.Union) (sources []logicalPlan, err error) {
// We go over all the input operators and turn them into logical plans
for i, source := range op.Sources {
- plan, err := createLogicalPlan(ctx, source, op.SelectStmts[i])
+ plan, err := transformToLogicalPlan(ctx, source, false)
if err != nil {
return nil, err
}
@@ -506,27 +558,15 @@ func transformAndMergeInOrder(ctx *plancontext.PlanningContext, op *physical.Uni
return sources, nil
}
-func createLogicalPlan(ctx *plancontext.PlanningContext, source abstract.PhysicalOperator, selStmt *sqlparser.Select) (logicalPlan, error) {
- plan, err := transformToLogicalPlan(ctx, source, false)
- if err != nil {
- return nil, err
- }
- if selStmt != nil {
- plan, err = planHorizon(ctx, plan, selStmt, true)
- if err != nil {
- return nil, err
- }
- if err := setMiscFunc(plan, selStmt); err != nil {
- return nil, err
- }
- }
- return plan, nil
-}
-
-func getCollationsFor(ctx *plancontext.PlanningContext, n *physical.Union) []collations.ID {
+func getCollationsFor(ctx *plancontext.PlanningContext, n *operators.Union) []collations.ID {
// TODO: coerce selects' select expressions' collations
var colls []collations.ID
- for _, expr := range n.SelectStmts[0].SelectExprs {
+
+ sel, err := n.GetSelectFor(0)
+ if err != nil {
+ return nil
+ }
+ for _, expr := range sel.SelectExprs {
aliasedE, ok := expr.(*sqlparser.AliasedExpr)
if !ok {
return nil
@@ -542,7 +582,7 @@ func getCollationsFor(ctx *plancontext.PlanningContext, n *physical.Union) []col
return colls
}
-func transformDerivedPlan(ctx *plancontext.PlanningContext, op *physical.Derived) (logicalPlan, error) {
+func transformDerivedPlan(ctx *plancontext.PlanningContext, op *operators.Derived) (logicalPlan, error) {
// transforming the inner part of the derived table into a logical plan
// so that we can do horizon planning on the inner. If the logical plan
// we've produced is a Route, we set its Select.From field to be an aliased
@@ -603,7 +643,7 @@ func (sqr *subQReplacer) replacer(cursor *sqlparser.Cursor) bool {
if ext.GetArgName() == replaceByExpr.GetArgName() {
cursor.Replace(ext.Original)
sqr.replaced = true
- return false
+ return true
}
}
return true
diff --git a/go/vt/vtgate/planbuilder/operator_to_query.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go
similarity index 60%
rename from go/vt/vtgate/planbuilder/operator_to_query.go
rename to go/vt/vtgate/planbuilder/operators/SQL_builder.go
index 87dec2d8526..74e5f20fea4 100644
--- a/go/vt/vtgate/planbuilder/operator_to_query.go
+++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,108 +14,37 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package planbuilder
+package operators
import (
"fmt"
"sort"
"strings"
- "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- "vitess.io/vitess/go/vt/vtgate/planbuilder/physical"
-
"vitess.io/vitess/go/vt/vtgate/semantics"
-
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
)
-type queryBuilder struct {
- ctx *plancontext.PlanningContext
- sel sqlparser.SelectStatement
- tableNames []string
-}
+type (
+ queryBuilder struct {
+ ctx *plancontext.PlanningContext
+ sel sqlparser.SelectStatement
+ tableNames []string
+ }
+)
-func toSQL(ctx *plancontext.PlanningContext, op abstract.PhysicalOperator) sqlparser.SelectStatement {
+func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (sqlparser.SelectStatement, error) {
q := &queryBuilder{ctx: ctx}
- buildQuery(op, q)
- q.sortTables()
- return q.sel
-}
-
-func buildQuery(op abstract.PhysicalOperator, qb *queryBuilder) {
- switch op := op.(type) {
- case *physical.Table:
- dbName := ""
-
- if op.QTable.IsInfSchema {
- dbName = op.QTable.Table.Qualifier.String()
- }
- qb.addTable(dbName, op.QTable.Table.Name.String(), op.QTable.Alias.As.String(), op.TableID(), op.QTable.Alias.Hints)
- for _, pred := range op.QTable.Predicates {
- qb.addPredicate(pred)
- }
- for _, name := range op.Columns {
- qb.addProjection(&sqlparser.AliasedExpr{Expr: name})
- }
- case *physical.ApplyJoin:
- buildQuery(op.LHS, qb)
- // If we are going to add the predicate used in join here
- // We should not add the predicate's copy of when it was split into
- // two parts. To avoid this, we use the SkipPredicates map.
- for _, expr := range qb.ctx.JoinPredicates[op.Predicate] {
- qb.ctx.SkipPredicates[expr] = nil
- }
- qbR := &queryBuilder{ctx: qb.ctx}
- buildQuery(op.RHS, qbR)
- if op.LeftJoin {
- qb.joinOuterWith(qbR, op.Predicate)
- } else {
- qb.joinInnerWith(qbR, op.Predicate)
- }
- case *physical.Filter:
- buildQuery(op.Source, qb)
- for _, pred := range op.Predicates {
- qb.addPredicate(pred)
- }
- case *physical.Derived:
- buildQuery(op.Source, qb)
- sel := qb.sel.(*sqlparser.Select) // we can only handle SELECT in derived tables at the moment
- qb.sel = nil
- opQuery := sqlparser.RemoveKeyspace(op.Query).(*sqlparser.Select)
- sel.Limit = opQuery.Limit
- sel.OrderBy = opQuery.OrderBy
- sel.GroupBy = opQuery.GroupBy
- sel.Having = opQuery.Having
- sel.SelectExprs = opQuery.SelectExprs
- qb.addTableExpr(op.Alias, op.Alias, op.TableID(), &sqlparser.DerivedTable{
- Select: sel,
- }, nil, op.ColumnAliases)
- for _, col := range op.Columns {
- qb.addProjection(&sqlparser.AliasedExpr{Expr: col})
- }
- default:
- panic(fmt.Sprintf("%T", op))
+ err := buildQuery(op, q)
+ if err != nil {
+ return nil, err
}
-}
-
-func (qb *queryBuilder) sortTables() {
- _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
- sel, isSel := node.(*sqlparser.Select)
- if !isSel {
- return true, nil
- }
- ts := &tableSorter{
- sel: sel,
- tbl: qb.ctx.SemTable,
- }
- sort.Sort(ts)
- return true, nil
- }, qb.sel)
-
+ q.sortTables()
+ return q.sel, nil
}
func (qb *queryBuilder) addTable(db, tableName, alias string, tableID semantics.TableSet, hints sqlparser.IndexHints) {
@@ -144,10 +73,7 @@ func (qb *queryBuilder) addTableExpr(
Hints: hints,
Columns: columnAliases,
}
- err := qb.ctx.SemTable.ReplaceTableSetFor(tableID, elems)
- if err != nil {
- log.Warningf("error in replacing table expression in semtable: %v", err)
- }
+ qb.ctx.SemTable.ReplaceTableSetFor(tableID, elems)
sel.From = append(sel.From, elems)
qb.sel = sel
qb.tableNames = append(qb.tableNames, tableName)
@@ -161,12 +87,16 @@ func (qb *queryBuilder) addPredicate(expr sqlparser.Expr) {
}
sel := qb.sel.(*sqlparser.Select)
- if sel.Where == nil {
- sel.AddWhere(expr)
- return
+ _, isSubQuery := expr.(*sqlparser.ExtractedSubquery)
+ var addPred func(sqlparser.Expr)
+
+ if sqlparser.ContainsAggregation(expr) && !isSubQuery {
+ addPred = sel.AddHaving
+ } else {
+ addPred = sel.AddWhere
}
for _, exp := range sqlparser.SplitAndExpression(nil, expr) {
- sel.AddWhere(exp)
+ addPred(exp)
}
}
@@ -188,7 +118,7 @@ func (qb *queryBuilder) joinInnerWith(other *queryBuilder, onCondition sqlparser
if otherSel.Where != nil {
predExprs := sqlparser.SplitAndExpression(nil, predicate)
otherExprs := sqlparser.SplitAndExpression(nil, otherSel.Where.Expr)
- predicate = sqlparser.AndExpressions(append(predExprs, otherExprs...)...)
+ predicate = qb.ctx.SemTable.AndExpressions(append(predExprs, otherExprs...)...)
}
if predicate != nil {
sel.Where = &sqlparser.Where{Type: sqlparser.WhereClause, Expr: predicate}
@@ -227,7 +157,7 @@ func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser
predicate = sel.Where.Expr
}
if otherSel.Where != nil {
- predicate = sqlparser.AndExpressions(predicate, otherSel.Where.Expr)
+ predicate = qb.ctx.SemTable.AndExpressions(predicate, otherSel.Where.Expr)
}
if predicate != nil {
sel.Where = &sqlparser.Where{Type: sqlparser.WhereClause, Expr: predicate}
@@ -235,18 +165,19 @@ func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser
}
func (qb *queryBuilder) rewriteExprForDerivedTable(expr sqlparser.Expr, dtName string) {
- sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
- switch node := cursor.Node().(type) {
- case *sqlparser.ColName:
- hasTable := qb.hasTable(node.Qualifier.Name.String())
- if hasTable {
- node.Qualifier = sqlparser.TableName{
- Name: sqlparser.NewIdentifierCS(dtName),
- }
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ col, ok := node.(*sqlparser.ColName)
+ if !ok {
+ return true, nil
+ }
+ hasTable := qb.hasTable(col.Qualifier.Name.String())
+ if hasTable {
+ col.Qualifier = sqlparser.TableName{
+ Name: sqlparser.NewIdentifierCS(dtName),
}
}
- return true
- }, nil)
+ return true, nil
+ }, expr)
}
func (qb *queryBuilder) hasTable(tableName string) bool {
@@ -258,6 +189,22 @@ func (qb *queryBuilder) hasTable(tableName string) bool {
return false
}
+func (qb *queryBuilder) sortTables() {
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ sel, isSel := node.(*sqlparser.Select)
+ if !isSel {
+ return true, nil
+ }
+ ts := &tableSorter{
+ sel: sel,
+ tbl: qb.ctx.SemTable,
+ }
+ sort.Sort(ts)
+ return true, nil
+ }, qb.sel)
+
+}
+
type tableSorter struct {
sel *sqlparser.Select
tbl *semantics.SemTable
@@ -288,3 +235,166 @@ func (ts *tableSorter) Less(i, j int) bool {
func (ts *tableSorter) Swap(i, j int) {
ts.sel.From[i], ts.sel.From[j] = ts.sel.From[j], ts.sel.From[i]
}
+
+func (h *Horizon) toSQL(qb *queryBuilder) error {
+ err := stripDownQuery(h.Select, qb.sel)
+ if err != nil {
+ return err
+ }
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok {
+ removeKeyspaceFromSelectExpr(aliasedExpr)
+ }
+ return true, nil
+ }, qb.sel)
+ return nil
+}
+
+func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) {
+ switch expr := expr.(type) {
+ case *sqlparser.AliasedExpr:
+ sqlparser.RemoveKeyspaceFromColName(expr.Expr)
+ case *sqlparser.StarExpr:
+ expr.TableName.Qualifier = sqlparser.NewIdentifierCS("")
+ }
+}
+
+func stripDownQuery(from, to sqlparser.SelectStatement) error {
+ var err error
+
+ switch node := from.(type) {
+ case *sqlparser.Select:
+ toNode, ok := to.(*sqlparser.Select)
+ if !ok {
+ return vterrors.VT13001("AST did not match")
+ }
+ toNode.Distinct = node.Distinct
+ toNode.GroupBy = node.GroupBy
+ toNode.Having = node.Having
+ toNode.OrderBy = node.OrderBy
+ toNode.Comments = node.Comments
+ toNode.SelectExprs = node.SelectExprs
+ for _, expr := range toNode.SelectExprs {
+ removeKeyspaceFromSelectExpr(expr)
+ }
+ case *sqlparser.Union:
+ toNode, ok := to.(*sqlparser.Union)
+ if !ok {
+ return vterrors.VT13001("AST did not match")
+ }
+ err = stripDownQuery(node.Left, toNode.Left)
+ if err != nil {
+ return err
+ }
+ err = stripDownQuery(node.Right, toNode.Right)
+ if err != nil {
+ return err
+ }
+ toNode.OrderBy = node.OrderBy
+ default:
+ return vterrors.VT13001(fmt.Sprintf("this should not happen - we have covered all implementations of SelectStatement %T", from))
+ }
+ return nil
+}
+
+func buildQuery(op ops.Operator, qb *queryBuilder) error {
+ switch op := op.(type) {
+ case *Table:
+ dbName := ""
+
+ if op.QTable.IsInfSchema {
+ dbName = op.QTable.Table.Qualifier.String()
+ }
+ qb.addTable(dbName, op.QTable.Table.Name.String(), op.QTable.Alias.As.String(), TableID(op), op.QTable.Alias.Hints)
+ for _, pred := range op.QTable.Predicates {
+ qb.addPredicate(pred)
+ }
+ for _, name := range op.Columns {
+ qb.addProjection(&sqlparser.AliasedExpr{Expr: name})
+ }
+ case *ApplyJoin:
+ err := buildQuery(op.LHS, qb)
+ if err != nil {
+ return err
+ }
+ // If we are going to add the predicate used in join here
+ // We should not add the predicate's copy of when it was split into
+ // two parts. To avoid this, we use the SkipPredicates map.
+ for _, expr := range qb.ctx.JoinPredicates[op.Predicate] {
+ qb.ctx.SkipPredicates[expr] = nil
+ }
+ qbR := &queryBuilder{ctx: qb.ctx}
+ err = buildQuery(op.RHS, qbR)
+ if err != nil {
+ return err
+ }
+ if op.LeftJoin {
+ qb.joinOuterWith(qbR, op.Predicate)
+ } else {
+ qb.joinInnerWith(qbR, op.Predicate)
+ }
+ case *Filter:
+ err := buildQuery(op.Source, qb)
+ if err != nil {
+ return err
+ }
+ for _, pred := range op.Predicates {
+ qb.addPredicate(pred)
+ }
+ case *Derived:
+ err := buildQuery(op.Source, qb)
+ if err != nil {
+ return err
+ }
+ sel := qb.sel.(*sqlparser.Select) // we can only handle SELECT in derived tables at the moment
+ qb.sel = nil
+ sqlparser.RemoveKeyspace(op.Query)
+ opQuery := op.Query.(*sqlparser.Select)
+ sel.Limit = opQuery.Limit
+ sel.OrderBy = opQuery.OrderBy
+ sel.GroupBy = opQuery.GroupBy
+ sel.Having = mergeHaving(sel.Having, opQuery.Having)
+ sel.SelectExprs = opQuery.SelectExprs
+ qb.addTableExpr(op.Alias, op.Alias, TableID(op), &sqlparser.DerivedTable{
+ Select: sel,
+ }, nil, op.ColumnAliases)
+ for _, col := range op.Columns {
+ qb.addProjection(&sqlparser.AliasedExpr{Expr: col})
+ }
+ case *Horizon:
+ err := buildQuery(op.Source, qb)
+ if err != nil {
+ return err
+ }
+
+ err = stripDownQuery(op.Select, qb.sel)
+ if err != nil {
+ return err
+ }
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok {
+ removeKeyspaceFromSelectExpr(aliasedExpr)
+ }
+ return true, nil
+ }, qb.sel)
+ return nil
+
+ default:
+ return vterrors.VT13001(fmt.Sprintf("do not know how to turn %T into SQL", op))
+ }
+ return nil
+}
+
+func mergeHaving(h1, h2 *sqlparser.Where) *sqlparser.Where {
+ switch {
+ case h1 == nil && h2 == nil:
+ return nil
+ case h1 == nil:
+ return h2
+ case h2 == nil:
+ return h1
+ default:
+ h1.Expr = sqlparser.AndExpressions(h1.Expr, h2.Expr)
+ return h1
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go
new file mode 100644
index 00000000000..2968d463b1c
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/apply_join.go
@@ -0,0 +1,193 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+// ApplyJoin is a nested loop join - for each row on the LHS,
+// we'll execute the plan on the RHS, feeding data from left to right
+type ApplyJoin struct {
+ LHS, RHS ops.Operator
+
+ // Columns stores the column indexes of the columns coming from the left and right side
+ // negative value comes from LHS and positive from RHS
+ Columns []int
+
+ // ColumnsAST keeps track of what AST expression is represented in the Columns array
+ ColumnsAST []sqlparser.Expr
+
+ // Vars are the arguments that need to be copied from the LHS to the RHS
+ Vars map[string]int
+
+ // LeftJoin will be true in the case of an outer join
+ LeftJoin bool
+
+ // JoinCols are the columns from the LHS used for the join.
+ // These are the same columns pushed on the LHS that are now used in the Vars field
+ LHSColumns []*sqlparser.ColName
+
+ Predicate sqlparser.Expr
+}
+
+var _ ops.PhysicalOperator = (*ApplyJoin)(nil)
+
+func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin {
+ return &ApplyJoin{
+ LHS: lhs,
+ RHS: rhs,
+ Vars: map[string]int{},
+ Predicate: predicate,
+ LeftJoin: leftOuterJoin,
+ }
+}
+
+// IPhysical implements the PhysicalOperator interface
+func (a *ApplyJoin) IPhysical() {}
+
+// Clone implements the Operator interface
+func (a *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator {
+ return &ApplyJoin{
+ LHS: inputs[0],
+ RHS: inputs[1],
+ Columns: slices.Clone(a.Columns),
+ ColumnsAST: slices.Clone(a.ColumnsAST),
+ Vars: maps.Clone(a.Vars),
+ LeftJoin: a.LeftJoin,
+ Predicate: sqlparser.CloneExpr(a.Predicate),
+ LHSColumns: slices.Clone(a.LHSColumns),
+ }
+}
+
+func (a *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ return AddPredicate(a, ctx, expr, false, newFilter)
+}
+
+// Inputs implements the Operator interface
+func (a *ApplyJoin) Inputs() []ops.Operator {
+ return []ops.Operator{a.LHS, a.RHS}
+}
+
+var _ JoinOp = (*ApplyJoin)(nil)
+
+func (a *ApplyJoin) GetLHS() ops.Operator {
+ return a.LHS
+}
+
+func (a *ApplyJoin) GetRHS() ops.Operator {
+ return a.RHS
+}
+
+func (a *ApplyJoin) SetLHS(operator ops.Operator) {
+ a.LHS = operator
+}
+
+func (a *ApplyJoin) SetRHS(operator ops.Operator) {
+ a.RHS = operator
+}
+
+func (a *ApplyJoin) MakeInner() {
+ a.LeftJoin = false
+}
+
+func (a *ApplyJoin) IsInner() bool {
+ return !a.LeftJoin
+}
+
+func (a *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error {
+ bvName, cols, predicate, err := BreakExpressionInLHSandRHS(ctx, expr, TableID(a.LHS))
+ if err != nil {
+ return err
+ }
+ for i, col := range cols {
+ offset, err := a.LHS.AddColumn(ctx, col)
+ if err != nil {
+ return err
+ }
+ a.Vars[bvName[i]] = offset
+ }
+ a.LHSColumns = append(a.LHSColumns, cols...)
+
+ rhs, err := a.RHS.AddPredicate(ctx, predicate)
+ if err != nil {
+ return err
+ }
+ a.RHS = rhs
+
+ a.Predicate = ctx.SemTable.AndExpressions(expr, a.Predicate)
+ return nil
+}
+
+func (a *ApplyJoin) AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) {
+ // first check if we already are passing through this expression
+ for i, existing := range a.ColumnsAST {
+ if ctx.SemTable.EqualsExpr(existing, expr) {
+ return i, nil
+ }
+ }
+
+ lhs := TableID(a.LHS)
+ rhs := TableID(a.RHS)
+ both := lhs.Merge(rhs)
+ deps := ctx.SemTable.RecursiveDeps(expr)
+
+ // if we get here, it's a new expression we are dealing with.
+ // We need to decide if we can push it all on either side,
+ // or if we have to break the expression into left and right parts
+ switch {
+ case deps.IsSolvedBy(lhs):
+ offset, err := a.LHS.AddColumn(ctx, expr)
+ if err != nil {
+ return 0, err
+ }
+ a.Columns = append(a.Columns, -offset-1)
+ case deps.IsSolvedBy(both):
+ bvNames, lhsExprs, rhsExpr, err := BreakExpressionInLHSandRHS(ctx, expr, lhs)
+ if err != nil {
+ return 0, err
+ }
+ for i, lhsExpr := range lhsExprs {
+ offset, err := a.LHS.AddColumn(ctx, lhsExpr)
+ if err != nil {
+ return 0, err
+ }
+ a.Vars[bvNames[i]] = offset
+ }
+ expr = rhsExpr
+ fallthrough // now we just pass the rest to the RHS of the join
+ case deps.IsSolvedBy(rhs):
+ offset, err := a.RHS.AddColumn(ctx, expr)
+ if err != nil {
+ return 0, err
+ }
+ a.Columns = append(a.Columns, offset+1)
+ default:
+ return 0, vterrors.VT13002(sqlparser.String(expr))
+ }
+
+ // the expression wasn't already there - let's add it
+ a.ColumnsAST = append(a.ColumnsAST, expr)
+ return len(a.Columns) - 1, nil
+}
diff --git a/go/vt/vtgate/planbuilder/operators/correlated_subquery.go b/go/vt/vtgate/planbuilder/operators/correlated_subquery.go
new file mode 100644
index 00000000000..d95207f0a7a
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/correlated_subquery.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+)
+
+type (
+ CorrelatedSubQueryOp struct {
+ Outer, Inner ops.Operator
+ Extracted *sqlparser.ExtractedSubquery
+
+ // JoinCols are the columns from the LHS used for the join.
+ // These are the same columns pushed on the LHS that are now used in the Vars field
+ LHSColumns []*sqlparser.ColName
+
+ // arguments that need to be copied from the outer to inner
+ Vars map[string]int
+
+ noColumns
+ noPredicates
+ }
+
+ SubQueryOp struct {
+ Outer, Inner ops.Operator
+ Extracted *sqlparser.ExtractedSubquery
+
+ noColumns
+ noPredicates
+ }
+)
+
+var _ ops.PhysicalOperator = (*SubQueryOp)(nil)
+var _ ops.PhysicalOperator = (*CorrelatedSubQueryOp)(nil)
+
+// IPhysical implements the PhysicalOperator interface
+func (s *SubQueryOp) IPhysical() {}
+
+// Clone implements the Operator interface
+func (s *SubQueryOp) Clone(inputs []ops.Operator) ops.Operator {
+ result := &SubQueryOp{
+ Outer: inputs[0],
+ Inner: inputs[1],
+ Extracted: s.Extracted,
+ }
+ return result
+}
+
+// Inputs implements the Operator interface
+func (s *SubQueryOp) Inputs() []ops.Operator {
+ return []ops.Operator{s.Outer, s.Inner}
+}
+
+// IPhysical implements the PhysicalOperator interface
+func (c *CorrelatedSubQueryOp) IPhysical() {}
+
+// Clone implements the Operator interface
+func (c *CorrelatedSubQueryOp) Clone(inputs []ops.Operator) ops.Operator {
+ columns := make([]*sqlparser.ColName, len(c.LHSColumns))
+ copy(columns, c.LHSColumns)
+ vars := make(map[string]int, len(c.Vars))
+ for k, v := range c.Vars {
+ vars[k] = v
+ }
+
+ result := &CorrelatedSubQueryOp{
+ Outer: inputs[0],
+ Inner: inputs[1],
+ Extracted: c.Extracted,
+ LHSColumns: columns,
+ Vars: vars,
+ }
+ return result
+}
+
+// Inputs implements the Operator interface
+func (c *CorrelatedSubQueryOp) Inputs() []ops.Operator {
+ return []ops.Operator{c.Outer, c.Inner}
+}
diff --git a/go/vt/vtgate/planbuilder/physical/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go
similarity index 52%
rename from go/vt/vtgate/planbuilder/physical/delete.go
rename to go/vt/vtgate/planbuilder/operators/delete.go
index 23cb6f3372e..d33acd8b013 100644
--- a/go/vt/vtgate/planbuilder/physical/delete.go
+++ b/go/vt/vtgate/planbuilder/operators/delete.go
@@ -14,50 +14,38 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package physical
+package operators
import (
"vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
"vitess.io/vitess/go/vt/vtgate/semantics"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
type Delete struct {
- QTable *abstract.QueryTable
+ QTable *QueryTable
VTable *vindexes.Table
OwnedVindexQuery string
AST *sqlparser.Delete
-}
-
-var _ abstract.PhysicalOperator = (*Delete)(nil)
-var _ abstract.IntroducesTable = (*Delete)(nil)
-// TableID implements the PhysicalOperator interface
-func (d *Delete) TableID() semantics.TableSet {
- return d.QTable.ID
+ noInputs
+ noColumns
+ noPredicates
}
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (d *Delete) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return nil
-}
+var _ ops.PhysicalOperator = (*Delete)(nil)
-// CheckValid implements the PhysicalOperator interface
-func (d *Delete) CheckValid() error {
- return nil
+// Introduces implements the PhysicalOperator interface
+func (d *Delete) Introduces() semantics.TableSet {
+ return d.QTable.ID
}
// IPhysical implements the PhysicalOperator interface
func (d *Delete) IPhysical() {}
-// Cost implements the PhysicalOperator interface
-func (d *Delete) Cost() int {
- return 1
-}
-
-// Clone implements the PhysicalOperator interface
-func (d *Delete) Clone() abstract.PhysicalOperator {
+// Clone implements the Operator interface
+func (d *Delete) Clone(inputs []ops.Operator) ops.Operator {
return &Delete{
QTable: d.QTable,
VTable: d.VTable,
@@ -66,12 +54,9 @@ func (d *Delete) Clone() abstract.PhysicalOperator {
}
}
-// GetQTable implements the IntroducesTable interface
-func (d *Delete) GetQTable() *abstract.QueryTable {
- return d.QTable
-}
-
-// GetVTable implements the IntroducesTable interface
-func (d *Delete) GetVTable() *vindexes.Table {
- return d.VTable
+func (d *Delete) TablesUsed() []string {
+ if d.VTable != nil {
+ return SingleQualifiedIdentifier(d.VTable.Keyspace, d.VTable.Name)
+ }
+ return nil
}
diff --git a/go/vt/vtgate/planbuilder/operators/derived.go b/go/vt/vtgate/planbuilder/operators/derived.go
new file mode 100644
index 00000000000..6edb9f84e47
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/derived.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "io"
+
+ "golang.org/x/exp/slices"
+
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+)
+
+type Derived struct {
+ Source ops.Operator
+
+ Query sqlparser.SelectStatement
+ Alias string
+ ColumnAliases sqlparser.Columns
+
+ // Columns needed to feed other plans
+ Columns []*sqlparser.ColName
+ ColumnsOffset []int
+}
+
+var _ ops.PhysicalOperator = (*Derived)(nil)
+
+// IPhysical implements the PhysicalOperator interface
+func (d *Derived) IPhysical() {}
+
+// Clone implements the Operator interface
+func (d *Derived) Clone(inputs []ops.Operator) ops.Operator {
+ return &Derived{
+ Source: inputs[0],
+ Query: d.Query,
+ Alias: d.Alias,
+ ColumnAliases: sqlparser.CloneColumns(d.ColumnAliases),
+ Columns: slices.Clone(d.Columns),
+ ColumnsOffset: slices.Clone(d.ColumnsOffset),
+ }
+}
+
+// findOutputColumn returns the index on which the given name is found in the slice of
+// *sqlparser.SelectExprs of the derivedTree. The *sqlparser.SelectExpr must be of type
+// *sqlparser.AliasedExpr and match the given name.
+// If name is not present but the query's select expressions contain a *sqlparser.StarExpr
+// the function will return no error and an index equal to -1.
+// If name is not present and the query does not have a *sqlparser.StarExpr, the function
+// will return an unknown column error.
+func (d *Derived) findOutputColumn(name *sqlparser.ColName) (int, error) {
+ hasStar := false
+ for j, exp := range sqlparser.GetFirstSelect(d.Query).SelectExprs {
+ switch exp := exp.(type) {
+ case *sqlparser.AliasedExpr:
+ if !exp.As.IsEmpty() && exp.As.Equal(name.Name) {
+ return j, nil
+ }
+ if exp.As.IsEmpty() {
+ col, ok := exp.Expr.(*sqlparser.ColName)
+ if !ok {
+ return 0, vterrors.VT12001("complex expression needs column alias: %s", sqlparser.String(exp))
+ }
+ if name.Name.Equal(col.Name) {
+ return j, nil
+ }
+ }
+ case *sqlparser.StarExpr:
+ hasStar = true
+ }
+ }
+
+ // we have found a star but no matching *sqlparser.AliasedExpr, thus we return -1 with no error.
+ if hasStar {
+ return -1, nil
+ }
+ return 0, vterrors.VT03014(name.Name.String(), "field list")
+}
+
+// IsMergeable is not a great name for this function. Suggestions for a better one are welcome!
+// This function will return false if the derived table inside it has to run on the vtgate side, and so can't be merged with subqueries
+// This logic can also be used to check if this is a derived table that can be had on the left hand side of a vtgate join.
+// Since vtgate joins are always nested loop joins, we can't execute them on the RHS
+// if they do some things, like LIMIT or GROUP BY on wrong columns
+func (d *Derived) IsMergeable(ctx *plancontext.PlanningContext) bool {
+ return isMergeable(ctx, d.Query, d)
+}
+
+// Inputs implements the Operator interface
+func (d *Derived) Inputs() []ops.Operator {
+ return []ops.Operator{d.Source}
+}
+
+func (d *Derived) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ if _, isUNion := d.Source.(*Union); isUNion {
+ // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting
+ var err error
+ d.Source, err = d.Source.AddPredicate(ctx, expr)
+ return d, err
+ }
+ tableInfo, err := ctx.SemTable.TableInfoForExpr(expr)
+ if err != nil {
+ if err == semantics.ErrNotSingleTable {
+ return &Filter{
+ Source: d,
+ Predicates: []sqlparser.Expr{expr},
+ }, nil
+ }
+ return nil, err
+ }
+
+ newExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo)
+ if !canBePushedDownIntoDerived(newExpr) {
+ // if we have an aggregation, we don't want to push it inside
+ return &Filter{Source: d, Predicates: []sqlparser.Expr{expr}}, nil
+ }
+ d.Source, err = d.Source.AddPredicate(ctx, newExpr)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+}
+
+func canBePushedDownIntoDerived(expr sqlparser.Expr) (canBePushed bool) {
+ canBePushed = true
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ switch node.(type) {
+ case *sqlparser.Max, *sqlparser.Min:
+ // empty by default
+ case sqlparser.AggrFunc:
+ canBePushed = false
+ return false, io.EOF
+ }
+ return true, nil
+ }, expr)
+ return
+}
+
+func (d *Derived) AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) {
+ col, ok := expr.(*sqlparser.ColName)
+ if !ok {
+ return 0, vterrors.VT13001("cannot push non-colname expression to a derived table")
+ }
+
+ i, err := d.findOutputColumn(col)
+ if err != nil {
+ return 0, err
+ }
+ var pos int
+ d.ColumnsOffset, pos = addToIntSlice(d.ColumnsOffset, i)
+
+ d.Columns = append(d.Columns, col)
+ // add it to the source if we were not already passing it through
+ if i <= -1 {
+ _, err := d.Source.AddColumn(ctx, sqlparser.NewColName(col.Name.String()))
+ if err != nil {
+ return 0, err
+ }
+ }
+ return pos, nil
+}
+
+func addToIntSlice(columnOffset []int, valToAdd int) ([]int, int) {
+ for idx, val := range columnOffset {
+ if val == valToAdd {
+ return columnOffset, idx
+ }
+ }
+ columnOffset = append(columnOffset, valToAdd)
+ return columnOffset, len(columnOffset) - 1
+}
diff --git a/go/vt/vtgate/planbuilder/physical/dml_planning.go b/go/vt/vtgate/planbuilder/operators/dml_planning.go
similarity index 77%
rename from go/vt/vtgate/planbuilder/physical/dml_planning.go
rename to go/vt/vtgate/planbuilder/operators/dml_planning.go
index 09fc6058ca6..5a875b78f36 100644
--- a/go/vt/vtgate/planbuilder/physical/dml_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/dml_planning.go
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package physical
+package operators
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -35,7 +36,7 @@ func getVindexInformation(
) (*vindexes.ColumnVindex, []*VindexPlusPredicates, error) {
// Check that we have a primary vindex which is valid
if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() {
- return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, table.Name)
+ return nil, nil, vterrors.VT09001(table.Name)
}
primaryVindex := table.ColumnVindexes[0]
if len(predicates) == 0 {
@@ -44,9 +45,9 @@ func getVindexInformation(
var vindexesAndPredicates []*VindexPlusPredicates
for _, colVindex := range table.Ordered {
- if lu, isLu := colVindex.Vindex.(vindexes.LookupBackfill); isLu && lu.IsBackfilling() {
- // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table
- // and we will be able to do a delete equal. Otherwise, we continue to look for next best vindex.
+ // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table,
+ // and we will be able to do a delete equal. Otherwise, we continue to look for next best vindex.
+ if colVindex.IsBackfilling() {
continue
}
@@ -75,7 +76,7 @@ func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table,
continue
}
if found {
- return nil, "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", assignment.Name.Name)
+ return nil, "", vterrors.VT03015(assignment.Name.Name)
}
found = true
pv, err := extractValueFromUpdate(assignment)
@@ -97,13 +98,13 @@ func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table,
}
if update.Limit != nil && len(update.OrderBy) == 0 {
- return nil, "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: Need to provide order by clause when using limit. Invalid update on vindex: %v", vindex.Name)
+ return nil, "", vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name))
}
if i == 0 {
- return nil, "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can't update primary vindex columns. Invalid update on vindex: %v", vindex.Name)
+ return nil, "", vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name))
}
if _, ok := vindex.Vindex.(vindexes.Lookup); !ok {
- return nil, "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can only update lookup vindexes. Invalid update on vindex: %v", vindex.Name)
+ return nil, "", vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name))
}
changedVindexes[vindex.Name] = &engine.VindexValues{
PvMap: vindexValueMap,
@@ -117,7 +118,7 @@ func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table,
// generate rest of the owned vindex query.
aTblExpr, ok := update.TableExprs[0].(*sqlparser.AliasedTableExpr)
if !ok {
- return nil, "", vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: update on complex table expression")
+ return nil, "", vterrors.VT12001("UPDATE on complex table expression")
}
tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: table.Name}, As: aTblExpr.As}
buf.Myprintf(" from %v%v%v%v for update", tblExpr, update.Where, update.OrderBy, update.Limit)
@@ -166,5 +167,5 @@ func extractValueFromUpdate(upd *sqlparser.UpdateExpr) (evalengine.Expr, error)
}
func invalidUpdateExpr(upd *sqlparser.UpdateExpr, expr sqlparser.Expr) error {
- return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: Only values are supported. Invalid update on column: `%s` with expr: [%s]", upd.Name.Name.String(), sqlparser.String(expr))
+ return vterrors.VT12001(fmt.Sprintf("only values are supported; invalid update on column: `%s` with expr: [%s]", upd.Name.Name.String(), sqlparser.String(expr)))
}
diff --git a/go/vt/vtgate/planbuilder/operators/expressions.go b/go/vt/vtgate/planbuilder/operators/expressions.go
new file mode 100644
index 00000000000..5a49bb3a058
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/expressions.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+)
+
+// BreakExpressionInLHSandRHS takes an expression and
+// extracts the parts that are coming from one of the sides into `ColName`s that are needed
+func BreakExpressionInLHSandRHS(
+ ctx *plancontext.PlanningContext,
+ expr sqlparser.Expr,
+ lhs semantics.TableSet,
+) (bvNames []string, columns []*sqlparser.ColName, rewrittenExpr sqlparser.Expr, err error) {
+ rewrittenExpr = sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) {
+ node, ok := cursor.Node().(*sqlparser.ColName)
+ if !ok {
+ return
+ }
+ deps := ctx.SemTable.RecursiveDeps(node)
+ if deps.IsEmpty() {
+ err = vterrors.VT13001("unknown column. has the AST been copied?")
+ cursor.StopTreeWalk()
+ return
+ }
+ if !deps.IsSolvedBy(lhs) {
+ return
+ }
+
+ node.Qualifier.Qualifier = sqlparser.NewIdentifierCS("")
+ columns = append(columns, node)
+ bvName := node.CompliantName()
+ bvNames = append(bvNames, bvName)
+ arg := sqlparser.NewArgument(bvName)
+ // we are replacing one of the sides of the comparison with an argument,
+ // but we don't want to lose the type information we have, so we copy it over
+ ctx.SemTable.CopyExprInfo(node, arg)
+ cursor.Replace(arg)
+ }, nil).(sqlparser.Expr)
+
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ ctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr)
+ return
+}
diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go
new file mode 100644
index 00000000000..d28511dbe86
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/filter.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+)
+
+type Filter struct {
+ Source ops.Operator
+ Predicates []sqlparser.Expr
+}
+
+var _ ops.PhysicalOperator = (*Filter)(nil)
+
+func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator {
+ return &Filter{
+ Source: op, Predicates: []sqlparser.Expr{expr},
+ }
+}
+
+// IPhysical implements the PhysicalOperator interface
+func (f *Filter) IPhysical() {}
+
+// Clone implements the Operator interface
+func (f *Filter) Clone(inputs []ops.Operator) ops.Operator {
+ predicatesClone := make([]sqlparser.Expr, len(f.Predicates))
+ copy(predicatesClone, f.Predicates)
+ return &Filter{
+ Source: inputs[0],
+ Predicates: predicatesClone,
+ }
+}
+
+// Inputs implements the Operator interface
+func (f *Filter) Inputs() []ops.Operator {
+ return []ops.Operator{f.Source}
+}
+
+// UnsolvedPredicates implements the unresolved interface
+func (f *Filter) UnsolvedPredicates(st *semantics.SemTable) []sqlparser.Expr {
+ var result []sqlparser.Expr
+ id := TableID(f)
+ for _, p := range f.Predicates {
+ deps := st.RecursiveDeps(p)
+ if !deps.IsSolvedBy(id) {
+ result = append(result, p)
+ }
+ }
+ return result
+}
+
+func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ newSrc, err := f.Source.AddPredicate(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+ f.Source = newSrc
+ return f, nil
+}
+
+func (f *Filter) AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) {
+ return f.Source.AddColumn(ctx, expr)
+}
+
+func (f *Filter) Compact(*plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error) {
+ if len(f.Predicates) == 0 {
+ return f.Source, rewrite.NewTree, nil
+ }
+
+ other, isFilter := f.Source.(*Filter)
+ if !isFilter {
+ return f, rewrite.SameTree, nil
+ }
+ f.Source = other.Source
+ f.Predicates = append(f.Predicates, other.Predicates...)
+ return f, rewrite.NewTree, nil
+}
diff --git a/go/vt/vtgate/planbuilder/abstract/fuzz.go b/go/vt/vtgate/planbuilder/operators/fuzz.go
similarity index 98%
rename from go/vt/vtgate/planbuilder/abstract/fuzz.go
rename to go/vt/vtgate/planbuilder/operators/fuzz.go
index 3a87a8a920a..bb8c508e56b 100644
--- a/go/vt/vtgate/planbuilder/abstract/fuzz.go
+++ b/go/vt/vtgate/planbuilder/operators/fuzz.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package abstract
+package operators
import (
"vitess.io/vitess/go/vt/sqlparser"
diff --git a/go/vt/vtgate/planbuilder/abstract/fuzz_test.go b/go/vt/vtgate/planbuilder/operators/fuzz_test.go
similarity index 98%
rename from go/vt/vtgate/planbuilder/abstract/fuzz_test.go
rename to go/vt/vtgate/planbuilder/operators/fuzz_test.go
index 25b0fb4a5f4..9d49ab4ba7c 100644
--- a/go/vt/vtgate/planbuilder/abstract/fuzz_test.go
+++ b/go/vt/vtgate/planbuilder/operators/fuzz_test.go
@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package abstract
+package operators
import (
"os"
diff --git a/go/vt/vtgate/planbuilder/abstract/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-1 b/go/vt/vtgate/planbuilder/operators/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-1
similarity index 100%
rename from go/vt/vtgate/planbuilder/abstract/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-1
rename to go/vt/vtgate/planbuilder/operators/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-1
diff --git a/go/vt/vtgate/planbuilder/abstract/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-5577761986052096 b/go/vt/vtgate/planbuilder/operators/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-5577761986052096
similarity index 100%
rename from go/vt/vtgate/planbuilder/abstract/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-5577761986052096
rename to go/vt/vtgate/planbuilder/operators/fuzzdata/clusterfuzz-testcase-planbuilder_fuzzer-5577761986052096
diff --git a/go/vt/vtgate/planbuilder/operators/helpers.go b/go/vt/vtgate/planbuilder/operators/helpers.go
new file mode 100644
index 00000000000..8b63c664d06
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/helpers.go
@@ -0,0 +1,206 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "fmt"
+ "sort"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+)
+
+// Compact will optimise the operator tree into a smaller but equivalent version
+func Compact(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) {
+ type compactable interface {
+ // Compact implement this interface for operators that have easy to see optimisations
+ Compact(ctx *plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error)
+ }
+
+ newOp, err := rewrite.BottomUp(op, func(op ops.Operator) (ops.Operator, rewrite.TreeIdentity, error) {
+ newOp, ok := op.(compactable)
+ if !ok {
+ return op, rewrite.SameTree, nil
+ }
+ return newOp.Compact(ctx)
+ })
+ return newOp, err
+}
+
+func CheckValid(op ops.Operator) error {
+ type checkable interface {
+ CheckValid() error
+ }
+
+ return rewrite.Visit(op, func(this ops.Operator) error {
+ if chk, ok := this.(checkable); ok {
+ return chk.CheckValid()
+ }
+ return nil
+ })
+}
+
+func Clone(op ops.Operator) ops.Operator {
+ inputs := op.Inputs()
+ clones := make([]ops.Operator, len(inputs))
+ for i, input := range inputs {
+ clones[i] = Clone(input)
+ }
+ return op.Clone(clones)
+}
+
+// TableIDIntroducer is used to signal that this operator introduces data from a new source
+type TableIDIntroducer interface {
+ Introduces() semantics.TableSet
+}
+
+func TableID(op ops.Operator) (result semantics.TableSet) {
+ _ = rewrite.Visit(op, func(this ops.Operator) error {
+ if tbl, ok := this.(TableIDIntroducer); ok {
+ result = result.Merge(tbl.Introduces())
+ }
+ return nil
+ })
+ return
+}
+
+// TableUser is used to signal that this operator directly interacts with one or more tables
+type TableUser interface {
+ TablesUsed() []string
+}
+
+func TablesUsed(op ops.Operator) []string {
+ addString, collect := collectSortedUniqueStrings()
+ _ = rewrite.Visit(op, func(this ops.Operator) error {
+ if tbl, ok := this.(TableUser); ok {
+ for _, u := range tbl.TablesUsed() {
+ addString(u)
+ }
+ }
+ return nil
+ })
+ return collect()
+}
+
+func UnresolvedPredicates(op ops.Operator, st *semantics.SemTable) (result []sqlparser.Expr) {
+ type unresolved interface {
+ // UnsolvedPredicates returns any predicates that have dependencies on the given Operator and
+ // on the outside of it (a parent Select expression, any other table not used by Operator, etc).
+ // This is used for sub-queries. An example query could be:
+ // SELECT * FROM tbl WHERE EXISTS (SELECT 1 FROM otherTbl WHERE tbl.col = otherTbl.col)
+ // The subquery would have one unsolved predicate: `tbl.col = otherTbl.col`
+ // It's a predicate that belongs to the inner query, but it needs data from the outer query
+ // These predicates dictate which data we have to send from the outer side to the inner
+ UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr
+ }
+
+ _ = rewrite.Visit(op, func(this ops.Operator) error {
+ if tbl, ok := this.(unresolved); ok {
+ result = append(result, tbl.UnsolvedPredicates(st)...)
+ }
+
+ return nil
+ })
+ return
+}
+
+func CostOf(op ops.Operator) (cost int) {
+ type costly interface {
+ // Cost returns the cost for this operator. All the costly operators in the tree are summed together to get the
+ // total cost of the operator tree.
+ // TODO: We should really calculate this using cardinality estimation,
+ // but until then this is better than nothing
+ Cost() int
+ }
+
+ _ = rewrite.Visit(op, func(op ops.Operator) error {
+ if costlyOp, ok := op.(costly); ok {
+ cost += costlyOp.Cost()
+ }
+ return nil
+ })
+ return
+}
+
+func QualifiedIdentifier(ks *vindexes.Keyspace, i sqlparser.IdentifierCS) string {
+ return QualifiedString(ks, i.String())
+}
+
+func QualifiedString(ks *vindexes.Keyspace, s string) string {
+ return fmt.Sprintf("%s.%s", ks.Name, s)
+}
+
+func QualifiedStrings(ks *vindexes.Keyspace, ss []string) []string {
+ add, collect := collectSortedUniqueStrings()
+ for _, s := range ss {
+ add(QualifiedString(ks, s))
+ }
+ return collect()
+}
+
+func QualifiedTableName(ks *vindexes.Keyspace, t sqlparser.TableName) string {
+ return QualifiedIdentifier(ks, t.Name)
+}
+
+func QualifiedTableNames(ks *vindexes.Keyspace, ts []sqlparser.TableName) []string {
+ add, collect := collectSortedUniqueStrings()
+ for _, t := range ts {
+ add(QualifiedTableName(ks, t))
+ }
+ return collect()
+}
+
+func QualifiedTables(ks *vindexes.Keyspace, vts []*vindexes.Table) []string {
+ add, collect := collectSortedUniqueStrings()
+ for _, vt := range vts {
+ add(QualifiedIdentifier(ks, vt.Name))
+ }
+ return collect()
+}
+
+func SingleQualifiedIdentifier(ks *vindexes.Keyspace, i sqlparser.IdentifierCS) []string {
+ return SingleQualifiedString(ks, i.String())
+}
+
+func SingleQualifiedString(ks *vindexes.Keyspace, s string) []string {
+ return []string{QualifiedString(ks, s)}
+}
+
+func SingleQualifiedTableName(ks *vindexes.Keyspace, t sqlparser.TableName) []string {
+ return SingleQualifiedIdentifier(ks, t.Name)
+}
+
+func collectSortedUniqueStrings() (add func(string), collect func() []string) {
+ uniq := make(map[string]any)
+ add = func(v string) {
+ uniq[v] = nil
+ }
+ collect = func() []string {
+ sorted := make([]string, 0, len(uniq))
+ for v := range uniq {
+ sorted = append(sorted, v)
+ }
+ sort.Strings(sorted)
+ return sorted
+ }
+
+ return add, collect
+}
diff --git a/go/vt/vtgate/planbuilder/operators/horizon.go b/go/vt/vtgate/planbuilder/operators/horizon.go
new file mode 100644
index 00000000000..7bbe3eb9e98
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/horizon.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+// Horizon is an operator we use until we decide how to handle the source to the horizon.
+// It contains information about the planning we have to do after deciding how we will send the query to the tablets.
+type Horizon struct {
+ Source ops.Operator
+ Select sqlparser.SelectStatement
+
+ noColumns
+}
+
+var _ ops.Operator = (*Horizon)(nil)
+var _ ops.PhysicalOperator = (*Horizon)(nil)
+
+func (h *Horizon) IPhysical() {}
+
+func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ newSrc, err := h.Source.AddPredicate(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+ h.Source = newSrc
+ return h, nil
+}
+
+func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator {
+ return &Horizon{
+ Source: inputs[0],
+ Select: h.Select,
+ }
+}
+
+func (h *Horizon) Inputs() []ops.Operator {
+ return []ops.Operator{h.Source}
+}
diff --git a/go/vt/vtgate/planbuilder/operators/horizon_planning.go b/go/vt/vtgate/planbuilder/operators/horizon_planning.go
new file mode 100644
index 00000000000..e8d881a3dda
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/horizon_planning.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+)
+
+var errNotHorizonPlanned = vterrors.VT12001("query cannot be fully operator planned")
+
+func planHorizons(in ops.Operator) (ops.Operator, error) {
+ return rewrite.TopDown(in, func(in ops.Operator) (ops.Operator, rewrite.TreeIdentity, rewrite.VisitRule, error) {
+ switch in := in.(type) {
+ case *Horizon:
+ op, err := planHorizon(in)
+ if err != nil {
+ return nil, rewrite.SameTree, rewrite.SkipChildren, err
+ }
+ return op, rewrite.NewTree, rewrite.VisitChildren, nil
+ case *Route:
+ return in, rewrite.SameTree, rewrite.SkipChildren, nil
+ default:
+ return in, rewrite.SameTree, rewrite.VisitChildren, nil
+ }
+ })
+}
+
+func planHorizon(in *Horizon) (ops.Operator, error) {
+ rb, isRoute := in.Source.(*Route)
+ if !isRoute {
+ return in, nil
+ }
+ if isRoute && rb.IsSingleShard() && in.Select.GetLimit() == nil {
+ return planSingleShardRoute(rb, in)
+ }
+
+ return nil, errNotHorizonPlanned
+}
+func planSingleShardRoute(rb *Route, horizon *Horizon) (ops.Operator, error) {
+ rb.Source, horizon.Source = horizon, rb.Source
+ return rb, nil
+}
diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go
new file mode 100644
index 00000000000..3cad6c6bd80
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/join.go
@@ -0,0 +1,151 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+// Join represents a join. If we have a predicate, this is an inner join. If no predicate exists, it is a cross join
+type Join struct {
+ LHS, RHS ops.Operator
+ Predicate sqlparser.Expr
+ LeftJoin bool
+
+ noColumns
+}
+
+var _ ops.Operator = (*Join)(nil)
+
+// Clone implements the Operator interface
+func (j *Join) Clone(inputs []ops.Operator) ops.Operator {
+ clone := *j
+ clone.LHS = inputs[0]
+ clone.RHS = inputs[1]
+ return &Join{
+ LHS: inputs[0],
+ RHS: inputs[1],
+ Predicate: j.Predicate,
+ LeftJoin: j.LeftJoin,
+ }
+}
+
+// Inputs implements the Operator interface
+func (j *Join) Inputs() []ops.Operator {
+ return []ops.Operator{j.LHS, j.RHS}
+}
+
+func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error) {
+ if j.LeftJoin {
+ // we can't merge outer joins into a single QG
+ return j, rewrite.SameTree, nil
+ }
+
+ lqg, lok := j.LHS.(*QueryGraph)
+ rqg, rok := j.RHS.(*QueryGraph)
+ if !lok || !rok {
+ return j, rewrite.SameTree, nil
+ }
+
+ newOp := &QueryGraph{
+ Tables: append(lqg.Tables, rqg.Tables...),
+ innerJoins: append(lqg.innerJoins, rqg.innerJoins...),
+ NoDeps: ctx.SemTable.AndExpressions(lqg.NoDeps, rqg.NoDeps),
+ }
+ if j.Predicate != nil {
+ err := newOp.collectPredicate(ctx, j.Predicate)
+ if err != nil {
+ return nil, rewrite.SameTree, err
+ }
+ }
+ return newOp, rewrite.NewTree, nil
+}
+
+func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) {
+ if tableExpr.Join == sqlparser.RightJoinType {
+ lhs, rhs = rhs, lhs
+ }
+ predicate := tableExpr.Condition.On
+ sqlparser.RemoveKeyspaceFromColName(predicate)
+ return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate}, nil
+}
+
+func createJoin(ctx *plancontext.PlanningContext, LHS, RHS ops.Operator) ops.Operator {
+ lqg, lok := LHS.(*QueryGraph)
+ rqg, rok := RHS.(*QueryGraph)
+ if lok && rok {
+ op := &QueryGraph{
+ Tables: append(lqg.Tables, rqg.Tables...),
+ innerJoins: append(lqg.innerJoins, rqg.innerJoins...),
+ NoDeps: ctx.SemTable.AndExpressions(lqg.NoDeps, rqg.NoDeps),
+ }
+ return op
+ }
+ return &Join{LHS: LHS, RHS: RHS}
+}
+
+func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) {
+ op := createJoin(ctx, lhs, rhs)
+ pred := tableExpr.Condition.On
+ if pred != nil {
+ var err error
+ sqlparser.RemoveKeyspaceFromColName(pred)
+ op, err = op.AddPredicate(ctx, pred)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return op, nil
+}
+
+func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ return AddPredicate(j, ctx, expr, false, newFilter)
+}
+
+var _ JoinOp = (*Join)(nil)
+
+func (j *Join) GetLHS() ops.Operator {
+ return j.LHS
+}
+
+func (j *Join) GetRHS() ops.Operator {
+ return j.RHS
+}
+
+func (j *Join) SetLHS(operator ops.Operator) {
+ j.LHS = operator
+}
+
+func (j *Join) SetRHS(operator ops.Operator) {
+ j.RHS = operator
+}
+
+func (j *Join) MakeInner() {
+ j.LeftJoin = false
+}
+
+func (j *Join) IsInner() bool {
+ return !j.LeftJoin
+}
+
+func (j *Join) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error {
+ j.Predicate = ctx.SemTable.AndExpressions(j.Predicate, expr)
+ return nil
+}
diff --git a/go/vt/vtgate/planbuilder/operators/joins.go b/go/vt/vtgate/planbuilder/operators/joins.go
new file mode 100644
index 00000000000..2764ad7f735
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/joins.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+)
+
+type JoinOp interface {
+ ops.Operator
+ GetLHS() ops.Operator
+ GetRHS() ops.Operator
+ SetLHS(ops.Operator)
+ SetRHS(ops.Operator)
+ MakeInner()
+ IsInner() bool
+ AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error
+}
+
+func AddPredicate(join JoinOp, ctx *plancontext.PlanningContext, expr sqlparser.Expr, joinPredicates bool, newFilter func(ops.Operator, sqlparser.Expr) ops.Operator) (ops.Operator, error) {
+ deps := ctx.SemTable.RecursiveDeps(expr)
+ switch {
+ case deps.IsSolvedBy(TableID(join.GetLHS())):
+ // predicates can always safely be pushed down to the lhs if that is all they depend on
+ lhs, err := join.GetLHS().AddPredicate(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+ join.SetLHS(lhs)
+ return join, err
+ case deps.IsSolvedBy(TableID(join.GetRHS())):
+ // if we are dealing with an outer join, always start by checking if this predicate can turn
+ // the join into an inner join
+ if !joinPredicates && !join.IsInner() && canConvertToInner(ctx, expr, TableID(join.GetRHS())) {
+ join.MakeInner()
+ }
+
+ if !joinPredicates && !join.IsInner() {
+ // if we still are dealing with an outer join
+ // we need to filter after the join has been evaluated
+ return newFilter(join, expr), nil
+ }
+
+ // For inner joins, we can just push the filtering on the RHS
+ rhs, err := join.GetRHS().AddPredicate(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+ join.SetRHS(rhs)
+ return join, err
+
+ case deps.IsSolvedBy(TableID(join)):
+ // if we are dealing with an outer join, always start by checking if this predicate can turn
+ // the join into an inner join
+ if !joinPredicates && !join.IsInner() && canConvertToInner(ctx, expr, TableID(join.GetRHS())) {
+ join.MakeInner()
+ }
+
+ if !joinPredicates && !join.IsInner() {
+ // if we still are dealing with an outer join
+ // we need to filter after the join has been evaluated
+ return newFilter(join, expr), nil
+ }
+
+ err := join.AddJoinPredicate(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+
+ return join, nil
+ }
+ return nil, nil
+}
+
+// we are looking for predicates like `tbl.col = <>` or `<> = tbl.col`,
+// where tbl is on the rhs of the left outer join
+// When a predicate uses information from an outer table, we can convert from an outer join to an inner join
+// if the predicate is "null-intolerant".
+//
+// Null-intolerant in this context means that the predicate will not be true if the table columns are null.
+//
+// Since an outer join is an inner join with the addition of all the rows from the left-hand side that
+// matched no rows on the right-hand, if we are later going to remove all the rows where the right-hand
+// side did not match, we might as well turn the join into an inner join.
+//
+// This is based on the paper "Canonical Abstraction for Outerjoin Optimization" by J Rao et al
+func canConvertToInner(ctx *plancontext.PlanningContext, expr sqlparser.Expr, rhs semantics.TableSet) bool {
+ isColNameFromRHS := func(e sqlparser.Expr) bool {
+ return sqlparser.IsColName(e) && ctx.SemTable.RecursiveDeps(e).IsSolvedBy(rhs)
+ }
+ switch expr := expr.(type) {
+ case *sqlparser.ComparisonExpr:
+ if expr.Operator == sqlparser.NullSafeEqualOp {
+ return false
+ }
+
+ return isColNameFromRHS(expr.Left) || isColNameFromRHS(expr.Right)
+
+ case *sqlparser.IsExpr:
+ if expr.Right != sqlparser.IsNotNullOp {
+ return false
+ }
+
+ return isColNameFromRHS(expr.Left)
+ default:
+ return false
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/operators/logical.go b/go/vt/vtgate/planbuilder/operators/logical.go
new file mode 100644
index 00000000000..e843f115f57
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/logical.go
@@ -0,0 +1,371 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "fmt"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+)
+
+// createLogicalOperatorFromAST creates an operator tree that represents the input SELECT or UNION query
+func createLogicalOperatorFromAST(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) {
+ switch node := selStmt.(type) {
+ case *sqlparser.Select:
+ op, err = createOperatorFromSelect(ctx, node)
+ case *sqlparser.Union:
+ op, err = createOperatorFromUnion(ctx, node)
+ case *sqlparser.Update:
+ op, err = createOperatorFromUpdate(ctx, node)
+ case *sqlparser.Delete:
+ op, err = createOperatorFromDelete(ctx, node)
+ default:
+ err = vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt))
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return op, nil
+}
+
+// createOperatorFromSelect creates an operator tree that represents the input SELECT query
+func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (ops.Operator, error) {
+ subq, err := createSubqueryFromStatement(ctx, sel)
+ if err != nil {
+ return nil, err
+ }
+ op, err := crossJoin(ctx, sel.From)
+ if err != nil {
+ return nil, err
+ }
+ if sel.Where != nil {
+ exprs := sqlparser.SplitAndExpression(nil, sel.Where.Expr)
+ for _, expr := range exprs {
+ sqlparser.RemoveKeyspaceFromColName(expr)
+ op, err = op.AddPredicate(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+ addColumnEquality(ctx, expr)
+ }
+ }
+ if subq == nil {
+ return &Horizon{
+ Source: op,
+ Select: sel,
+ }, nil
+ }
+ subq.Outer = op
+ return &Horizon{
+ Source: subq,
+ Select: sel,
+ }, nil
+}
+
+func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) (ops.Operator, error) {
+ opLHS, err := createLogicalOperatorFromAST(ctx, node.Left)
+ if err != nil {
+ return nil, err
+ }
+
+ _, isRHSUnion := node.Right.(*sqlparser.Union)
+ if isRHSUnion {
+ return nil, vterrors.VT12001("nesting of UNIONs on the right-hand side")
+ }
+ opRHS, err := createLogicalOperatorFromAST(ctx, node.Right)
+ if err != nil {
+ return nil, err
+ }
+
+ union := &Union{
+ Distinct: node.Distinct,
+ Sources: []ops.Operator{opLHS, opRHS},
+ Ordering: node.OrderBy,
+ }
+ return &Horizon{Source: union, Select: node}, nil
+}
+
+func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) {
+ tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where)
+ if err != nil {
+ return nil, err
+ }
+
+ assignments := make(map[string]sqlparser.Expr)
+ for _, set := range updStmt.Exprs {
+ assignments[set.Name.Name.String()] = set.Expr
+ }
+
+ vindexTable, opCode, dest, err := buildVindexTableForDML(ctx, tableInfo, qt, "update")
+ if err != nil {
+ return nil, err
+ }
+
+ vp, cvv, ovq, err := getUpdateVindexInformation(updStmt, vindexTable, qt.ID, qt.Predicates)
+ if err != nil {
+ return nil, err
+ }
+
+ r := &Route{
+ Source: &Update{
+ QTable: qt,
+ VTable: vindexTable,
+ Assignments: assignments,
+ ChangedVindexValues: cvv,
+ OwnedVindexQuery: ovq,
+ AST: updStmt,
+ },
+ RouteOpCode: opCode,
+ Keyspace: vindexTable.Keyspace,
+ VindexPreds: vp,
+ TargetDestination: dest,
+ }
+
+ for _, predicate := range qt.Predicates {
+ err := r.UpdateRoutingLogic(ctx, predicate)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if r.RouteOpCode == engine.Scatter && updStmt.Limit != nil {
+ // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07)
+ return nil, vterrors.VT12001("multi shard UPDATE with LIMIT")
+ }
+
+ subq, err := createSubqueryFromStatement(ctx, updStmt)
+ if err != nil {
+ return nil, err
+ }
+ if subq == nil {
+ return r, nil
+ }
+ subq.Outer = r
+ return subq, nil
+}
+
+func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) {
+ tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where)
+ if err != nil {
+ return nil, err
+ }
+
+ vindexTable, opCode, dest, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete")
+ if err != nil {
+ return nil, err
+ }
+
+ del := &Delete{
+ QTable: qt,
+ VTable: vindexTable,
+ AST: deleteStmt,
+ }
+ route := &Route{
+ Source: del,
+ RouteOpCode: opCode,
+ Keyspace: vindexTable.Keyspace,
+ TargetDestination: dest,
+ }
+
+ if !vindexTable.Keyspace.Sharded {
+ return route, nil
+ }
+
+ primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, qt.Predicates, vindexTable)
+ if err != nil {
+ return nil, err
+ }
+
+ route.VindexPreds = vindexAndPredicates
+
+ var ovq string
+ if len(vindexTable.Owned) > 0 {
+ tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: qt.Alias.As}
+ ovq = generateOwnedVindexQuery(tblExpr, deleteStmt, vindexTable, primaryVindex.Columns)
+ }
+
+ del.OwnedVindexQuery = ovq
+
+ for _, predicate := range qt.Predicates {
+ err := route.UpdateRoutingLogic(ctx, predicate)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if route.RouteOpCode == engine.Scatter && deleteStmt.Limit != nil {
+ // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07)
+ return nil, vterrors.VT12001("multi shard DELETE with LIMIT")
+ }
+
+ subq, err := createSubqueryFromStatement(ctx, deleteStmt)
+ if err != nil {
+ return nil, err
+ }
+ if subq == nil {
+ return route, nil
+ }
+ subq.Outer = route
+ return subq, nil
+}
+
+func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr) (ops.Operator, error) {
+ switch tableExpr := tableExpr.(type) {
+ case *sqlparser.AliasedTableExpr:
+ return getOperatorFromAliasedTableExpr(ctx, tableExpr)
+ case *sqlparser.JoinTableExpr:
+ return getOperatorFromJoinTableExpr(ctx, tableExpr)
+ case *sqlparser.ParenTableExpr:
+ return crossJoin(ctx, tableExpr.Exprs)
+ default:
+ return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr))
+ }
+}
+
+func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) (ops.Operator, error) {
+ lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr)
+ if err != nil {
+ return nil, err
+ }
+ rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tableExpr.Join {
+ case sqlparser.NormalJoinType:
+ return createInnerJoin(ctx, tableExpr, lhs, rhs)
+ case sqlparser.LeftJoinType, sqlparser.RightJoinType:
+ return createOuterJoin(tableExpr, lhs, rhs)
+ default:
+ return nil, vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString())
+ }
+}
+
+func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr) (ops.Operator, error) {
+ switch tbl := tableExpr.Expr.(type) {
+ case sqlparser.TableName:
+ tableID := ctx.SemTable.TableSetFor(tableExpr)
+ tableInfo, err := ctx.SemTable.TableInfoFor(tableID)
+ if err != nil {
+ return nil, err
+ }
+
+ if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex {
+ solves := ctx.SemTable.TableSetFor(tableExpr)
+ return &Vindex{
+ Table: VindexTable{
+ TableID: tableID,
+ Alias: tableExpr,
+ Table: tbl,
+ VTable: vt.Table.GetVindexTable(),
+ },
+ Vindex: vt.Vindex,
+ Solved: solves,
+ }, nil
+ }
+ qg := newQueryGraph()
+ isInfSchema := tableInfo.IsInfSchema()
+ qt := &QueryTable{Alias: tableExpr, Table: tbl, ID: tableID, IsInfSchema: isInfSchema}
+ qg.Tables = append(qg.Tables, qt)
+ return qg, nil
+ case *sqlparser.DerivedTable:
+ inner, err := createLogicalOperatorFromAST(ctx, tbl.Select)
+ if err != nil {
+ return nil, err
+ }
+ if horizon, ok := inner.(*Horizon); ok {
+ inner = horizon.Source
+ }
+
+ return &Derived{Alias: tableExpr.As.String(), Source: inner, Query: tbl.Select, ColumnAliases: tableExpr.Columns}, nil
+ default:
+ return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl))
+ }
+}
+
+func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) (ops.Operator, error) {
+ var output ops.Operator
+ for _, tableExpr := range exprs {
+ op, err := getOperatorFromTableExpr(ctx, tableExpr)
+ if err != nil {
+ return nil, err
+ }
+ if output == nil {
+ output = op
+ } else {
+ output = createJoin(ctx, output, op)
+ }
+ }
+ return output, nil
+}
+
+func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, whereClause *sqlparser.Where) (semantics.TableInfo, *QueryTable, error) {
+ alTbl, ok := tableExpr.(*sqlparser.AliasedTableExpr)
+ if !ok {
+ return nil, nil, vterrors.VT13001("expected AliasedTableExpr")
+ }
+ tblName, ok := alTbl.Expr.(sqlparser.TableName)
+ if !ok {
+ return nil, nil, vterrors.VT13001("expected TableName")
+ }
+
+ tableID := ctx.SemTable.TableSetFor(alTbl)
+ tableInfo, err := ctx.SemTable.TableInfoFor(tableID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if tableInfo.IsInfSchema() {
+ return nil, nil, vterrors.VT12001("update information schema tables")
+ }
+
+ var predicates []sqlparser.Expr
+ if whereClause != nil {
+ predicates = sqlparser.SplitAndExpression(nil, whereClause.Expr)
+ }
+ qt := &QueryTable{
+ ID: tableID,
+ Alias: alTbl,
+ Table: tblName,
+ Predicates: predicates,
+ IsInfSchema: false,
+ }
+ return tableInfo, qt, nil
+}
+
+func addColumnEquality(ctx *plancontext.PlanningContext, expr sqlparser.Expr) {
+ switch expr := expr.(type) {
+ case *sqlparser.ComparisonExpr:
+ if expr.Operator != sqlparser.EqualOp {
+ return
+ }
+
+ if left, isCol := expr.Left.(*sqlparser.ColName); isCol {
+ ctx.SemTable.AddColumnEquality(left, expr.Right)
+ }
+ if right, isCol := expr.Right.(*sqlparser.ColName); isCol {
+ ctx.SemTable.AddColumnEquality(right, expr.Left)
+ }
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/operators/operator.go b/go/vt/vtgate/planbuilder/operators/operator.go
new file mode 100644
index 00000000000..2621fa9875b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/operator.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package operators contains the operators used to plan queries.
+/*
+The operators go through a few phases while planning:
+1. Logical
+ In this first pass, we build an operator tree from the incoming parsed query.
+ It will contain logical joins - we still haven't decided on the join algorithm to use yet.
+ At the leaves, it will contain QueryGraphs - these are the tables in the FROM clause
+ that we can easily do join ordering on. The logical tree will represent the full query,
+ including projections, grouping, ordering and so on.
+2. Physical
+ Once the logical plan has been fully built, we go bottom up and plan which routes that will be used.
+ During this phase, we will also decide which join algorithms should be used on the vtgate level
+3. Columns & Aggregation
+ Once we know which queries will be sent to the tablets, we go over the tree and decide which
+ columns each operator should output. At this point, we also do offset lookups,
+ so we know at runtime from which columns in the input table we need to read.
+*/
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+type (
+ // helper type that implements Inputs() returning nil
+ noInputs struct{}
+
+ // helper type that implements AddColumn() returning an error
+ noColumns struct{}
+
+ // helper type that implements AddPredicate() returning an error
+ noPredicates struct{}
+)
+
+func PlanQuery(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (ops.Operator, error) {
+ op, err := createLogicalOperatorFromAST(ctx, selStmt)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = CheckValid(op); err != nil {
+ return nil, err
+ }
+
+ op, err = transformToPhysical(ctx, op)
+ if err != nil {
+ return nil, err
+ }
+
+ backup := Clone(op)
+
+ op, err = planHorizons(op)
+ if err == errNotHorizonPlanned {
+ op = backup
+ } else if err != nil {
+ return nil, err
+ }
+
+ if op, err = Compact(ctx, op); err != nil {
+ return nil, err
+ }
+
+ return op, err
+}
+
+// Inputs implements the Operator interface
+func (noInputs) Inputs() []ops.Operator {
+ return nil
+}
+
+// AddColumn implements the Operator interface
+func (noColumns) AddColumn(*plancontext.PlanningContext, sqlparser.Expr) (int, error) {
+ return 0, vterrors.VT13001("the noColumns operator cannot accept columns")
+}
+
+// AddPredicate implements the Operator interface
+func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) (ops.Operator, error) {
+ return nil, vterrors.VT13001("the noColumns operator cannot accept predicates")
+}
diff --git a/go/vt/vtgate/planbuilder/operators/operator_funcs.go b/go/vt/vtgate/planbuilder/operators/operator_funcs.go
new file mode 100644
index 00000000000..ee23175eac3
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/operator_funcs.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "fmt"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+// RemovePredicate is used when we turn a predicate into a plan operator,
+// and the predicate needs to be removed as an AST construct
+func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) {
+ switch op := op.(type) {
+ case *Route:
+ newSrc, err := RemovePredicate(ctx, expr, op.Source)
+ if err != nil {
+ return nil, err
+ }
+ op.Source = newSrc
+ return op, err
+ case *ApplyJoin:
+ isRemoved := false
+ deps := ctx.SemTable.RecursiveDeps(expr)
+ if deps.IsSolvedBy(TableID(op.LHS)) {
+ newSrc, err := RemovePredicate(ctx, expr, op.LHS)
+ if err != nil {
+ return nil, err
+ }
+ op.LHS = newSrc
+ isRemoved = true
+ }
+
+ if deps.IsSolvedBy(TableID(op.RHS)) {
+ newSrc, err := RemovePredicate(ctx, expr, op.RHS)
+ if err != nil {
+ return nil, err
+ }
+ op.RHS = newSrc
+ isRemoved = true
+ }
+
+ var keep []sqlparser.Expr
+ for _, e := range sqlparser.SplitAndExpression(nil, op.Predicate) {
+ if ctx.SemTable.EqualsExpr(expr, e) {
+ isRemoved = true
+ } else {
+ keep = append(keep, e)
+ }
+ }
+
+ if !isRemoved {
+ return nil, vterrors.VT12001(fmt.Sprintf("remove '%s' predicate on cross-shard join query", sqlparser.String(expr)))
+ }
+
+ op.Predicate = ctx.SemTable.AndExpressions(keep...)
+ return op, nil
+ case *Filter:
+ idx := -1
+ for i, predicate := range op.Predicates {
+ if ctx.SemTable.EqualsExpr(predicate, expr) {
+ idx = i
+ }
+ }
+ if idx == -1 {
+ // the predicate is not here. let's remove it from our source
+ newSrc, err := RemovePredicate(ctx, expr, op.Source)
+ if err != nil {
+ return nil, err
+ }
+ op.Source = newSrc
+ return op, nil
+ }
+ if len(op.Predicates) == 1 {
+ // no predicates left on this operator, so we just remove it
+ return op.Source, nil
+ }
+
+ // remove the predicate from this filter
+ op.Predicates = append(op.Predicates[:idx], op.Predicates[idx+1:]...)
+ return op, nil
+
+ default:
+ return nil, vterrors.VT13001("this should not happen - tried to remove predicate from the operator table")
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/abstract/operator_test.go b/go/vt/vtgate/planbuilder/operators/operator_test.go
similarity index 90%
rename from go/vt/vtgate/planbuilder/abstract/operator_test.go
rename to go/vt/vtgate/planbuilder/operators/operator_test.go
index a7a536a4aba..4ba5588f22e 100644
--- a/go/vt/vtgate/planbuilder/abstract/operator_test.go
+++ b/go/vt/vtgate/planbuilder/operators/operator_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package abstract
+package operators
import (
"bufio"
@@ -25,6 +25,8 @@ import (
"strings"
"testing"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -100,12 +102,12 @@ func TestOperator(t *testing.T) {
require.NoError(t, err)
semTable, err := semantics.Analyze(stmt, "", si)
require.NoError(t, err)
- optree, err := CreateLogicalOperatorFromAST(stmt, semTable)
+ ctx := plancontext.NewPlanningContext(nil, semTable, nil, 0)
+ optree, err := createLogicalOperatorFromAST(ctx, stmt)
+ require.NoError(t, err)
+ optree, err = Compact(ctx, optree)
require.NoError(t, err)
output := testString(optree)
- if tc.expected != output {
- fmt.Println(1)
- }
assert.Equal(t, tc.expected, output)
if t.Failed() {
fmt.Println(output)
@@ -114,7 +116,7 @@ func TestOperator(t *testing.T) {
}
}
-func testString(op Operator) string {
+func testString(op interface{}) string { // TODO
switch op := op.(type) {
case *QueryGraph:
return fmt.Sprintf("QueryGraph: %s", op.testString())
@@ -126,8 +128,8 @@ func testString(op Operator) string {
}
return fmt.Sprintf("Join: {\n\tLHS: %s\n\tRHS: %s\n\tPredicate: %s\n}", leftStr, rightStr, sqlparser.String(op.Predicate))
case *Derived:
- inner := indent(testString(op.Inner))
- query := sqlparser.String(op.Sel)
+ inner := indent(testString(op.Source))
+ query := sqlparser.String(op.Query)
return fmt.Sprintf("Derived %s: {\n\tQuery: %s\n\tInner:%s\n}", op.Alias, query, inner)
case *SubQuery:
var inners []string
@@ -147,16 +149,13 @@ func testString(op Operator) string {
case *Vindex:
value := sqlparser.String(op.Value)
return fmt.Sprintf("Vindex: {\n\tName: %s\n\tValue: %s\n}", op.Vindex.String(), value)
- case *Concatenate:
+ case *Union:
var inners []string
for _, source := range op.Sources {
inners = append(inners, indent(testString(source)))
}
- if len(op.OrderBy) > 0 {
- inners = append(inners, indent(sqlparser.String(op.OrderBy)[1:]))
- }
- if op.Limit != nil {
- inners = append(inners, indent(sqlparser.String(op.Limit)[1:]))
+ if len(op.Ordering) > 0 {
+ inners = append(inners, indent(sqlparser.String(op.Ordering)[1:]))
}
dist := ""
if op.Distinct {
@@ -164,7 +163,7 @@ func testString(op Operator) string {
}
return fmt.Sprintf("Concatenate%s {\n%s\n}", dist, strings.Join(inners, ",\n"))
case *Update:
- tbl := "table: " + op.Table.testString()
+ tbl := "table: " + op.QTable.testString()
var assignments []string
// sort to produce stable results, otherwise test is flaky
keys := make([]string, 0, len(op.Assignments))
@@ -176,6 +175,9 @@ func testString(op Operator) string {
assignments = append(assignments, fmt.Sprintf("\t%s = %s", k, sqlparser.String(op.Assignments[k])))
}
return fmt.Sprintf("Update {\n\t%s\nassignments:\n%s\n}", tbl, strings.Join(assignments, "\n"))
+ case *Horizon:
+ src := indent(testString(op.Source))
+ return fmt.Sprintf("Horizon {\n\tQuery: \"%s\"\n\tInner:%s\n}", sqlparser.String(op.Select), src)
}
panic(fmt.Sprintf("%T", op))
}
diff --git a/go/vt/vtgate/planbuilder/operators/operator_test_data.txt b/go/vt/vtgate/planbuilder/operators/operator_test_data.txt
new file mode 100644
index 00000000000..2854a43f6bb
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/operator_test_data.txt
@@ -0,0 +1,628 @@
+(select id from unsharded union all select id from unsharded_auto order by id) union select id from user
+Horizon {
+ Query: "(select id from unsharded union all select id from unsharded_auto order by id asc) union select id from `user`"
+ Inner: Concatenate(distinct) {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ },
+ Horizon {
+ Query: "select id from `user`"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:`user`
+ }
+ }
+ }
+}
+
+select id from unsharded union select id from unsharded_auto
+Horizon {
+ Query: "select id from unsharded union select id from unsharded_auto"
+ Inner: Concatenate(distinct) {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ }
+ }
+}
+
+select id from unsharded union all select id from unsharded_auto
+Horizon {
+ Query: "select id from unsharded union all select id from unsharded_auto"
+ Inner: Concatenate {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ }
+ }
+}
+
+(select id from unsharded union all select id from unsharded_auto limit 10) union select id from x order by id
+Horizon {
+ Query: "(select id from unsharded union all select id from unsharded_auto limit 10) union select id from x order by id asc"
+ Inner: Concatenate(distinct) {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ },
+ Horizon {
+ Query: "select id from x"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:x
+ }
+ },
+ order by id asc
+ }
+}
+
+(select id from unsharded union all select id from unsharded_auto) union all select id from x
+Horizon {
+ Query: "select id from unsharded union all select id from unsharded_auto union all select id from x"
+ Inner: Concatenate {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ },
+ Horizon {
+ Query: "select id from x"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:x
+ }
+ }
+ }
+}
+
+(select id from unsharded union select id from unsharded_auto) union select id from x
+Horizon {
+ Query: "select id from unsharded union select id from unsharded_auto union select id from x"
+ Inner: Concatenate(distinct) {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ },
+ Horizon {
+ Query: "select id from x"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:x
+ }
+ }
+ }
+}
+
+(select id from unsharded union select id from unsharded_auto) union all select id from x
+Horizon {
+ Query: "select id from unsharded union select id from unsharded_auto union all select id from x"
+ Inner: Concatenate {
+ Concatenate(distinct) {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ }
+ },
+ Horizon {
+ Query: "select id from x"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:x
+ }
+ }
+ }
+}
+
+select * from t
+Horizon {
+ Query: "select * from t"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t
+ }
+}
+
+select t.c from t,y,z where t.c = y.c and (t.a = z.a or t.a = y.a) and 1 < 2
+Horizon {
+ Query: "select t.c from t, y, z where t.c = y.c and (t.a = z.a or t.a = y.a) and 1 < 2"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t
+ TableSet{1}:y
+ TableSet{2}:z
+ JoinPredicates:
+ TableSet{0,1,2} - t.a = z.a or t.a = y.a
+ TableSet{0,1} - t.c = y.c
+ ForAll: 1 < 2
+ }
+}
+
+select t.c from t join y on t.id = y.t_id join z on t.id = z.t_id where t.name = 'foo' and y.col = 42 and z.baz = 101
+Horizon {
+ Query: "select t.c from t join y on t.id = y.t_id join z on t.id = z.t_id where t.`name` = 'foo' and y.col = 42 and z.baz = 101"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t where t.`name` = 'foo'
+ TableSet{1}:y where y.col = 42
+ TableSet{2}:z where z.baz = 101
+ JoinPredicates:
+ TableSet{0,1} - t.id = y.t_id
+ TableSet{0,2} - t.id = z.t_id
+ }
+}
+
+select t.c from t,y,z where t.name = 'foo' and y.col = 42 and z.baz = 101 and t.id = y.t_id and t.id = z.t_id
+Horizon {
+ Query: "select t.c from t, y, z where t.`name` = 'foo' and y.col = 42 and z.baz = 101 and t.id = y.t_id and t.id = z.t_id"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t where t.`name` = 'foo'
+ TableSet{1}:y where y.col = 42
+ TableSet{2}:z where z.baz = 101
+ JoinPredicates:
+ TableSet{0,1} - t.id = y.t_id
+ TableSet{0,2} - t.id = z.t_id
+ }
+}
+
+select 1 from t where '1' = 1 and 12 = '12'
+Horizon {
+ Query: "select 1 from t where '1' = 1 and 12 = '12'"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t
+ ForAll: '1' = 1 and 12 = '12'
+ }
+}
+
+select 1 from t left join s on t.id = s.id
+Horizon {
+ Query: "select 1 from t left join s on t.id = s.id"
+ Inner: OuterJoin: {
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t
+ }
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{1}:s
+ }
+ Predicate: t.id = s.id
+ }
+}
+
+select 1 from t join s on t.id = s.id and t.name = s.name
+Horizon {
+ Query: "select 1 from t join s on t.id = s.id and t.`name` = s.`name`"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t
+ TableSet{1}:s
+ JoinPredicates:
+ TableSet{0,1} - t.id = s.id and t.`name` = s.`name`
+ }
+}
+
+select 1 from t left join s on t.id = s.id where t.name = 'Mister'
+Horizon {
+ Query: "select 1 from t left join s on t.id = s.id where t.`name` = 'Mister'"
+ Inner: OuterJoin: {
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:t where t.`name` = 'Mister'
+ }
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{1}:s
+ }
+ Predicate: t.id = s.id
+ }
+}
+
+select 1 from t right join s on t.id = s.id
+Horizon {
+ Query: "select 1 from t right join s on t.id = s.id"
+ Inner: OuterJoin: {
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:s
+ }
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{0}:t
+ }
+ Predicate: t.id = s.id
+ }
+}
+
+select 1 from (a left join b on a.id = b.id) join (c left join d on c.id = d.id) on a.id = c.id
+Horizon {
+ Query: "select 1 from (a left join b on a.id = b.id) join (c left join d on c.id = d.id) on a.id = c.id"
+ Inner: Join: {
+ LHS: OuterJoin: {
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:a
+ }
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{1}:b
+ }
+ Predicate: a.id = b.id
+ }
+ RHS: OuterJoin: {
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:c
+ }
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{3}:d
+ }
+ Predicate: c.id = d.id
+ }
+ Predicate: a.id = c.id
+ }
+}
+
+select 1 from (select 42 as id from tbl) as t
+Horizon {
+ Query: "select 1 from (select 42 as id from tbl) as t"
+ Inner: Derived t: {
+ Query: select 42 as id from tbl
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:tbl
+ }
+ }
+}
+
+select 1 from (select id from tbl limit 10) as t join (select foo, count(*) from usr group by foo) as s on t.id = s.foo
+Horizon {
+ Query: "select 1 from (select id from tbl limit 10) as t join (select foo, count(*) from usr group by foo) as s on t.id = s.foo"
+ Inner: Join: {
+ LHS: Derived t: {
+ Query: select id from tbl limit 10
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:tbl
+ }
+ }
+ RHS: Derived s: {
+ Query: select foo, count(*) from usr group by foo
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:usr
+ }
+ }
+ Predicate: t.id = s.foo
+ }
+}
+
+select (select 1) from t where exists (select 1) and id in (select 1)
+Horizon {
+ Query: "select (select 1 from dual) from t where exists (select 1 from dual) and id in (select 1 from dual)"
+ Inner: SubQuery: {
+ SubQueries: [
+ {
+ Type: PulloutValue
+ Query: QueryGraph: {
+ Tables:
+ TableSet{1}:dual
+ }
+ }
+ {
+ Type: PulloutExists
+ Query: QueryGraph: {
+ Tables:
+ TableSet{2}:dual
+ }
+ }
+ {
+ Type: PulloutIn
+ Query: QueryGraph: {
+ Tables:
+ TableSet{3}:dual
+ }
+ }]
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{0}:t where id in (select 1 from dual)
+ ForAll: exists (select 1 from dual)
+ }
+ }
+}
+
+select u.id from user u where u.id = (select id from user_extra where id = u.id)
+Horizon {
+ Query: "select u.id from `user` as u where u.id = (select id from user_extra where id = u.id)"
+ Inner: SubQuery: {
+ SubQueries: [
+ {
+ Type: PulloutValue
+ Query: QueryGraph: {
+ Tables:
+ TableSet{1}:user_extra
+ JoinPredicates:
+ TableSet{0,1} - id = u.id
+ }
+ }]
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{0}:`user` AS u where u.id = (select id from user_extra where id = u.id)
+ }
+ }
+}
+
+select id from user_index where id = :id
+Horizon {
+ Query: "select id from user_index where id = :id"
+ Inner: Vindex: {
+ Name: user_index
+ Value: :id
+ }
+}
+
+select ui.id from user_index as ui join user as u where ui.id = 1 and ui.id = u.id
+Horizon {
+ Query: "select ui.id from user_index as ui join `user` as u where ui.id = 1 and ui.id = u.id"
+ Inner: Join: {
+ LHS: Vindex: {
+ Name: user_index
+ Value: 1
+ }
+ RHS: QueryGraph: {
+ Tables:
+ TableSet{1}:`user` AS u
+ }
+ Predicate: ui.id = u.id
+ }
+}
+
+select u.id from (select id from user_index where id = 2) as u
+Horizon {
+ Query: "select u.id from (select id from user_index where id = 2) as u"
+ Inner: Derived u: {
+ Query: select id from user_index where id = 2
+ Inner: Vindex: {
+ Name: user_index
+ Value: 2
+ }
+ }
+}
+
+select 1 from a union select 2 from b
+Horizon {
+ Query: "select 1 from a union select 2 from b"
+ Inner: Concatenate(distinct) {
+ Horizon {
+ Query: "select 1 from a"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:a
+ }
+ },
+ Horizon {
+ Query: "select 2 from b"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:b
+ }
+ }
+ }
+}
+
+select 1 from a union select 2 from b union select 3 from c
+Horizon {
+ Query: "select 1 from a union select 2 from b union select 3 from c"
+ Inner: Concatenate(distinct) {
+ Horizon {
+ Query: "select 1 from a"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:a
+ }
+ },
+ Horizon {
+ Query: "select 2 from b"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:b
+ }
+ },
+ Horizon {
+ Query: "select 3 from c"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:c
+ }
+ }
+ }
+}
+
+select 1 from a union select 2 from b union select 3 from c union all select 4 from d
+Horizon {
+ Query: "select 1 from a union select 2 from b union select 3 from c union all select 4 from d"
+ Inner: Concatenate {
+ Concatenate(distinct) {
+ Horizon {
+ Query: "select 1 from a"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:a
+ }
+ },
+ Horizon {
+ Query: "select 2 from b"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:b
+ }
+ },
+ Horizon {
+ Query: "select 3 from c"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{2}:c
+ }
+ }
+ },
+ Horizon {
+ Query: "select 4 from d"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{3}:d
+ }
+ }
+ }
+}
+
+select id from unsharded union select id from unsharded_auto order by id
+Horizon {
+ Query: "select id from unsharded union select id from unsharded_auto order by id asc"
+ Inner: Concatenate(distinct) {
+ Horizon {
+ Query: "select id from unsharded"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:unsharded
+ }
+ },
+ Horizon {
+ Query: "select id from unsharded_auto"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{1}:unsharded_auto
+ }
+ },
+ order by id asc
+ }
+}
+
+select id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)
+Horizon {
+ Query: "select id from `user` where exists (select user_id from user_extra where user_id = 3 and user_id < `user`.id)"
+ Inner: SubQuery: {
+ SubQueries: [
+ {
+ Type: PulloutExists
+ Query: QueryGraph: {
+ Tables:
+ TableSet{1}:user_extra where user_id = 3
+ JoinPredicates:
+ TableSet{0,1} - user_id < `user`.id
+ }
+ }]
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{0}:`user` where exists (select user_id from user_extra where user_id = 3 and user_id < `user`.id)
+ }
+ }
+}
+
+select ks.tbl.col from ks.tbl where ks.tbl.id = 1
+Horizon {
+ Query: "select ks.tbl.col from ks.tbl where tbl.id = 1"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:ks.tbl where tbl.id = 1
+ }
+}
+
+select 1 from ks.t join ks.y on ks.t.id = ks.y.t_id
+Horizon {
+ Query: "select 1 from ks.t join ks.y on t.id = y.t_id"
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:ks.t
+ TableSet{1}:ks.y
+ JoinPredicates:
+ TableSet{0,1} - t.id = y.t_id
+ }
+}
+
+select 1 from ks.t left join ks.y on ks.t.id = ks.y.t_id
+Horizon {
+ Query: "select 1 from ks.t left join ks.y on t.id = y.t_id"
+ Inner: OuterJoin: {
+ Inner: QueryGraph: {
+ Tables:
+ TableSet{0}:ks.t
+ }
+ Outer: QueryGraph: {
+ Tables:
+ TableSet{1}:ks.y
+ }
+ Predicate: t.id = y.t_id
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/operators/ops/op.go b/go/vt/vtgate/planbuilder/operators/ops/op.go
new file mode 100644
index 00000000000..4deeb5cee1e
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/ops/op.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ops
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+type (
+ // Operator forms the tree of operators, representing the declarative query provided.
+ // While planning, the operator tree starts with logical operators, and later moves to physical operators.
+ // The difference between the two is that when we get to a physical operator, we have made decisions on in
+ // which order to do the joins, and how to split them up across shards and keyspaces.
+ // In some situation we go straight to the physical operator - when there are no options to consider,
+ // we can go straight to the end result.
+ Operator interface {
+ Clone(inputs []Operator) Operator
+ Inputs() []Operator
+
+ // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree.
+ // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts,
+ // where data is fetched from the LHS of the join to be used in the evaluation on the RHS
+ AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Operator, error)
+
+ // AddColumn tells an operator to also output an additional column specified.
+ // The offset to the column is returned.
+ AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error)
+ }
+
+ // PhysicalOperator means that this operator is ready to be turned into a logical plan
+ PhysicalOperator interface {
+ Operator
+ IPhysical()
+ }
+)
diff --git a/go/vt/vtgate/planbuilder/abstract/querygraph.go b/go/vt/vtgate/planbuilder/operators/querygraph.go
similarity index 64%
rename from go/vt/vtgate/planbuilder/abstract/querygraph.go
rename to go/vt/vtgate/planbuilder/operators/querygraph.go
index 0e6da05e211..b22fdf6907e 100644
--- a/go/vt/vtgate/planbuilder/abstract/querygraph.go
+++ b/go/vt/vtgate/planbuilder/operators/querygraph.go
@@ -14,10 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package abstract
+package operators
import (
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
@@ -38,6 +40,9 @@ type (
// NoDeps contains the predicates that can be evaluated anywhere.
NoDeps sqlparser.Expr
+
+ noInputs
+ noColumns
}
innerJoin struct {
@@ -56,23 +61,10 @@ type (
}
)
-var _ LogicalOperator = (*QueryGraph)(nil)
-
-func (*QueryGraph) iLogical() {}
-
-// PushPredicate implements the Operator interface
-func (qg *QueryGraph) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
- for _, e := range sqlparser.SplitAndExpression(nil, expr) {
- err := qg.collectPredicate(e, semTable)
- if err != nil {
- return nil, err
- }
- }
- return qg, nil
-}
+var _ ops.Operator = (*QueryGraph)(nil)
-// TableID implements the Operator interface
-func (qg *QueryGraph) TableID() semantics.TableSet {
+// Introduces implements the TableIDIntroducer interface
+func (qg *QueryGraph) Introduces() semantics.TableSet {
var ts semantics.TableSet
for _, table := range qg.Tables {
ts = ts.Merge(table.ID)
@@ -98,11 +90,11 @@ func newQueryGraph() *QueryGraph {
return &QueryGraph{}
}
-func (qg *QueryGraph) collectPredicates(sel *sqlparser.Select, semTable *semantics.SemTable) error {
+func (qg *QueryGraph) collectPredicates(ctx *plancontext.PlanningContext, sel *sqlparser.Select) error {
predicates := sqlparser.SplitAndExpression(nil, sel.Where.Expr)
for _, predicate := range predicates {
- err := qg.collectPredicate(predicate, semTable)
+ err := qg.collectPredicate(ctx, predicate)
if err != nil {
return err
}
@@ -118,41 +110,47 @@ func (qg *QueryGraph) getPredicateByDeps(ts semantics.TableSet) ([]sqlparser.Exp
}
return nil, false
}
-func (qg *QueryGraph) addJoinPredicates(ts semantics.TableSet, expr sqlparser.Expr) {
+func (qg *QueryGraph) addJoinPredicates(ctx *plancontext.PlanningContext, ts semantics.TableSet, predicate sqlparser.Expr) {
for _, join := range qg.innerJoins {
if join.deps == ts {
- join.exprs = append(join.exprs, expr)
+ if ctx.SemTable.ContainsExpr(predicate, join.exprs) {
+ return
+ }
+
+ join.exprs = append(join.exprs, predicate)
return
}
}
qg.innerJoins = append(qg.innerJoins, &innerJoin{
deps: ts,
- exprs: []sqlparser.Expr{expr},
+ exprs: []sqlparser.Expr{predicate},
})
}
-func (qg *QueryGraph) collectPredicate(predicate sqlparser.Expr, semTable *semantics.SemTable) error {
- deps := semTable.RecursiveDeps(predicate)
+func (qg *QueryGraph) collectPredicate(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) error {
+ deps := ctx.SemTable.RecursiveDeps(predicate)
switch deps.NumberOfTables() {
case 0:
qg.addNoDepsPredicate(predicate)
case 1:
- found := qg.addToSingleTable(deps, predicate)
+ found := qg.addToSingleTable(ctx, deps, predicate)
if !found {
// this could be a predicate that only has dependencies from outside this QG
- qg.addJoinPredicates(deps, predicate)
+ qg.addJoinPredicates(ctx, deps, predicate)
}
default:
- qg.addJoinPredicates(deps, predicate)
+ qg.addJoinPredicates(ctx, deps, predicate)
}
return nil
}
-func (qg *QueryGraph) addToSingleTable(table semantics.TableSet, predicate sqlparser.Expr) bool {
+func (qg *QueryGraph) addToSingleTable(ctx *plancontext.PlanningContext, table semantics.TableSet, predicate sqlparser.Expr) bool {
for _, t := range qg.Tables {
if table == t.ID {
- t.Predicates = append(t.Predicates, predicate)
+ if !ctx.SemTable.ContainsExpr(predicate, t.Predicates) {
+ t.Predicates = append(t.Predicates, predicate)
+ }
return true
}
}
@@ -170,24 +168,50 @@ func (qg *QueryGraph) addNoDepsPredicate(predicate sqlparser.Expr) {
}
}
-// UnsolvedPredicates implements the Operator interface
+// UnsolvedPredicates implements the unresolved interface
func (qg *QueryGraph) UnsolvedPredicates(_ *semantics.SemTable) []sqlparser.Expr {
var result []sqlparser.Expr
+ tables := TableID(qg)
for _, join := range qg.innerJoins {
set, exprs := join.deps, join.exprs
- if !set.IsSolvedBy(qg.TableID()) {
+ if !set.IsSolvedBy(tables) {
result = append(result, exprs...)
}
}
return result
}
-// CheckValid implements the Operator interface
-func (qg *QueryGraph) CheckValid() error {
- return nil
+// Clone implements the Operator interface
+func (qg *QueryGraph) Clone(inputs []ops.Operator) ops.Operator {
+ result := &QueryGraph{
+ Tables: nil,
+ innerJoins: nil,
+ NoDeps: nil,
+ }
+
+ result.Tables = append([]*QueryTable{}, qg.Tables...)
+ result.innerJoins = append([]*innerJoin{}, qg.innerJoins...)
+ result.NoDeps = qg.NoDeps
+ return result
}
-// Compact implements the Operator interface
-func (qg *QueryGraph) Compact(*semantics.SemTable) (LogicalOperator, error) {
+func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ for _, e := range sqlparser.SplitAndExpression(nil, expr) {
+ err := qg.collectPredicate(ctx, e)
+ if err != nil {
+ return nil, err
+ }
+ }
return qg, nil
}
+
+// Clone implements the Operator interface
+func (qt *QueryTable) Clone() *QueryTable {
+ return &QueryTable{
+ ID: qt.ID,
+ Alias: sqlparser.CloneRefOfAliasedTableExpr(qt.Alias),
+ Table: sqlparser.CloneTableName(qt.Table),
+ Predicates: qt.Predicates,
+ IsInfSchema: qt.IsInfSchema,
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/abstract/queryprojection.go b/go/vt/vtgate/planbuilder/operators/queryprojection.go
similarity index 74%
rename from go/vt/vtgate/planbuilder/abstract/queryprojection.go
rename to go/vt/vtgate/planbuilder/operators/queryprojection.go
index 14572b117b2..8de53a762be 100644
--- a/go/vt/vtgate/planbuilder/abstract/queryprojection.go
+++ b/go/vt/vtgate/planbuilder/operators/queryprojection.go
@@ -14,16 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package abstract
+package operators
import (
"encoding/json"
+ "fmt"
"sort"
"strings"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+
"vitess.io/vitess/go/vt/vtgate/engine"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -81,6 +84,7 @@ type (
AggrRewriter struct {
qp *QueryProjection
+ st *semantics.SemTable
Err error
}
)
@@ -121,7 +125,7 @@ func (s SelectExpr) GetExpr() (sqlparser.Expr, error) {
case *sqlparser.AliasedExpr:
return sel.Expr, nil
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] %T does not have expr", s.Col)
+ return nil, vterrors.VT13001(fmt.Sprintf("%T does not have an expression", s.Col))
}
}
@@ -132,14 +136,14 @@ func (s SelectExpr) GetAliasedExpr() (*sqlparser.AliasedExpr, error) {
case *sqlparser.AliasedExpr:
return expr, nil
case *sqlparser.StarExpr:
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: '*' expression in cross-shard query")
+ return nil, vterrors.VT12001("'*' expression in cross-shard query")
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "not an aliased expression: %T", expr)
+ return nil, vterrors.VT12001(fmt.Sprintf("not an aliased expression: %T", expr))
}
}
// CreateQPFromSelect creates the QueryProjection for the input *sqlparser.Select
-func CreateQPFromSelect(sel *sqlparser.Select) (*QueryProjection, error) {
+func CreateQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (*QueryProjection, error) {
qp := &QueryProjection{
Distinct: sel.Distinct,
}
@@ -149,7 +153,7 @@ func CreateQPFromSelect(sel *sqlparser.Select) (*QueryProjection, error) {
return nil, err
}
for _, group := range sel.GroupBy {
- selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(group)
+ selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(ctx, group)
expr, weightStrExpr, err := qp.GetSimplifiedExpr(group)
if err != nil {
return nil, err
@@ -181,44 +185,60 @@ func CreateQPFromSelect(sel *sqlparser.Select) (*QueryProjection, error) {
return qp, nil
}
-// Rewrite will go through an expression, add aggregations to the QP, and rewrite them to use column offset
-func (ar *AggrRewriter) Rewrite() func(*sqlparser.Cursor) bool {
+// RewriteDown stops the walker from entering inside aggregation functions
+func (ar *AggrRewriter) RewriteDown() func(sqlparser.SQLNode, sqlparser.SQLNode) bool {
+ return func(node, _ sqlparser.SQLNode) bool {
+ if ar.Err != nil {
+ return true
+ }
+ _, ok := node.(sqlparser.AggrFunc)
+ return !ok
+ }
+}
+
+// RewriteUp will go through an expression, add aggregations to the QP, and rewrite them to use column offset
+func (ar *AggrRewriter) RewriteUp() func(*sqlparser.Cursor) bool {
return func(cursor *sqlparser.Cursor) bool {
if ar.Err != nil {
return false
}
sqlNode := cursor.Node()
- if fExp, ok := sqlNode.(sqlparser.AggrFunc); ok {
- for offset, expr := range ar.qp.SelectExprs {
- ae, err := expr.GetAliasedExpr()
- if err != nil {
- ar.Err = err
- return false
- }
- if sqlparser.EqualsExpr(ae.Expr, fExp) {
- cursor.Replace(sqlparser.NewOffset(offset, fExp))
- return false // no need to visit aggregation children
- }
+ fExp, ok := sqlNode.(sqlparser.AggrFunc)
+ if !ok {
+ return true
+ }
+ for offset, expr := range ar.qp.SelectExprs {
+ ae, err := expr.GetAliasedExpr()
+ if err != nil {
+ ar.Err = err
+ return false
}
-
- col := SelectExpr{
- Aggr: true,
- Col: &sqlparser.AliasedExpr{Expr: fExp},
+ if ar.st.EqualsExpr(ae.Expr, fExp) {
+ cursor.Replace(sqlparser.NewOffset(offset, fExp))
+ return true
}
- ar.qp.HasAggr = true
+ }
- cursor.Replace(sqlparser.NewOffset(len(ar.qp.SelectExprs), fExp))
- ar.qp.SelectExprs = append(ar.qp.SelectExprs, col)
- ar.qp.AddedColumn++
+ col := SelectExpr{
+ Aggr: true,
+ Col: &sqlparser.AliasedExpr{Expr: fExp},
}
+ ar.qp.HasAggr = true
+
+ cursor.Replace(sqlparser.NewOffset(len(ar.qp.SelectExprs), fExp))
+ ar.qp.SelectExprs = append(ar.qp.SelectExprs, col)
+ ar.qp.AddedColumn++
return true
}
}
// AggrRewriter extracts
-func (qp *QueryProjection) AggrRewriter() *AggrRewriter {
- return &AggrRewriter{qp: qp}
+func (qp *QueryProjection) AggrRewriter(ctx *plancontext.PlanningContext) *AggrRewriter {
+ return &AggrRewriter{
+ qp: qp,
+ st: ctx.SemTable,
+ }
}
func (qp *QueryProjection) addSelectExpressions(sel *sqlparser.Select) error {
@@ -245,7 +265,7 @@ func (qp *QueryProjection) addSelectExpressions(sel *sqlparser.Select) error {
}
qp.SelectExprs = append(qp.SelectExprs, col)
default:
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] %T in select list", selExp)
+ return vterrors.VT13001(fmt.Sprintf("%T in select list", selExp))
}
}
return nil
@@ -305,7 +325,7 @@ func checkForInvalidAggregations(exp *sqlparser.AliasedExpr) error {
if aggrFunc, isAggregate := node.(sqlparser.AggrFunc); isAggregate {
if aggrFunc.GetArgs() != nil &&
len(aggrFunc.GetArgs()) != 1 {
- return false, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "aggregate functions take a single argument '%s'", sqlparser.String(node))
+ return false, vterrors.VT03001(sqlparser.String(node))
}
return true, nil
}
@@ -314,46 +334,13 @@ func checkForInvalidAggregations(exp *sqlparser.AliasedExpr) error {
}, exp.Expr)
}
-func (qp *QueryProjection) getNonAggrExprNotMatchingGroupByExprs() sqlparser.SelectExpr {
- for _, expr := range qp.SelectExprs {
- if expr.Aggr {
- continue
- }
- if !qp.isExprInGroupByExprs(expr) {
- return expr.Col
- }
- }
- for _, order := range qp.OrderExprs {
- if !qp.isOrderByExprInGroupBy(order) {
- return &sqlparser.AliasedExpr{
- Expr: order.Inner.Expr,
- }
- }
- }
- return nil
-}
-
-func (qp *QueryProjection) isOrderByExprInGroupBy(order OrderBy) bool {
- // ORDER BY NULL or Aggregation functions need not be present in group by
- _, isAggregate := order.WeightStrExpr.(sqlparser.AggrFunc)
- if sqlparser.IsNull(order.Inner.Expr) || isAggregate {
- return true
- }
- for _, groupByExpr := range qp.groupByExprs {
- if sqlparser.EqualsExpr(groupByExpr.WeightStrExpr, order.WeightStrExpr) {
- return true
- }
- }
- return false
-}
-
-func (qp *QueryProjection) isExprInGroupByExprs(expr SelectExpr) bool {
+func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext, expr SelectExpr) bool {
for _, groupByExpr := range qp.groupByExprs {
exp, err := expr.GetExpr()
if err != nil {
return false
}
- if sqlparser.EqualsExpr(groupByExpr.WeightStrExpr, exp) {
+ if ctx.SemTable.EqualsExpr(groupByExpr.WeightStrExpr, exp) {
return true
}
}
@@ -431,7 +418,85 @@ func (qp *QueryProjection) NeedsAggregation() bool {
return qp.HasAggr || len(qp.groupByExprs) > 0
}
-func (qp QueryProjection) onlyAggr() bool {
+// NeedsProjecting returns true if we have projections that need to be evaluated at the vtgate level
+// and can't be pushed down to MySQL
+func (qp *QueryProjection) NeedsProjecting(
+ ctx *plancontext.PlanningContext,
+ pusher func(expr *sqlparser.AliasedExpr) (int, error),
+) (needsVtGateEval bool, expressions []sqlparser.Expr, colNames []string, err error) {
+ for _, se := range qp.SelectExprs {
+ var ae *sqlparser.AliasedExpr
+ ae, err = se.GetAliasedExpr()
+ if err != nil {
+ return false, nil, nil, err
+ }
+
+ expr := ae.Expr
+ colNames = append(colNames, ae.ColumnName())
+
+ if _, isCol := expr.(*sqlparser.ColName); isCol {
+ offset, err := pusher(ae)
+ if err != nil {
+ return false, nil, nil, err
+ }
+ expressions = append(expressions, sqlparser.NewOffset(offset, expr))
+ continue
+ }
+
+ stopOnError := func(sqlparser.SQLNode, sqlparser.SQLNode) bool {
+ return err == nil
+ }
+ rewriter := func(cursor *sqlparser.CopyOnWriteCursor) {
+ col, isCol := cursor.Node().(*sqlparser.ColName)
+ if !isCol {
+ return
+ }
+ var tableInfo semantics.TableInfo
+ tableInfo, err = ctx.SemTable.TableInfoForExpr(col)
+ if err != nil {
+ return
+ }
+ dt, isDT := tableInfo.(*semantics.DerivedTable)
+ if !isDT {
+ return
+ }
+
+ rewritten := semantics.RewriteDerivedTableExpression(col, dt)
+ if sqlparser.ContainsAggregation(rewritten) {
+ offset, tErr := pusher(&sqlparser.AliasedExpr{Expr: col})
+ if tErr != nil {
+ err = tErr
+ return
+ }
+
+ cursor.Replace(sqlparser.NewOffset(offset, col))
+ }
+ }
+ newExpr := sqlparser.CopyOnRewrite(expr, stopOnError, rewriter, nil)
+
+ if err != nil {
+ return
+ }
+
+ if newExpr != expr {
+ // if we changed the expression, it means that we have to evaluate the rest at the vtgate level
+ expressions = append(expressions, newExpr.(sqlparser.Expr))
+ needsVtGateEval = true
+ continue
+ }
+
+ // we did not need to push any parts of this expression down. Let's check if we can push all of it
+ offset, err := pusher(ae)
+ if err != nil {
+ return false, nil, nil, err
+ }
+ expressions = append(expressions, sqlparser.NewOffset(offset, expr))
+ }
+
+ return
+}
+
+func (qp *QueryProjection) onlyAggr() bool {
if !qp.HasAggr {
return false
}
@@ -454,19 +519,16 @@ func (qp *QueryProjection) NeedsDistinct() bool {
return true
}
-func (qp *QueryProjection) AggregationExpressions() (out []Aggr, err error) {
+func (qp *QueryProjection) AggregationExpressions(ctx *plancontext.PlanningContext) (out []Aggr, err error) {
orderBy:
for _, orderExpr := range qp.OrderExprs {
- if qp.isOrderByExprInGroupBy(orderExpr) {
- continue orderBy
- }
- orderExpr := orderExpr.Inner.Expr
+ orderExpr := orderExpr.WeightStrExpr
for _, expr := range qp.SelectExprs {
col, ok := expr.Col.(*sqlparser.AliasedExpr)
if !ok {
continue
}
- if sqlparser.EqualsExpr(col.Expr, orderExpr) {
+ if ctx.SemTable.EqualsExpr(col.Expr, orderExpr) {
continue orderBy // we found the expression we were looking for!
}
}
@@ -486,7 +548,7 @@ orderBy:
idxCopy := idx
if !sqlparser.ContainsAggregation(expr.Col) {
- if !qp.isExprInGroupByExprs(expr) {
+ if !qp.isExprInGroupByExprs(ctx, expr) {
out = append(out, Aggr{
Original: aliasedExpr,
OpCode: engine.AggregateRandom,
@@ -498,12 +560,12 @@ orderBy:
}
fnc, isAggregate := aliasedExpr.Expr.(sqlparser.AggrFunc)
if !isAggregate {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: in scatter query: complex aggregate expression")
+ return nil, vterrors.VT12001("in scatter query: complex aggregate expression")
}
opcode, found := engine.SupportedAggregates[strings.ToLower(fnc.AggrName())]
if !found {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: in scatter query: aggregation function '%s'", fnc.AggrName())
+ return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", fnc.AggrName()))
}
if opcode == engine.AggregateCount {
@@ -537,7 +599,7 @@ orderBy:
// FindSelectExprIndexForExpr returns the index of the given expression in the select expressions, if it is part of it
// returns -1 otherwise.
-func (qp *QueryProjection) FindSelectExprIndexForExpr(expr sqlparser.Expr) (*int, *sqlparser.AliasedExpr) {
+func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (*int, *sqlparser.AliasedExpr) {
colExpr, isCol := expr.(*sqlparser.ColName)
for idx, selectExpr := range qp.SelectExprs {
@@ -551,7 +613,7 @@ func (qp *QueryProjection) FindSelectExprIndexForExpr(expr sqlparser.Expr) (*int
return &idx, aliasedExpr
}
}
- if sqlparser.EqualsExpr(aliasedExpr.Expr, expr) {
+ if ctx.SemTable.EqualsExpr(aliasedExpr.Expr, expr) {
return &idx, aliasedExpr
}
}
@@ -563,7 +625,7 @@ func (qp *QueryProjection) FindSelectExprIndexForExpr(expr sqlparser.Expr) (*int
// so we can simply re-arrange the column order
// We are also free to add more ORDER BY columns than the user asked for which we leverage,
// so the input is already ordered according to the GROUP BY columns used
-func (qp *QueryProjection) AlignGroupByAndOrderBy() {
+func (qp *QueryProjection) AlignGroupByAndOrderBy(ctx *plancontext.PlanningContext) {
// The ORDER BY can be performed before the OA
var newGrouping []GroupBy
@@ -581,7 +643,7 @@ func (qp *QueryProjection) AlignGroupByAndOrderBy() {
used := make([]bool, len(qp.groupByExprs))
for _, orderExpr := range qp.OrderExprs {
for i, groupingExpr := range qp.groupByExprs {
- if !used[i] && sqlparser.EqualsExpr(groupingExpr.WeightStrExpr, orderExpr.WeightStrExpr) {
+ if !used[i] && ctx.SemTable.EqualsExpr(groupingExpr.WeightStrExpr, orderExpr.WeightStrExpr) {
newGrouping = append(newGrouping, groupingExpr)
used[i] = true
}
@@ -614,12 +676,12 @@ func (qp *QueryProjection) GetColumnCount() int {
func checkForInvalidGroupingExpressions(expr sqlparser.Expr) error {
return sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) {
if _, isAggregate := node.(sqlparser.AggrFunc); isAggregate {
- return false, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongGroupField, "Can't group on '%s'", sqlparser.String(expr))
+ return false, vterrors.VT03005(sqlparser.String(expr))
}
_, isSubQ := node.(*sqlparser.Subquery)
arg, isArg := node.(sqlparser.Argument)
if isSubQ || (isArg && strings.HasPrefix(string(arg), "__sq")) {
- return false, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: subqueries disallowed in GROUP BY")
+ return false, vterrors.VT12001("subqueries in GROUP BY")
}
return true, nil
}, expr)
diff --git a/go/vt/vtgate/planbuilder/abstract/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go
similarity index 87%
rename from go/vt/vtgate/planbuilder/abstract/queryprojection_test.go
rename to go/vt/vtgate/planbuilder/operators/queryprojection_test.go
index 5fffa07b45a..13a5291f680 100644
--- a/go/vt/vtgate/planbuilder/abstract/queryprojection_test.go
+++ b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go
@@ -14,11 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package abstract
+package operators
import (
"testing"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
"github.com/stretchr/testify/assert"
@@ -75,10 +76,10 @@ func TestQP(t *testing.T) {
},
}, {
sql: "select count(*) b from user group by b",
- expErr: "Can't group on 'count(*)'",
+ expErr: "cannot group on 'count(*)'",
},
}
-
+ ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()}
for _, tcase := range tcases {
t.Run(tcase.sql, func(t *testing.T) {
stmt, err := sqlparser.Parse(tcase.sql)
@@ -88,7 +89,7 @@ func TestQP(t *testing.T) {
_, err = semantics.Analyze(sel, "", &semantics.FakeSI{})
require.NoError(t, err)
- qp, err := CreateQPFromSelect(sel)
+ qp, err := CreateQPFromSelect(ctx, sel)
if tcase.expErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tcase.expErr)
@@ -97,8 +98,8 @@ func TestQP(t *testing.T) {
assert.Equal(t, len(sel.SelectExprs), len(qp.SelectExprs))
require.Equal(t, len(tcase.expOrder), len(qp.OrderExprs), "not enough order expressions in QP")
for index, expOrder := range tcase.expOrder {
- assert.True(t, sqlparser.EqualsSQLNode(expOrder.Inner, qp.OrderExprs[index].Inner), "want: %+v, got %+v", sqlparser.String(expOrder.Inner), sqlparser.String(qp.OrderExprs[index].Inner))
- assert.True(t, sqlparser.EqualsSQLNode(expOrder.WeightStrExpr, qp.OrderExprs[index].WeightStrExpr), "want: %v, got %v", sqlparser.String(expOrder.WeightStrExpr), sqlparser.String(qp.OrderExprs[index].WeightStrExpr))
+ assert.True(t, sqlparser.Equals.SQLNode(expOrder.Inner, qp.OrderExprs[index].Inner), "want: %+v, got %+v", sqlparser.String(expOrder.Inner), sqlparser.String(qp.OrderExprs[index].Inner))
+ assert.True(t, sqlparser.Equals.SQLNode(expOrder.WeightStrExpr, qp.OrderExprs[index].WeightStrExpr), "want: %v, got %v", sqlparser.String(expOrder.WeightStrExpr), sqlparser.String(qp.OrderExprs[index].WeightStrExpr))
}
}
})
@@ -194,8 +195,8 @@ func TestQPSimplifiedExpr(t *testing.T) {
sel := ast.(*sqlparser.Select)
_, err = semantics.Analyze(sel, "", &semantics.FakeSI{})
require.NoError(t, err)
-
- qp, err := CreateQPFromSelect(sel)
+ ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()}
+ qp, err := CreateQPFromSelect(ctx, sel)
require.NoError(t, err)
require.Equal(t, tc.expected[1:], qp.toString())
})
diff --git a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go b/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go
new file mode 100644
index 00000000000..42839c58deb
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rewrite
+
+import (
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+)
+
+type (
+ Func func(ops.Operator) (ops.Operator, TreeIdentity, error)
+ BreakableFunc func(ops.Operator) (ops.Operator, TreeIdentity, VisitRule, error)
+
+ // TreeIdentity tracks modifications to node and expression trees.
+ // Only return SameTree when it is acceptable to return the original
+ // input and discard the returned result as a performance improvement.
+ TreeIdentity bool
+
+ // VisitRule signals to the rewriter if the children of this operator should be visited or not
+ VisitRule bool
+)
+
+const (
+ SameTree TreeIdentity = false
+ NewTree TreeIdentity = true
+
+ VisitChildren VisitRule = true
+ SkipChildren VisitRule = false
+)
+
+// Visit allows for the walking of the operator tree. If any error is returned, the walk is aborted
+func Visit(root ops.Operator, visitor func(ops.Operator) error) error {
+ _, err := TopDown(root, func(op ops.Operator) (ops.Operator, TreeIdentity, VisitRule, error) {
+ err := visitor(op)
+ if err != nil {
+ return nil, SameTree, SkipChildren, err
+ }
+ return op, SameTree, VisitChildren, nil
+ })
+ return err
+}
+
+// BottomUp rewrites an operator tree from the bottom up. BottomUp applies a transformation function to
+// the given operator tree from the bottom up. Each callback [f] returns a TreeIdentity that is aggregated
+// into a final output indicating whether the operator tree was changed.
+func BottomUp(root ops.Operator, f Func) (ops.Operator, error) {
+ op, _, err := bottomUp(root, f)
+ if err != nil {
+ return nil, err
+ }
+ return op, nil
+}
+
+// TopDown applies a transformation function to the given operator tree from the bottom up. =
+// Each callback [f] returns a TreeIdentity that is aggregated into a final output indicating whether the
+// operator tree was changed.
+// The callback also returns a VisitRule that signals whether the children of this operator should be visited or not
+func TopDown(in ops.Operator, rewriter BreakableFunc) (ops.Operator, error) {
+ op, _, err := breakableTopDown(in, rewriter)
+ return op, err
+}
+
+func bottomUp(root ops.Operator, rewriter Func) (ops.Operator, TreeIdentity, error) {
+ oldInputs := root.Inputs()
+ anythingChanged := false
+ newInputs := make([]ops.Operator, len(oldInputs))
+ for i, operator := range oldInputs {
+ in, changed, err := bottomUp(operator, rewriter)
+ if err != nil {
+ return nil, SameTree, err
+ }
+ if changed == NewTree {
+ anythingChanged = true
+ }
+ newInputs[i] = in
+ }
+
+ if anythingChanged {
+ root = root.Clone(newInputs)
+ }
+
+ newOp, treeIdentity, err := rewriter(root)
+ if err != nil {
+ return nil, SameTree, err
+ }
+ if anythingChanged {
+ treeIdentity = NewTree
+ }
+ return newOp, treeIdentity, nil
+}
+
+func breakableTopDown(in ops.Operator, rewriter BreakableFunc) (ops.Operator, TreeIdentity, error) {
+ newOp, identity, visit, err := rewriter(in)
+ if err != nil || visit == SkipChildren {
+ return newOp, identity, err
+ }
+
+ anythingChanged := identity == NewTree
+
+ oldInputs := newOp.Inputs()
+ newInputs := make([]ops.Operator, len(oldInputs))
+ for i, oldInput := range oldInputs {
+ newInputs[i], identity, err = breakableTopDown(oldInput, rewriter)
+ anythingChanged = anythingChanged || identity == NewTree
+ if err != nil {
+ return nil, SameTree, err
+ }
+ }
+
+ if anythingChanged {
+ return newOp.Clone(newInputs), NewTree, nil
+ }
+
+ return newOp, SameTree, nil
+}
diff --git a/go/vt/vtgate/planbuilder/physical/route.go b/go/vt/vtgate/planbuilder/operators/route.go
similarity index 72%
rename from go/vt/vtgate/planbuilder/physical/route.go
rename to go/vt/vtgate/planbuilder/operators/route.go
index ee39c9217e5..1c1772220f0 100644
--- a/go/vt/vtgate/planbuilder/physical/route.go
+++ b/go/vt/vtgate/planbuilder/operators/route.go
@@ -14,23 +14,24 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package physical
+package operators
import (
+ "vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/evalengine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
"vitess.io/vitess/go/vt/vtgate/semantics"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
type (
Route struct {
- Source abstract.PhysicalOperator
+ Source ops.Operator
RouteOpCode engine.Opcode
Keyspace *vindexes.Keyspace
@@ -52,6 +53,13 @@ type (
// TargetDestination specifies an explicit target destination tablet type
TargetDestination key.Destination
+
+ // Alternates contains alternate routes to equivalent sources in
+ // other keyspaces.
+ Alternates map[*vindexes.Keyspace]*Route
+
+ // Routes that have been merged into this one.
+ MergedWith []*Route
}
// VindexPlusPredicates is a struct used to store all the predicates that the vindex can be used to query
@@ -84,16 +92,11 @@ type (
}
)
-var _ abstract.PhysicalOperator = (*Route)(nil)
+var _ ops.PhysicalOperator = (*Route)(nil)
// IPhysical implements the PhysicalOperator interface
func (*Route) IPhysical() {}
-// TableID implements the Operator interface
-func (r *Route) TableID() semantics.TableSet {
- return r.Source.TableID()
-}
-
// Cost implements the Operator interface
func (r *Route) Cost() int {
switch r.RouteOpCode {
@@ -119,10 +122,10 @@ func (r *Route) Cost() int {
return 1
}
-// Clone implements the PhysicalOperator interface
-func (r *Route) Clone() abstract.PhysicalOperator {
+// Clone implements the Operator interface
+func (r *Route) Clone(inputs []ops.Operator) ops.Operator {
cloneRoute := *r
- cloneRoute.Source = r.Source.Clone()
+ cloneRoute.Source = inputs[0]
cloneRoute.VindexPreds = make([]*VindexPlusPredicates, len(r.VindexPreds))
for i, pred := range r.VindexPreds {
// we do this to create a copy of the struct
@@ -132,6 +135,11 @@ func (r *Route) Clone() abstract.PhysicalOperator {
return &cloneRoute
}
+// Inputs implements the Operator interface
+func (r *Route) Inputs() []ops.Operator {
+ return []ops.Operator{r.Source}
+}
+
func (r *Route) UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error {
r.SeenPredicates = append(r.SeenPredicates, expr)
return r.tryImprovingVindex(ctx, expr)
@@ -445,21 +453,6 @@ func (r *Route) canImprove() bool {
return r.RouteOpCode != engine.None
}
-// UnsolvedPredicates implements the Operator interface
-func (r *Route) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return r.Source.UnsolvedPredicates(semTable)
-}
-
-// CheckValid implements the Operator interface
-func (r *Route) CheckValid() error {
- return r.Source.CheckValid()
-}
-
-// Compact implements the Operator interface
-func (r *Route) Compact(semTable *semantics.SemTable) (abstract.Operator, error) {
- return r, nil
-}
-
func (r *Route) IsSingleShard() bool {
switch r.RouteOpCode {
case engine.Unsharded, engine.DBA, engine.Next, engine.EqualUnique, engine.Reference:
@@ -728,3 +721,254 @@ func (r *Route) planIsExpr(ctx *plancontext.PlanningContext, node *sqlparser.IsE
return r.haveMatchingVindex(ctx, node, vdValue, column, val, opcodeF, justTheVindex)
}
+
+// createRoute returns either an information_schema route, or else consults the
+// VSchema to find a suitable table, and then creates a route from that.
+func createRoute(
+ ctx *plancontext.PlanningContext,
+ queryTable *QueryTable,
+ solves semantics.TableSet,
+) (ops.Operator, error) {
+ if queryTable.IsInfSchema {
+ return createInfSchemaRoute(ctx, queryTable)
+ }
+ return findVSchemaTableAndCreateRoute(ctx, queryTable, queryTable.Table, solves, true /*planAlternates*/)
+}
+
+// findVSchemaTableAndCreateRoute consults the VSchema to find a suitable
+// table, and then creates a route from that.
+func findVSchemaTableAndCreateRoute(
+ ctx *plancontext.PlanningContext,
+ queryTable *QueryTable,
+ tableName sqlparser.TableName,
+ solves semantics.TableSet,
+ planAlternates bool,
+) (*Route, error) {
+ vschemaTable, _, _, _, target, err := ctx.VSchema.FindTableOrVindex(tableName)
+ if target != nil {
+ return nil, vterrors.VT12001("SELECT with a target destination")
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return createRouteFromVSchemaTable(
+ ctx,
+ queryTable,
+ vschemaTable,
+ solves,
+ planAlternates,
+ )
+}
+
+// createRouteFromTable creates a route from the given VSchema table.
+func createRouteFromVSchemaTable(
+ ctx *plancontext.PlanningContext,
+ queryTable *QueryTable,
+ vschemaTable *vindexes.Table,
+ solves semantics.TableSet,
+ planAlternates bool,
+) (*Route, error) {
+ if vschemaTable.Name.String() != queryTable.Table.Name.String() {
+ // we are dealing with a routed table
+ queryTable = queryTable.Clone()
+ name := queryTable.Table.Name
+ queryTable.Table.Name = vschemaTable.Name
+ astTable, ok := queryTable.Alias.Expr.(sqlparser.TableName)
+ if !ok {
+ return nil, vterrors.VT13001("a derived table should never be a routed table")
+ }
+ realTableName := sqlparser.NewIdentifierCS(vschemaTable.Name.String())
+ astTable.Name = realTableName
+ if queryTable.Alias.As.IsEmpty() {
+ // if the user hasn't specified an alias, we'll insert one here so the old table name still works
+ queryTable.Alias.As = sqlparser.NewIdentifierCS(name.String())
+ }
+ }
+ plan := &Route{
+ Source: &Table{
+ QTable: queryTable,
+ VTable: vschemaTable,
+ },
+ Keyspace: vschemaTable.Keyspace,
+ }
+
+ for _, columnVindex := range vschemaTable.ColumnVindexes {
+ // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table
+ // Otherwise, we ignore this vindex for selection.
+ if columnVindex.IsBackfilling() {
+ continue
+ }
+
+ plan.VindexPreds = append(plan.VindexPreds, &VindexPlusPredicates{ColVindex: columnVindex, TableID: solves})
+ }
+
+ switch {
+ case vschemaTable.Type == vindexes.TypeSequence:
+ plan.RouteOpCode = engine.Next
+ case vschemaTable.Type == vindexes.TypeReference:
+ plan.RouteOpCode = engine.Reference
+ case !vschemaTable.Keyspace.Sharded:
+ plan.RouteOpCode = engine.Unsharded
+ case vschemaTable.Pinned != nil:
+ // Pinned tables have their keyspace ids already assigned.
+ // Use the Binary vindex, which is the identity function
+ // for keyspace id.
+ plan.RouteOpCode = engine.EqualUnique
+ vindex, _ := vindexes.NewBinary("binary", nil)
+ plan.Selected = &VindexOption{
+ Ready: true,
+ Values: []evalengine.Expr{evalengine.NewLiteralString(vschemaTable.Pinned, collations.TypedCollation{})},
+ ValueExprs: nil,
+ Predicates: nil,
+ OpCode: engine.EqualUnique,
+ FoundVindex: vindex,
+ Cost: Cost{
+ OpCode: engine.EqualUnique,
+ },
+ }
+ default:
+ plan.RouteOpCode = engine.Scatter
+ }
+ for _, predicate := range queryTable.Predicates {
+ err := plan.UpdateRoutingLogic(ctx, predicate)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if plan.RouteOpCode == engine.Scatter && len(queryTable.Predicates) > 0 {
+ // If we have a scatter query, it's worth spending a little extra time seeing if we can't improve it
+ oldPredicates := queryTable.Predicates
+ queryTable.Predicates = nil
+ plan.SeenPredicates = nil
+ for _, pred := range oldPredicates {
+ rewritten := sqlparser.RewritePredicate(pred)
+ predicates := sqlparser.SplitAndExpression(nil, rewritten.(sqlparser.Expr))
+ for _, predicate := range predicates {
+ queryTable.Predicates = append(queryTable.Predicates, predicate)
+ err := plan.UpdateRoutingLogic(ctx, predicate)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if plan.RouteOpCode == engine.Scatter {
+ // if we _still_ haven't found a better route, we can run this additional rewrite on any ORs we have
+ for _, expr := range queryTable.Predicates {
+ or, ok := expr.(*sqlparser.OrExpr)
+ if !ok {
+ continue
+ }
+ for _, predicate := range sqlparser.ExtractINFromOR(or) {
+ err := plan.UpdateRoutingLogic(ctx, predicate)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+ }
+
+ if planAlternates {
+ alternates, err := createAlternateRoutesFromVSchemaTable(
+ ctx,
+ queryTable,
+ vschemaTable,
+ solves,
+ )
+ if err != nil {
+ return nil, err
+ }
+ plan.Alternates = alternates
+ }
+
+ return plan, nil
+}
+
+func createAlternateRoutesFromVSchemaTable(
+ ctx *plancontext.PlanningContext,
+ queryTable *QueryTable,
+ vschemaTable *vindexes.Table,
+ solves semantics.TableSet,
+) (map[*vindexes.Keyspace]*Route, error) {
+ routes := make(map[*vindexes.Keyspace]*Route)
+
+ switch vschemaTable.Type {
+ case "", vindexes.TypeReference:
+ for ksName, referenceTable := range vschemaTable.ReferencedBy {
+ route, err := findVSchemaTableAndCreateRoute(
+ ctx,
+ queryTable,
+ sqlparser.TableName{
+ Name: referenceTable.Name,
+ Qualifier: sqlparser.NewIdentifierCS(ksName),
+ },
+ solves,
+ false, /*planAlternates*/
+ )
+ if err != nil {
+ return nil, err
+ }
+ routes[route.Keyspace] = route
+ }
+
+ if vschemaTable.Source != nil {
+ route, err := findVSchemaTableAndCreateRoute(
+ ctx,
+ queryTable,
+ vschemaTable.Source.TableName,
+ solves,
+ false, /*planAlternates*/
+ )
+ if err != nil {
+ return nil, err
+ }
+ routes[route.Keyspace] = route
+ }
+ }
+
+ return routes, nil
+}
+
+func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ err := r.UpdateRoutingLogic(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+ newSrc, err := r.Source.AddPredicate(ctx, expr)
+ if err != nil {
+ return nil, err
+ }
+ r.Source = newSrc
+ return r, err
+}
+
+func (r *Route) AddColumn(ctx *plancontext.PlanningContext, e sqlparser.Expr) (int, error) {
+ return r.Source.AddColumn(ctx, e)
+}
+
+func (r *Route) AlternateInKeyspace(keyspace *vindexes.Keyspace) *Route {
+ if keyspace.Name == r.Keyspace.Name {
+ return nil
+ }
+
+ if route, ok := r.Alternates[keyspace]; ok {
+ return route
+ }
+
+ return nil
+}
+
+// TablesUsed returns tables used by MergedWith routes, which are not included
+// in Inputs() and thus not a part of the operator tree
+func (r *Route) TablesUsed() []string {
+ addString, collect := collectSortedUniqueStrings()
+ for _, mw := range r.MergedWith {
+ for _, u := range TablesUsed(mw) {
+ addString(u)
+ }
+ }
+ return collect()
+}
diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go
new file mode 100644
index 00000000000..5e9460dc7ab
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/route_planning.go
@@ -0,0 +1,859 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+
+ "vitess.io/vitess/go/vt/key"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
+
+type (
+ tableSetPair struct {
+ left, right semantics.TableSet
+ }
+
+ opCacheMap map[tableSetPair]ops.Operator
+)
+
+// TransformToPhysical takes an operator tree and rewrites any parts that have not yet been planned as physical operators.
+// This is where a lot of the optimisations of the query plans are done.
+// Here we try to merge query parts into the same route primitives. At the end of this process,
+// all the operators in the tree are guaranteed to be PhysicalOperators
+func transformToPhysical(ctx *plancontext.PlanningContext, in ops.Operator) (ops.Operator, error) {
+ op, err := rewrite.BottomUp(in, func(operator ops.Operator) (ops.Operator, rewrite.TreeIdentity, error) {
+ switch op := operator.(type) {
+ case *QueryGraph:
+ return optimizeQueryGraph(ctx, op)
+ case *Join:
+ return optimizeJoin(ctx, op)
+ case *Derived:
+ return optimizeDerived(ctx, op)
+ case *SubQuery:
+ return optimizeSubQuery(ctx, op)
+ case *Filter:
+ return optimizeFilter(op)
+ default:
+ return operator, rewrite.SameTree, nil
+ }
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = rewrite.Visit(op, func(op ops.Operator) error {
+ if _, isPhys := op.(ops.PhysicalOperator); !isPhys {
+ return vterrors.VT13001(fmt.Sprintf("failed to transform %T to a physical operator", op))
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return Compact(ctx, op)
+}
+
+func optimizeFilter(op *Filter) (ops.Operator, rewrite.TreeIdentity, error) {
+ if route, ok := op.Source.(*Route); ok {
+ // let's push the filter into the route
+ op.Source = route.Source
+ route.Source = op
+ return route, rewrite.NewTree, nil
+ }
+
+ return op, rewrite.SameTree, nil
+}
+
+func optimizeDerived(ctx *plancontext.PlanningContext, op *Derived) (ops.Operator, rewrite.TreeIdentity, error) {
+ innerRoute, ok := op.Source.(*Route)
+ if !ok {
+ return op, rewrite.SameTree, nil
+ }
+
+ if !(innerRoute.RouteOpCode == engine.EqualUnique) && !op.IsMergeable(ctx) {
+ // no need to check anything if we are sure that we will only hit a single shard
+ return op, rewrite.SameTree, nil
+ }
+
+ op.Source = innerRoute.Source
+ innerRoute.Source = op
+
+ return innerRoute, rewrite.NewTree, nil
+}
+
+func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (ops.Operator, rewrite.TreeIdentity, error) {
+ join, err := mergeOrJoin(ctx, op.LHS, op.RHS, sqlparser.SplitAndExpression(nil, op.Predicate), !op.LeftJoin)
+ if err != nil {
+ return nil, rewrite.SameTree, err
+ }
+ return join, rewrite.NewTree, nil
+}
+
+func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result ops.Operator, changed rewrite.TreeIdentity, err error) {
+ changed = rewrite.NewTree
+ switch {
+ case ctx.PlannerVersion == querypb.ExecuteOptions_Gen4Left2Right:
+ result, err = leftToRightSolve(ctx, op)
+ default:
+ result, err = greedySolve(ctx, op)
+ }
+
+ unresolved := op.UnsolvedPredicates(ctx.SemTable)
+ if len(unresolved) > 0 {
+ // if we have any predicates that none of the joins or tables took care of,
+ // we add a single filter on top, so we don't lose it. This is used for sub-query planning
+ result = newFilter(result, ctx.SemTable.AndExpressions(unresolved...))
+ }
+
+ return
+}
+
+func buildVindexTableForDML(ctx *plancontext.PlanningContext, tableInfo semantics.TableInfo, table *QueryTable, dmlType string) (*vindexes.Table, engine.Opcode, key.Destination, error) {
+ vindexTable := tableInfo.GetVindexTable()
+ opCode := engine.Unsharded
+ if vindexTable.Keyspace.Sharded {
+ opCode = engine.Scatter
+ }
+
+ if vindexTable.Source != nil {
+ sourceTable, _, _, _, _, err := ctx.VSchema.FindTableOrVindex(vindexTable.Source.TableName)
+ if err != nil {
+ return nil, 0, nil, err
+ }
+ vindexTable = sourceTable
+ }
+
+ var dest key.Destination
+ var typ topodatapb.TabletType
+ var err error
+ tblName, ok := table.Alias.Expr.(sqlparser.TableName)
+ if ok {
+ _, _, _, typ, dest, err = ctx.VSchema.FindTableOrVindex(tblName)
+ if err != nil {
+ return nil, 0, nil, err
+ }
+ if dest != nil {
+ if typ != topodatapb.TabletType_PRIMARY {
+ return nil, 0, nil, vterrors.VT09002(dmlType)
+ }
+ // we are dealing with an explicitly targeted UPDATE
+ opCode = engine.ByDestination
+ }
+ }
+ return vindexTable, opCode, dest, nil
+}
+
+func generateOwnedVindexQuery(tblExpr sqlparser.TableExpr, del *sqlparser.Delete, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) string {
+ buf := sqlparser.NewTrackedBuffer(nil)
+ for idx, col := range ksidCols {
+ if idx == 0 {
+ buf.Myprintf("select %v", col)
+ } else {
+ buf.Myprintf(", %v", col)
+ }
+ }
+ for _, cv := range table.Owned {
+ for _, column := range cv.Columns {
+ buf.Myprintf(", %v", column)
+ }
+ }
+ buf.Myprintf(" from %v%v%v%v for update", tblExpr, del.Where, del.OrderBy, del.Limit)
+ return buf.String()
+}
+
+func getUpdateVindexInformation(
+ updStmt *sqlparser.Update,
+ vindexTable *vindexes.Table,
+ tableID semantics.TableSet,
+ predicates []sqlparser.Expr,
+) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, error) {
+ if !vindexTable.Keyspace.Sharded {
+ return nil, nil, "", nil
+ }
+
+ primaryVindex, vindexAndPredicates, err := getVindexInformation(tableID, predicates, vindexTable)
+ if err != nil {
+ return nil, nil, "", err
+ }
+
+ changedVindexValues, ownedVindexQuery, err := buildChangedVindexesValues(updStmt, vindexTable, primaryVindex.Columns)
+ if err != nil {
+ return nil, nil, "", err
+ }
+ return vindexAndPredicates, changedVindexValues, ownedVindexQuery, nil
+}
+
+/*
+ The greedy planner will plan a query by finding first finding the best route plan for every table.
+ Then, iteratively, it finds the cheapest join that can be produced between the remaining plans,
+ and removes the two inputs to this cheapest plan and instead adds the join.
+ As an optimization, it first only considers joining tables that have predicates defined between them
+*/
+func greedySolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) {
+ routeOps, err := seedOperatorList(ctx, qg)
+ planCache := opCacheMap{}
+ if err != nil {
+ return nil, err
+ }
+
+ op, err := mergeRoutes(ctx, qg, routeOps, planCache, false)
+ if err != nil {
+ return nil, err
+ }
+ return op, nil
+}
+
+func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) {
+ plans, err := seedOperatorList(ctx, qg)
+ if err != nil {
+ return nil, err
+ }
+
+ var acc ops.Operator
+ for _, plan := range plans {
+ if acc == nil {
+ acc = plan
+ continue
+ }
+ joinPredicates := qg.GetPredicates(TableID(acc), TableID(plan))
+ acc, err = mergeOrJoin(ctx, acc, plan, joinPredicates, true)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return acc, nil
+}
+
+// seedOperatorList returns a route for each table in the qg
+func seedOperatorList(ctx *plancontext.PlanningContext, qg *QueryGraph) ([]ops.Operator, error) {
+ plans := make([]ops.Operator, len(qg.Tables))
+
+ // we start by seeding the table with the single routes
+ for i, table := range qg.Tables {
+ solves := ctx.SemTable.TableSetFor(table.Alias)
+ plan, err := createRoute(ctx, table, solves)
+ if err != nil {
+ return nil, err
+ }
+ if qg.NoDeps != nil {
+ plan, err = plan.AddPredicate(ctx, qg.NoDeps)
+ if err != nil {
+ return nil, err
+ }
+ }
+ plans[i] = plan
+ }
+ return plans, nil
+}
+
+func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) (ops.Operator, error) {
+ ks, err := ctx.VSchema.AnyKeyspace()
+ if err != nil {
+ return nil, err
+ }
+ r := &Route{
+ RouteOpCode: engine.DBA,
+ Source: &Table{
+ QTable: table,
+ VTable: &vindexes.Table{
+ Name: table.Table.Name,
+ Keyspace: ks,
+ },
+ },
+ Keyspace: ks,
+ }
+ for _, pred := range table.Predicates {
+ isTableSchema, bvName, out, err := extractInfoSchemaRoutingPredicate(pred, ctx.ReservedVars)
+ if err != nil {
+ return nil, err
+ }
+ if out == nil {
+ // we didn't find a predicate to use for routing, continue to look for next predicate
+ continue
+ }
+
+ if isTableSchema {
+ r.SysTableTableSchema = append(r.SysTableTableSchema, out)
+ } else {
+ if r.SysTableTableName == nil {
+ r.SysTableTableName = map[string]evalengine.Expr{}
+ }
+ r.SysTableTableName[bvName] = out
+ }
+ }
+ return r, nil
+}
+
+func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps []ops.Operator, planCache opCacheMap, crossJoinsOK bool) (ops.Operator, error) {
+ if len(physicalOps) == 0 {
+ return nil, nil
+ }
+ for len(physicalOps) > 1 {
+ bestTree, lIdx, rIdx, err := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK)
+ if err != nil {
+ return nil, err
+ }
+ // if we found a plan, we'll replace the two plans that were joined with the join plan created
+ if bestTree != nil {
+ // we remove one plan, and replace the other
+ if rIdx > lIdx {
+ physicalOps = removeAt(physicalOps, rIdx)
+ physicalOps = removeAt(physicalOps, lIdx)
+ } else {
+ physicalOps = removeAt(physicalOps, lIdx)
+ physicalOps = removeAt(physicalOps, rIdx)
+ }
+ physicalOps = append(physicalOps, bestTree)
+ } else {
+ if crossJoinsOK {
+ return nil, vterrors.VT13001("should not happen: we should be able to merge cross joins")
+ }
+ // we will only fail to find a join plan when there are only cross joins left
+ // when that happens, we switch over to allow cross joins as well.
+ // this way we prioritize joining physicalOps with predicates first
+ crossJoinsOK = true
+ }
+ }
+ return physicalOps[0], nil
+}
+
+func removeAt(plans []ops.Operator, idx int) []ops.Operator {
+ return append(plans[:idx], plans[idx+1:]...)
+}
+
+func findBestJoin(
+ ctx *plancontext.PlanningContext,
+ qg *QueryGraph,
+ plans []ops.Operator,
+ planCache opCacheMap,
+ crossJoinsOK bool,
+) (bestPlan ops.Operator, lIdx int, rIdx int, err error) {
+ for i, lhs := range plans {
+ for j, rhs := range plans {
+ if i == j {
+ continue
+ }
+ joinPredicates := qg.GetPredicates(TableID(lhs), TableID(rhs))
+ if len(joinPredicates) == 0 && !crossJoinsOK {
+ // if there are no predicates joining the two tables,
+ // creating a join between them would produce a
+ // cartesian product, which is almost always a bad idea
+ continue
+ }
+ plan, err := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates)
+ if err != nil {
+ return nil, 0, 0, err
+ }
+ if bestPlan == nil || CostOf(plan) < CostOf(bestPlan) {
+ bestPlan = plan
+ // remember which plans we based on, so we can remove them later
+ lIdx = i
+ rIdx = j
+ }
+ }
+ }
+ return bestPlan, lIdx, rIdx, nil
+}
+
+func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr) (ops.Operator, error) {
+ solves := tableSetPair{left: TableID(lhs), right: TableID(rhs)}
+ cachedPlan := cm[solves]
+ if cachedPlan != nil {
+ return cachedPlan, nil
+ }
+
+ join, err := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true)
+ if err != nil {
+ return nil, err
+ }
+ cm[solves] = join
+ return join, nil
+}
+
+// requiresSwitchingSides will return true if any of the operators with the root from the given operator tree
+// is of the type that should not be on the RHS of a join
+func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) bool {
+ required := false
+
+ _ = rewrite.Visit(op, func(current ops.Operator) error {
+ derived, isDerived := current.(*Derived)
+
+ if isDerived && !derived.IsMergeable(ctx) {
+ required = true
+ return io.EOF
+ }
+
+ return nil
+ })
+
+ return required
+}
+
+func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, inner bool) (ops.Operator, error) {
+ merger := func(a, b *Route) (*Route, error) {
+ return createRouteOperatorForJoin(ctx, a, b, joinPredicates, inner)
+ }
+
+ newPlan, _ := tryMerge(ctx, lhs, rhs, joinPredicates, merger)
+ if newPlan != nil {
+ return newPlan, nil
+ }
+
+ if len(joinPredicates) > 0 && requiresSwitchingSides(ctx, rhs) {
+ if !inner {
+ return nil, vterrors.VT12001("LEFT JOIN with derived tables")
+ }
+
+ if requiresSwitchingSides(ctx, lhs) {
+ return nil, vterrors.VT12001("JOIN between derived tables")
+ }
+
+ join := NewApplyJoin(Clone(rhs), Clone(lhs), nil, !inner)
+ return pushJoinPredicates(ctx, joinPredicates, join)
+ }
+
+ join := NewApplyJoin(Clone(lhs), Clone(rhs), nil, !inner)
+ return pushJoinPredicates(ctx, joinPredicates, join)
+}
+
+func createRouteOperatorForJoin(ctx *plancontext.PlanningContext, aRoute, bRoute *Route, joinPredicates []sqlparser.Expr, inner bool) (*Route, error) {
+ // append system table names from both the routes.
+ sysTableName := aRoute.SysTableTableName
+ if sysTableName == nil {
+ sysTableName = bRoute.SysTableTableName
+ } else {
+ for k, v := range bRoute.SysTableTableName {
+ sysTableName[k] = v
+ }
+ }
+
+ join := NewApplyJoin(aRoute.Source, bRoute.Source, ctx.SemTable.AndExpressions(joinPredicates...), !inner)
+ r := &Route{
+ RouteOpCode: aRoute.RouteOpCode,
+ Keyspace: aRoute.Keyspace,
+ VindexPreds: append(aRoute.VindexPreds, bRoute.VindexPreds...),
+ SysTableTableSchema: append(aRoute.SysTableTableSchema, bRoute.SysTableTableSchema...),
+ SeenPredicates: append(aRoute.SeenPredicates, bRoute.SeenPredicates...),
+ SysTableTableName: sysTableName,
+ Source: join,
+ MergedWith: []*Route{bRoute},
+ }
+
+ if aRoute.SelectedVindex() == bRoute.SelectedVindex() {
+ r.Selected = aRoute.Selected
+ }
+
+ return r, nil
+}
+
+type mergeFunc func(a, b *Route) (*Route, error)
+
+func operatorsToRoutes(a, b ops.Operator) (*Route, *Route) {
+ aRoute, ok := a.(*Route)
+ if !ok {
+ return nil, nil
+ }
+ bRoute, ok := b.(*Route)
+ if !ok {
+ return nil, nil
+ }
+ return aRoute, bRoute
+}
+
+func tryMerge(
+ ctx *plancontext.PlanningContext,
+ a, b ops.Operator,
+ joinPredicates []sqlparser.Expr,
+ merger mergeFunc,
+) (ops.Operator, error) {
+ aRoute, bRoute := operatorsToRoutes(Clone(a), Clone(b))
+ if aRoute == nil || bRoute == nil {
+ return nil, nil
+ }
+
+ sameKeyspace := aRoute.Keyspace == bRoute.Keyspace
+
+ if !sameKeyspace {
+ if altARoute := aRoute.AlternateInKeyspace(bRoute.Keyspace); altARoute != nil {
+ aRoute = altARoute
+ sameKeyspace = true
+ } else if altBRoute := bRoute.AlternateInKeyspace(aRoute.Keyspace); altBRoute != nil {
+ bRoute = altBRoute
+ sameKeyspace = true
+ }
+ }
+
+ if sameKeyspace || (isDualTable(aRoute) || isDualTable(bRoute)) {
+ tree, err := tryMergeReferenceTable(aRoute, bRoute, merger)
+ if tree != nil || err != nil {
+ return tree, err
+ }
+ }
+
+ switch aRoute.RouteOpCode {
+ case engine.Unsharded, engine.DBA:
+ if aRoute.RouteOpCode == bRoute.RouteOpCode && sameKeyspace {
+ return merger(aRoute, bRoute)
+ }
+ case engine.EqualUnique:
+ // If the two routes fully match, they can be merged together.
+ if bRoute.RouteOpCode == engine.EqualUnique {
+ aVdx := aRoute.SelectedVindex()
+ bVdx := bRoute.SelectedVindex()
+ aExpr := aRoute.VindexExpressions()
+ bExpr := bRoute.VindexExpressions()
+ if aVdx == bVdx && gen4ValuesEqual(ctx, aExpr, bExpr) {
+ return merger(aRoute, bRoute)
+ }
+ }
+
+ // If the two routes don't match, fall through to the next case and see if we
+ // can merge via join predicates instead.
+ fallthrough
+
+ case engine.Scatter, engine.IN, engine.None:
+ if len(joinPredicates) == 0 {
+ // If we are doing two Scatters, we have to make sure that the
+ // joins are on the correct vindex to allow them to be merged
+ // no join predicates - no vindex
+ return nil, nil
+ }
+
+ if !sameKeyspace {
+ return nil, vterrors.VT12001("cross-shard correlated subquery")
+ }
+
+ canMerge := canMergeOnFilters(ctx, aRoute, bRoute, joinPredicates)
+ if !canMerge {
+ return nil, nil
+ }
+ r, err := merger(aRoute, bRoute)
+ if err != nil {
+ return nil, err
+ }
+
+ // If we have a `None` route opcode, we want to keep it -
+ // we only try to find a better Vindex for other route opcodes
+ if aRoute.RouteOpCode != engine.None {
+ r.PickBestAvailableVindex()
+ }
+
+ return r, nil
+ }
+ return nil, nil
+}
+
+func isDualTable(route *Route) bool {
+ sources := leaves(route)
+ if len(sources) > 1 {
+ return false
+ }
+ src, ok := sources[0].(*Table)
+ if !ok {
+ return false
+ }
+ return src.VTable.Name.String() == "dual" && src.QTable.Table.Qualifier.IsEmpty()
+}
+
+func leaves(op ops.Operator) (sources []ops.Operator) {
+ switch op := op.(type) {
+ // these are the leaves
+ case *Table:
+ return []ops.Operator{op}
+ // physical
+ case *ApplyJoin:
+ return []ops.Operator{op.LHS, op.RHS}
+ case *Filter:
+ return []ops.Operator{op.Source}
+ case *Route:
+ return []ops.Operator{op.Source}
+ }
+
+ panic(fmt.Sprintf("leaves unknown type: %T", op))
+}
+
+func tryMergeReferenceTable(aRoute, bRoute *Route, merger mergeFunc) (*Route, error) {
+ var (
+ // if either side is a reference table, we can just merge it and use the opcode of the other side
+ opCode engine.Opcode
+ vindex *VindexOption
+ ks *vindexes.Keyspace
+ )
+
+ switch {
+ case aRoute.RouteOpCode == engine.Reference:
+ vindex = bRoute.Selected
+ opCode = bRoute.RouteOpCode
+ ks = bRoute.Keyspace
+ case bRoute.RouteOpCode == engine.Reference:
+ vindex = aRoute.Selected
+ opCode = aRoute.RouteOpCode
+ ks = aRoute.Keyspace
+ default:
+ return nil, nil
+ }
+
+ r, err := merger(aRoute, bRoute)
+ if err != nil {
+ return nil, err
+ }
+ r.RouteOpCode = opCode
+ r.Selected = vindex
+ r.Keyspace = ks
+ return r, nil
+}
+
+func canMergeOnFilter(ctx *plancontext.PlanningContext, a, b *Route, predicate sqlparser.Expr) bool {
+ comparison, ok := predicate.(*sqlparser.ComparisonExpr)
+ if !ok {
+ return false
+ }
+ if comparison.Operator != sqlparser.EqualOp {
+ return false
+ }
+ left := comparison.Left
+ right := comparison.Right
+
+ lVindex := findColumnVindex(ctx, a, left)
+ if lVindex == nil {
+ left, right = right, left
+ lVindex = findColumnVindex(ctx, a, left)
+ }
+ if lVindex == nil || !lVindex.IsUnique() {
+ return false
+ }
+ rVindex := findColumnVindex(ctx, b, right)
+ if rVindex == nil {
+ return false
+ }
+ return rVindex == lVindex
+}
+
+func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlparser.Expr) vindexes.SingleColumn {
+ _, isCol := exp.(*sqlparser.ColName)
+ if !isCol {
+ return nil
+ }
+
+ exp = unwrapDerivedTables(ctx, exp)
+ if exp == nil {
+ return nil
+ }
+
+ var singCol vindexes.SingleColumn
+
+ // for each equality expression that exp has with other column name, we check if it
+ // can be solved by any table in our routeTree. If an equality expression can be solved,
+ // we check if the equality expression and our table share the same vindex, if they do:
+ // the method will return the associated vindexes.SingleColumn.
+ for _, expr := range ctx.SemTable.GetExprAndEqualities(exp) {
+ col, isCol := expr.(*sqlparser.ColName)
+ if !isCol {
+ continue
+ }
+
+ deps := ctx.SemTable.RecursiveDeps(expr)
+
+ _ = rewrite.Visit(a, func(rel ops.Operator) error {
+ to, isTableOp := rel.(TableIDIntroducer)
+ if !isTableOp {
+ return nil
+ }
+ id := to.Introduces()
+ if deps.IsSolvedBy(id) {
+ tableInfo, err := ctx.SemTable.TableInfoFor(id)
+ if err != nil {
+ // an error here is OK, we just can't ask this operator about its column vindexes
+ return nil
+ }
+ vtable := tableInfo.GetVindexTable()
+ if vtable != nil {
+ for _, vindex := range vtable.ColumnVindexes {
+ sC, isSingle := vindex.Vindex.(vindexes.SingleColumn)
+ if isSingle && vindex.Columns[0].Equal(col.Name) {
+ singCol = sC
+ return io.EOF
+ }
+ }
+ }
+ }
+ return nil
+ })
+ if singCol != nil {
+ return singCol
+ }
+ }
+
+ return singCol
+}
+
+// unwrapDerivedTables we want to find the bottom layer of derived tables
+// nolint
+func unwrapDerivedTables(ctx *plancontext.PlanningContext, exp sqlparser.Expr) sqlparser.Expr {
+ for {
+ // if we are dealing with derived tables in derived tables
+ tbl, err := ctx.SemTable.TableInfoForExpr(exp)
+ if err != nil {
+ return nil
+ }
+ _, ok := tbl.(*semantics.DerivedTable)
+ if !ok {
+ break
+ }
+
+ exp = semantics.RewriteDerivedTableExpression(exp, tbl)
+ exp = getColName(exp)
+ if exp == nil {
+ return nil
+ }
+ }
+ return exp
+}
+
+func getColName(exp sqlparser.Expr) *sqlparser.ColName {
+ switch exp := exp.(type) {
+ case *sqlparser.ColName:
+ return exp
+ case *sqlparser.Max, *sqlparser.Min:
+ aggr := exp.(sqlparser.AggrFunc).GetArg()
+ colName, ok := aggr.(*sqlparser.ColName)
+ if ok {
+ return colName
+ }
+ }
+ // for any other expression than a column, or the extremum of a column, we return nil
+ return nil
+}
+
+func canMergeOnFilters(ctx *plancontext.PlanningContext, a, b *Route, joinPredicates []sqlparser.Expr) bool {
+ for _, predicate := range joinPredicates {
+ for _, expr := range sqlparser.SplitAndExpression(nil, predicate) {
+ if canMergeOnFilter(ctx, a, b, expr) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func gen4ValuesEqual(ctx *plancontext.PlanningContext, a, b []sqlparser.Expr) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ // TODO: check SemTable's columnEqualities for better plan
+
+ for i, aExpr := range a {
+ bExpr := b[i]
+ if !gen4ValEqual(ctx, aExpr, bExpr) {
+ return false
+ }
+ }
+ return true
+}
+
+func gen4ValEqual(ctx *plancontext.PlanningContext, a, b sqlparser.Expr) bool {
+ switch a := a.(type) {
+ case *sqlparser.ColName:
+ if b, ok := b.(*sqlparser.ColName); ok {
+ if !a.Name.Equal(b.Name) {
+ return false
+ }
+
+ return ctx.SemTable.DirectDeps(a) == ctx.SemTable.DirectDeps(b)
+ }
+ case sqlparser.Argument:
+ b, ok := b.(sqlparser.Argument)
+ if !ok {
+ return false
+ }
+ return a == b
+ case *sqlparser.Literal:
+ b, ok := b.(*sqlparser.Literal)
+ if !ok {
+ return false
+ }
+ switch a.Type {
+ case sqlparser.StrVal:
+ switch b.Type {
+ case sqlparser.StrVal:
+ return a.Val == b.Val
+ case sqlparser.HexVal:
+ return hexEqual(b, a)
+ }
+ case sqlparser.HexVal:
+ return hexEqual(a, b)
+ case sqlparser.IntVal:
+ if b.Type == (sqlparser.IntVal) {
+ return a.Val == b.Val
+ }
+ }
+ }
+ return false
+}
+
+func hexEqual(a, b *sqlparser.Literal) bool {
+ v, err := a.HexDecode()
+ if err != nil {
+ return false
+ }
+ switch b.Type {
+ case sqlparser.StrVal:
+ return bytes.Equal(v, b.Bytes())
+ case sqlparser.HexVal:
+ v2, err := b.HexDecode()
+ if err != nil {
+ return false
+ }
+ return bytes.Equal(v, v2)
+ }
+ return false
+}
+
+func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) (ops.Operator, error) {
+ if len(exprs) == 0 {
+ return op, nil
+ }
+
+ for _, expr := range exprs {
+ _, err := AddPredicate(op, ctx, expr, true, newFilter)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return op, nil
+}
diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go
new file mode 100644
index 00000000000..e6dbb2f22ed
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/subquery.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+type (
+ // SubQuery stores the information about subquery
+ SubQuery struct {
+ Outer ops.Operator
+ Inner []*SubQueryInner
+
+ noColumns
+ noPredicates
+ }
+
+ // SubQueryInner stores the subquery information for a select statement
+ SubQueryInner struct {
+ // Inner is the Operator inside the parenthesis of the subquery.
+ // i.e: select (select 1 union select 1), the Inner here would be
+ // of type Concatenate since we have a Union.
+ Inner ops.Operator
+
+ // ExtractedSubquery contains all information we need about this subquery
+ ExtractedSubquery *sqlparser.ExtractedSubquery
+
+ noColumns
+ noPredicates
+ }
+)
+
+var _ ops.Operator = (*SubQuery)(nil)
+var _ ops.Operator = (*SubQueryInner)(nil)
+
+// Clone implements the Operator interface
+func (s *SubQueryInner) Clone(inputs []ops.Operator) ops.Operator {
+ return &SubQueryInner{
+ Inner: inputs[0],
+ ExtractedSubquery: s.ExtractedSubquery,
+ }
+}
+
+// Inputs implements the Operator interface
+func (s *SubQueryInner) Inputs() []ops.Operator {
+ return []ops.Operator{s.Inner}
+}
+
+// Clone implements the Operator interface
+func (s *SubQuery) Clone(inputs []ops.Operator) ops.Operator {
+ result := &SubQuery{
+ Outer: inputs[0],
+ }
+ for idx := range s.Inner {
+ inner, ok := inputs[idx+1].(*SubQueryInner)
+ if !ok {
+ panic("got bad input")
+ }
+ result.Inner = append(result.Inner, inner)
+ }
+ return result
+}
+
+// Inputs implements the Operator interface
+func (s *SubQuery) Inputs() []ops.Operator {
+ operators := []ops.Operator{s.Outer}
+ for _, inner := range s.Inner {
+ operators = append(operators, inner)
+ }
+ return operators
+}
+
+func createSubqueryFromStatement(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (*SubQuery, error) {
+ if len(ctx.SemTable.SubqueryMap[stmt]) == 0 {
+ return nil, nil
+ }
+ subq := &SubQuery{}
+ for _, sq := range ctx.SemTable.SubqueryMap[stmt] {
+ opInner, err := createLogicalOperatorFromAST(ctx, sq.Subquery.Select)
+ if err != nil {
+ return nil, err
+ }
+ if horizon, ok := opInner.(*Horizon); ok {
+ opInner = horizon.Source
+ }
+
+ subq.Inner = append(subq.Inner, &SubQueryInner{
+ ExtractedSubquery: sq,
+ Inner: opInner,
+ })
+ }
+ return subq, nil
+}
diff --git a/go/vt/vtgate/planbuilder/physical/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
similarity index 67%
rename from go/vt/vtgate/planbuilder/physical/subquery_planning.go
rename to go/vt/vtgate/planbuilder/operators/subquery_planning.go
index dd27c7fa7ae..701876753f6 100644
--- a/go/vt/vtgate/planbuilder/physical/subquery_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
@@ -1,31 +1,41 @@
-package physical
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/evalengine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
-func optimizeSubQuery(ctx *plancontext.PlanningContext, op *abstract.SubQuery) (abstract.PhysicalOperator, error) {
- outerOp, err := CreatePhysicalOperator(ctx, op.Outer)
- if err != nil {
- return nil, err
- }
+func optimizeSubQuery(ctx *plancontext.PlanningContext, op *SubQuery) (ops.Operator, rewrite.TreeIdentity, error) {
var unmerged []*SubQueryOp
// first loop over the subqueries and try to merge them into the outer plan
+ outer := op.Outer
for _, inner := range op.Inner {
- innerOp, err := CreatePhysicalOperator(ctx, inner.Inner)
- if err != nil {
- return nil, err
- }
+ innerOp := inner.Inner
- preds := inner.Inner.UnsolvedPredicates(ctx.SemTable)
+ var preds []sqlparser.Expr
+ preds, innerOp = unresolvedAndSource(ctx, innerOp)
merger := func(a, b *Route) (*Route, error) {
return mergeSubQueryOp(ctx, a, b, inner)
}
@@ -34,13 +44,13 @@ func optimizeSubQuery(ctx *plancontext.PlanningContext, op *abstract.SubQuery) (
Inner: inner.Inner,
ExtractedSubquery: inner.ExtractedSubquery,
}
- merged, err := tryMergeSubQueryOp(ctx, outerOp, innerOp, newInner, preds, merger)
+ merged, err := tryMergeSubQueryOp(ctx, outer, innerOp, newInner, preds, merger)
if err != nil {
- return nil, err
+ return nil, rewrite.SameTree, err
}
if merged != nil {
- outerOp = merged
+ outer = merged
continue
}
@@ -55,50 +65,39 @@ func optimizeSubQuery(ctx *plancontext.PlanningContext, op *abstract.SubQuery) (
}
if inner.ExtractedSubquery.OpCode == int(engine.PulloutExists) {
- correlatedTree, err := createCorrelatedSubqueryOp(ctx, innerOp, outerOp, preds, inner.ExtractedSubquery)
+ correlatedTree, err := createCorrelatedSubqueryOp(ctx, innerOp, outer, preds, inner.ExtractedSubquery)
if err != nil {
- return nil, err
+ return nil, rewrite.SameTree, err
}
- outerOp = correlatedTree
+ outer = correlatedTree
continue
}
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard correlated subquery")
+ return nil, rewrite.SameTree, vterrors.VT12001("cross-shard correlated subquery")
}
- /*
- build a tree of the unmerged subqueries
- rt: route, sqt: subqueryTree
-
-
- sqt
- sqt rt
- rt rt
- */
for _, tree := range unmerged {
- tree.Outer = outerOp
- outerOp = tree
+ tree.Outer = outer
+ outer = tree
}
- return outerOp, nil
+ return outer, rewrite.NewTree, nil
}
-func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Route, subq *abstract.SubQueryInner) (*Route, error) {
- subq.ExtractedSubquery.NeedsRewrite = true
-
- // go over the subquery and add its tables to the one's solved by the route it is merged with
- // this is needed to so that later when we try to push projections, we get the correct
- // solved tableID from the route, since it also includes the tables from the subquery after merging
- err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
- switch n := node.(type) {
- case *sqlparser.AliasedTableExpr:
- ts := outer.TableID()
- ts.MergeInPlace(ctx.SemTable.TableSetFor(n))
+func unresolvedAndSource(ctx *plancontext.PlanningContext, op ops.Operator) ([]sqlparser.Expr, ops.Operator) {
+ preds := UnresolvedPredicates(op, ctx.SemTable)
+ if filter, ok := op.(*Filter); ok {
+ if ctx.SemTable.ASTEquals().Exprs(preds, filter.Predicates) {
+ // if we are seeing a single filter with only these predicates,
+ // we can throw away the filter and just use the source
+ return preds, filter.Source
}
- return true, nil
- }, subq.ExtractedSubquery.Subquery)
- if err != nil {
- return nil, err
}
+
+ return preds, op
+}
+
+func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Route, subq *SubQueryInner) (*Route, error) {
+ subq.ExtractedSubquery.NeedsRewrite = true
outer.SysTableTableSchema = append(outer.SysTableTableSchema, inner.SysTableTableSchema...)
for k, v := range inner.SysTableTableName {
if outer.SysTableTableName == nil {
@@ -115,7 +114,7 @@ func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Rout
// predicates list, so this might be a no-op.
subQueryWasPredicate := false
for i, predicate := range outer.SeenPredicates {
- if sqlparser.EqualsExpr(predicate, subq.ExtractedSubquery) {
+ if ctx.SemTable.EqualsExpr(predicate, subq.ExtractedSubquery) {
outer.SeenPredicates = append(outer.SeenPredicates[:i], outer.SeenPredicates[i+1:]...)
subQueryWasPredicate = true
@@ -126,7 +125,7 @@ func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Rout
}
}
- err = outer.resetRoutingSelections(ctx)
+ err := outer.resetRoutingSelections(ctx)
if err != nil {
return nil, err
}
@@ -141,10 +140,12 @@ func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Rout
}
}
+ outer.MergedWith = append(outer.MergedWith, inner)
+
return outer, nil
}
-func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op abstract.PhysicalOperator) bool {
+func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op ops.Operator) bool {
validVindex := func(expr sqlparser.Expr) bool {
sc := findColumnVindex(ctx, op, expr)
return sc != nil && sc.IsUnique()
@@ -186,12 +187,19 @@ func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStateme
func tryMergeSubQueryOp(
ctx *plancontext.PlanningContext,
- outer, subq abstract.PhysicalOperator,
+ outer, subq ops.Operator,
subQueryInner *SubQueryInner,
joinPredicates []sqlparser.Expr,
merger mergeFunc,
-) (abstract.PhysicalOperator, error) {
+) (ops.Operator, error) {
switch outerOp := outer.(type) {
+ case *Filter:
+ op, err := tryMergeSubQueryOp(ctx, outerOp.Source, subq, subQueryInner, joinPredicates, merger)
+ if err != nil || op == nil {
+ return nil, err
+ }
+ outerOp.Source = op
+ return outerOp, nil
case *Route:
return tryMergeSubqueryWithRoute(ctx, subq, outerOp, joinPredicates, merger, subQueryInner)
case *ApplyJoin:
@@ -203,12 +211,12 @@ func tryMergeSubQueryOp(
func tryMergeSubqueryWithRoute(
ctx *plancontext.PlanningContext,
- subq abstract.PhysicalOperator,
+ subq ops.Operator,
outerOp *Route,
joinPredicates []sqlparser.Expr,
merger mergeFunc,
subQueryInner *SubQueryInner,
-) (abstract.PhysicalOperator, error) {
+) (ops.Operator, error) {
subqueryRoute, isRoute := subq.(*Route)
if !isRoute {
return nil, nil
@@ -235,6 +243,9 @@ func tryMergeSubqueryWithRoute(
// Special case: Inner query won't return any results / is not routable.
if subqueryRoute.RouteOpCode == engine.None {
merged, err := merger(outerOp, subqueryRoute)
+ if err != nil {
+ return nil, err
+ }
return merged, err
}
@@ -260,12 +271,12 @@ func tryMergeSubqueryWithRoute(
func tryMergeSubqueryWithJoin(
ctx *plancontext.PlanningContext,
- subq abstract.PhysicalOperator,
+ subq ops.Operator,
outerOp *ApplyJoin,
joinPredicates []sqlparser.Expr,
merger mergeFunc,
subQueryInner *SubQueryInner,
-) (abstract.PhysicalOperator, error) {
+) (ops.PhysicalOperator, error) {
// Trying to merge the subquery with the left-hand or right-hand side of the join
if outerOp.LeftJoin {
@@ -315,49 +326,47 @@ func tryMergeSubqueryWithJoin(
// the child of joinTree which does not contain the subquery is the otherTree
func rewriteColumnsInSubqueryOpForJoin(
ctx *plancontext.PlanningContext,
- innerOp abstract.PhysicalOperator,
+ innerOp ops.Operator,
outerTree *ApplyJoin,
subQueryInner *SubQueryInner,
-) (abstract.PhysicalOperator, error) {
+) (ops.Operator, error) {
resultInnerOp := innerOp
var rewriteError error
// go over the entire expression in the subquery
- sqlparser.Rewrite(subQueryInner.ExtractedSubquery.Original, func(cursor *sqlparser.Cursor) bool {
- sqlNode := cursor.Node()
- switch node := sqlNode.(type) {
- case *sqlparser.ColName:
- // check whether the column name belongs to the other side of the join tree
- if ctx.SemTable.RecursiveDeps(node).IsSolvedBy(resultInnerOp.TableID()) {
- // get the bindVariable for that column name and replace it in the subquery
- bindVar := ctx.ReservedVars.ReserveColName(node)
- cursor.Replace(sqlparser.NewArgument(bindVar))
- // check whether the bindVariable already exists in the joinVars of the other tree
- _, alreadyExists := outerTree.Vars[bindVar]
- if alreadyExists {
- return false
- }
- // if it does not exist, then push this as an output column there and add it to the joinVars
- newInnerOp, columnIndexes, err := PushOutputColumns(ctx, resultInnerOp, node)
- if err != nil {
- rewriteError = err
- return false
- }
- columnIndex := columnIndexes[0]
- outerTree.Vars[bindVar] = columnIndex
- resultInnerOp = newInnerOp
- return false
- }
+ sqlparser.SafeRewrite(subQueryInner.ExtractedSubquery.Original, nil, func(cursor *sqlparser.Cursor) bool {
+ node, ok := cursor.Node().(*sqlparser.ColName)
+ if !ok {
+ return true
+ }
+
+ // check whether the column name belongs to the other side of the join tree
+ if !ctx.SemTable.RecursiveDeps(node).IsSolvedBy(TableID(resultInnerOp)) {
+ return true
+ }
+
+ // get the bindVariable for that column name and replace it in the subquery
+ bindVar := ctx.ReservedVars.ReserveColName(node)
+ cursor.Replace(sqlparser.NewArgument(bindVar))
+ // check whether the bindVariable already exists in the joinVars of the other tree
+ _, alreadyExists := outerTree.Vars[bindVar]
+ if alreadyExists {
+ return true
}
+ // if it does not exist, then push this as an output column there and add it to the joinVars
+ offset, err := resultInnerOp.AddColumn(ctx, node)
+ if err != nil {
+ rewriteError = err
+ return false
+ }
+ outerTree.Vars[bindVar] = offset
return true
- }, nil)
+ })
// update the dependencies for the subquery by removing the dependencies from the innerOp
tableSet := ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery]
- tableSet.RemoveInPlace(resultInnerOp.TableID())
- ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet
+ ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(resultInnerOp))
tableSet = ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery]
- tableSet.RemoveInPlace(resultInnerOp.TableID())
- ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet
+ ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(resultInnerOp))
// return any error while rewriting
return resultInnerOp, rewriteError
@@ -365,13 +374,13 @@ func rewriteColumnsInSubqueryOpForJoin(
func createCorrelatedSubqueryOp(
ctx *plancontext.PlanningContext,
- innerOp, outerOp abstract.PhysicalOperator,
+ innerOp, outerOp ops.Operator,
preds []sqlparser.Expr,
extractedSubquery *sqlparser.ExtractedSubquery,
) (*CorrelatedSubQueryOp, error) {
newOuter, err := RemovePredicate(ctx, extractedSubquery, outerOp)
if err != nil {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "exists sub-queries are only supported with AND clause")
+ return nil, vterrors.VT12001("EXISTS sub-queries are only supported with AND clause")
}
resultOuterOp := newOuter
@@ -380,46 +389,48 @@ func createCorrelatedSubqueryOp(
var lhsCols []*sqlparser.ColName
for _, pred := range preds {
var rewriteError error
- sqlparser.Rewrite(pred, func(cursor *sqlparser.Cursor) bool {
- switch node := cursor.Node().(type) {
- case *sqlparser.ColName:
- if ctx.SemTable.RecursiveDeps(node).IsSolvedBy(resultOuterOp.TableID()) {
- // check whether the bindVariable already exists in the map
- // we do so by checking that the column names are the same and their recursive dependencies are the same
- // so if the column names user.a and a would also be equal if the latter is also referencing the user table
- for colName, bindVar := range bindVars {
- if node.Name.Equal(colName.Name) && ctx.SemTable.RecursiveDeps(node).Equals(ctx.SemTable.RecursiveDeps(colName)) {
- cursor.Replace(sqlparser.NewArgument(bindVar))
- return false
- }
- }
-
- // get the bindVariable for that column name and replace it in the predicate
- bindVar := ctx.ReservedVars.ReserveColName(node)
+ sqlparser.SafeRewrite(pred, nil, func(cursor *sqlparser.Cursor) bool {
+ node, ok := cursor.Node().(*sqlparser.ColName)
+ if !ok {
+ return true
+ }
+
+ nodeDeps := ctx.SemTable.RecursiveDeps(node)
+ if !nodeDeps.IsSolvedBy(TableID(resultOuterOp)) {
+ return true
+ }
+
+ // check whether the bindVariable already exists in the map
+ // we do so by checking that the column names are the same and their recursive dependencies are the same
+ // so the column names `user.a` and `a` would be considered equal as long as both are bound to the same table
+ for colName, bindVar := range bindVars {
+ if ctx.SemTable.EqualsExpr(node, colName) {
cursor.Replace(sqlparser.NewArgument(bindVar))
- // store it in the map for future comparisons
- bindVars[node] = bindVar
-
- // if it does not exist, then push this as an output column in the outerOp and add it to the joinVars
- newOuterOp, columnIndexes, err := PushOutputColumns(ctx, resultOuterOp, node)
- if err != nil {
- rewriteError = err
- return false
- }
- lhsCols = append(lhsCols, node)
- columnIndex := columnIndexes[0]
- vars[bindVar] = columnIndex
- resultOuterOp = newOuterOp
- return false
+ return true
}
}
+
+ // get the bindVariable for that column name and replace it in the predicate
+ bindVar := ctx.ReservedVars.ReserveColName(node)
+ cursor.Replace(sqlparser.NewArgument(bindVar))
+ // store it in the map for future comparisons
+ bindVars[node] = bindVar
+
+ // if it does not exist, then push this as an output column in the outerOp and add it to the joinVars
+ offset, err := resultOuterOp.AddColumn(ctx, node)
+ if err != nil {
+ rewriteError = err
+ return true
+ }
+ lhsCols = append(lhsCols, node)
+ vars[bindVar] = offset
return true
- }, nil)
+ })
if rewriteError != nil {
return nil, rewriteError
}
var err error
- innerOp, err = PushPredicate(ctx, pred, innerOp)
+ innerOp, err = innerOp.AddPredicate(ctx, pred)
if err != nil {
return nil, err
}
diff --git a/go/vt/vtgate/planbuilder/operators/system_tables.go b/go/vt/vtgate/planbuilder/operators/system_tables.go
new file mode 100644
index 00000000000..8486805a853
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/system_tables.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "strings"
+
+ "vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
+)
+
+func (r *Route) findSysInfoRoutingPredicatesGen4(predicates []sqlparser.Expr, reservedVars *sqlparser.ReservedVars) error {
+ for _, pred := range predicates {
+ isTableSchema, bvName, out, err := extractInfoSchemaRoutingPredicate(pred, reservedVars)
+ if err != nil {
+ return err
+ }
+ if out == nil {
+ // we didn't find a predicate to use for routing, continue to look for next predicate
+ continue
+ }
+
+ if r.SysTableTableName == nil {
+ r.SysTableTableName = map[string]evalengine.Expr{}
+ }
+
+ if isTableSchema {
+ r.SysTableTableSchema = append(r.SysTableTableSchema, out)
+ } else {
+ r.SysTableTableName[bvName] = out
+ }
+ }
+ return nil
+}
+
+func extractInfoSchemaRoutingPredicate(
+ in sqlparser.Expr,
+ reservedVars *sqlparser.ReservedVars,
+) (isSchemaName bool, name string, evalExpr evalengine.Expr, err error) {
+ cmp, ok := in.(*sqlparser.ComparisonExpr)
+ if !ok || cmp.Operator != sqlparser.EqualOp {
+ return
+ }
+
+ isSchemaName, col := isTableOrSchemaRouteable(cmp)
+ if col == nil || !shouldRewrite(cmp.Right) {
+ return
+ }
+
+ evalExpr, err = evalengine.Translate(cmp.Right, ¬ImplementedSchemaInfoConverter{})
+ if err != nil {
+ if strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) {
+ // This just means we can't rewrite this particular expression,
+ // not that we have to exit altogether
+ err = nil
+ return
+ }
+ return
+ }
+ if isSchemaName {
+ name = sqltypes.BvSchemaName
+ } else {
+ name = reservedVars.ReserveColName(col)
+ }
+ cmp.Right = sqlparser.NewArgument(name)
+ return isSchemaName, name, evalExpr, nil
+}
+
+// isTableOrSchemaRouteable searches for a comparison where one side is a table or schema name column.
+// if it finds the correct column name being used,
+// it also makes sure that the LHS of the comparison contains the column, and the RHS the value sought after
+func isTableOrSchemaRouteable(cmp *sqlparser.ComparisonExpr) (
+ isSchema bool, // tells if we are dealing with a table or a schema name comparator
+ col *sqlparser.ColName, // which is the colName we are comparing against
+) {
+ if col, schema, table := isTableSchemaOrName(cmp.Left); schema || table {
+ return schema, col
+ }
+ if col, schema, table := isTableSchemaOrName(cmp.Right); schema || table {
+ // to make the rest of the code easier, we shuffle these around so the ColName is always on the LHS
+ cmp.Right, cmp.Left = cmp.Left, cmp.Right
+ return schema, col
+ }
+
+ return false, nil
+}
+
+func shouldRewrite(e sqlparser.Expr) bool {
+ switch node := e.(type) {
+ case *sqlparser.FuncExpr:
+ // we should not rewrite database() calls against information_schema
+ return !(node.Name.EqualString("database") || node.Name.EqualString("schema"))
+ }
+ return true
+}
+
+func isTableSchemaOrName(e sqlparser.Expr) (col *sqlparser.ColName, isTableSchema bool, isTableName bool) {
+ col, ok := e.(*sqlparser.ColName)
+ if !ok {
+ return nil, false, false
+ }
+ return col, isDbNameCol(col), isTableNameCol(col)
+}
+
+func isDbNameCol(col *sqlparser.ColName) bool {
+ return col.Name.EqualString("table_schema") || col.Name.EqualString("constraint_schema") || col.Name.EqualString("schema_name") || col.Name.EqualString("routine_schema")
+}
+
+func isTableNameCol(col *sqlparser.ColName) bool {
+ return col.Name.EqualString("table_name")
+}
+
+type notImplementedSchemaInfoConverter struct{}
+
+func (f *notImplementedSchemaInfoConverter) ColumnLookup(*sqlparser.ColName) (int, error) {
+ return 0, vterrors.VT12001("comparing table schema name with a column name")
+}
+
+func (f *notImplementedSchemaInfoConverter) CollationForExpr(sqlparser.Expr) collations.ID {
+ return collations.Unknown
+}
+
+func (f *notImplementedSchemaInfoConverter) DefaultCollation() collations.ID {
+ return collations.Default()
+}
diff --git a/go/vt/vtgate/planbuilder/operators/table.go b/go/vt/vtgate/planbuilder/operators/table.go
new file mode 100644
index 00000000000..593dfe3ec7a
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/table.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+)
+
+type (
+ Table struct {
+ QTable *QueryTable
+ VTable *vindexes.Table
+ Columns []*sqlparser.ColName
+
+ noInputs
+ }
+ ColNameColumns interface {
+ GetColumns() []*sqlparser.ColName
+ AddCol(*sqlparser.ColName)
+ }
+)
+
+var _ ops.PhysicalOperator = (*Table)(nil)
+
+// IPhysical implements the PhysicalOperator interface
+func (to *Table) IPhysical() {}
+
+// Clone implements the Operator interface
+func (to *Table) Clone([]ops.Operator) ops.Operator {
+ var columns []*sqlparser.ColName
+ for _, name := range to.Columns {
+ columns = append(columns, sqlparser.CloneRefOfColName(name))
+ }
+ return &Table{
+ QTable: to.QTable,
+ VTable: to.VTable,
+ Columns: columns,
+ }
+}
+
+// Introduces implements the PhysicalOperator interface
+func (to *Table) Introduces() semantics.TableSet {
+ return to.QTable.ID
+}
+
+// AddPredicate implements the PhysicalOperator interface
+func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ return newFilter(to, expr), nil
+}
+
+func (to *Table) AddColumn(_ *plancontext.PlanningContext, e sqlparser.Expr) (int, error) {
+ return addColumn(to, e)
+}
+
+func (to *Table) GetColumns() []*sqlparser.ColName {
+ return to.Columns
+}
+func (to *Table) AddCol(col *sqlparser.ColName) {
+ to.Columns = append(to.Columns, col)
+}
+
+func (to *Table) TablesUsed() []string {
+ if sqlparser.SystemSchema(to.QTable.Table.Qualifier.String()) {
+ return nil
+ }
+ return SingleQualifiedIdentifier(to.VTable.Keyspace, to.VTable.Name)
+}
+
+func addColumn(op ColNameColumns, e sqlparser.Expr) (int, error) {
+ col, ok := e.(*sqlparser.ColName)
+ if !ok {
+ return 0, vterrors.VT13001("cannot push this expression to a table/vindex")
+ }
+ cols := op.GetColumns()
+ for idx, column := range cols {
+ if col.Name.Equal(column.Name) {
+ return idx, nil
+ }
+ }
+ offset := len(cols)
+ op.AddCol(col)
+ return offset, nil
+}
diff --git a/go/vt/vtgate/planbuilder/operators/union.go b/go/vt/vtgate/planbuilder/operators/union.go
new file mode 100644
index 00000000000..ffaa03d858b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/union.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+)
+
+type Union struct {
+ Sources []ops.Operator
+ Distinct bool
+
+ // TODO this should be removed. For now it's used to fail queries
+ Ordering sqlparser.OrderBy
+
+ noColumns
+}
+
+var _ ops.PhysicalOperator = (*Union)(nil)
+
+// IPhysical implements the PhysicalOperator interface
+func (u *Union) IPhysical() {}
+
+// Clone implements the Operator interface
+func (u *Union) Clone(inputs []ops.Operator) ops.Operator {
+ newOp := *u
+ newOp.Sources = inputs
+ return &newOp
+}
+
+// Inputs implements the Operator interface
+func (u *Union) Inputs() []ops.Operator {
+ return u.Sources
+}
+
+// AddPredicate adds a predicate a UNION by pushing the predicate to all sources of the UNION.
+/* this is done by offset and expression rewriting. Say we have a query like so:
+select * (
+ select foo as col, bar from tbl1
+ union
+ select id, baz from tbl2
+) as X where X.col = 42
+
+We want to push down the `X.col = 42` as far down the operator tree as possible. We want
+to end up with an operator tree that looks something like this:
+
+select * (
+ select foo as col, bar from tbl1 where foo = 42
+ union
+ select id, baz from tbl2 where id = 42
+) as X
+
+Notice how `X.col = 42` has been translated to `foo = 42` and `id = 42` on respective WHERE clause.
+The first SELECT of the union dictates the column names, and the second is whatever expression
+can be found on the same offset. The names of the RHS are discarded.
+*/
+func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ offsets := make(map[string]int)
+ sel, err := u.GetSelectFor(0)
+ if err != nil {
+ return nil, err
+ }
+ for i, selectExpr := range sel.SelectExprs {
+ ae, ok := selectExpr.(*sqlparser.AliasedExpr)
+ if !ok {
+ return nil, vterrors.VT12001("pushing predicates on UNION where the first SELECT contains * or NEXT")
+ }
+ if !ae.As.IsEmpty() {
+ offsets[ae.As.String()] = i
+ continue
+ }
+ col, ok := ae.Expr.(*sqlparser.ColName)
+ if ok {
+ offsets[col.Name.Lowered()] = i
+ }
+ }
+
+ for i := range u.Sources {
+ var err error
+ predicate := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) {
+ col, ok := cursor.Node().(*sqlparser.ColName)
+ if !ok {
+ return
+ }
+
+ idx, ok := offsets[col.Name.Lowered()]
+ if !ok {
+ err = vterrors.VT13001("cannot push predicates on concatenate, missing columns from the UNION")
+ cursor.StopTreeWalk()
+ return
+ }
+
+ var sel *sqlparser.Select
+ sel, err = u.GetSelectFor(i)
+ if err != nil {
+ cursor.StopTreeWalk()
+ return
+ }
+
+ ae, ok := sel.SelectExprs[idx].(*sqlparser.AliasedExpr)
+ if !ok {
+ err = vterrors.VT12001("pushing non-aliased expression predicates on concatenate")
+ cursor.StopTreeWalk()
+ return
+ }
+ cursor.Replace(ae.Expr)
+ }, nil).(sqlparser.Expr)
+ if err != nil {
+ return nil, err
+ }
+ u.Sources[i], err = u.Sources[i].AddPredicate(ctx, predicate)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return u, nil
+}
+
+func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) {
+ src := u.Sources[source]
+ for {
+ switch op := src.(type) {
+ case *Horizon:
+ return sqlparser.GetFirstSelect(op.Select), nil
+ case *Route:
+ src = op.Source
+ default:
+ return nil, vterrors.VT13001("expected all sources of the UNION to be horizons")
+ }
+ }
+}
+
+func (u *Union) Compact(*plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error) {
+ var newSources []ops.Operator
+ anythingChanged := false
+ for _, source := range u.Sources {
+ var other *Union
+ horizon, ok := source.(*Horizon)
+ if ok {
+ union, ok := horizon.Source.(*Union)
+ if ok {
+ other = union
+ }
+ }
+ if other == nil {
+ newSources = append(newSources, source)
+ continue
+ }
+ anythingChanged = true
+ switch {
+ case len(other.Ordering) == 0 && !other.Distinct:
+ fallthrough
+ case u.Distinct:
+ // if the current UNION is a DISTINCT, we can safely ignore everything from children UNIONs, except LIMIT
+ newSources = append(newSources, other.Sources...)
+
+ default:
+ newSources = append(newSources, other)
+ }
+ }
+ if anythingChanged {
+ u.Sources = newSources
+ }
+ identity := rewrite.SameTree
+ if anythingChanged {
+ identity = rewrite.NewTree
+ }
+
+ return u, identity, nil
+}
diff --git a/go/vt/vtgate/planbuilder/physical/update.go b/go/vt/vtgate/planbuilder/operators/update.go
similarity index 57%
rename from go/vt/vtgate/planbuilder/physical/update.go
rename to go/vt/vtgate/planbuilder/operators/update.go
index 37e4010c11c..11c46a326a4 100644
--- a/go/vt/vtgate/planbuilder/physical/update.go
+++ b/go/vt/vtgate/planbuilder/operators/update.go
@@ -14,53 +14,41 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package physical
+package operators
import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/engine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
"vitess.io/vitess/go/vt/vtgate/semantics"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
type Update struct {
- QTable *abstract.QueryTable
+ QTable *QueryTable
VTable *vindexes.Table
Assignments map[string]sqlparser.Expr
ChangedVindexValues map[string]*engine.VindexValues
OwnedVindexQuery string
AST *sqlparser.Update
-}
-
-var _ abstract.PhysicalOperator = (*Update)(nil)
-var _ abstract.IntroducesTable = (*Update)(nil)
-// TableID implements the PhysicalOperator interface
-func (u *Update) TableID() semantics.TableSet {
- return u.QTable.ID
+ noInputs
+ noColumns
+ noPredicates
}
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (u *Update) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return nil
-}
+var _ ops.PhysicalOperator = (*Update)(nil)
-// CheckValid implements the PhysicalOperator interface
-func (u *Update) CheckValid() error {
- return nil
+// Introduces implements the PhysicalOperator interface
+func (u *Update) Introduces() semantics.TableSet {
+ return u.QTable.ID
}
// IPhysical implements the PhysicalOperator interface
func (u *Update) IPhysical() {}
-// Cost implements the PhysicalOperator interface
-func (u *Update) Cost() int {
- return 1
-}
-
-// Clone implements the PhysicalOperator interface
-func (u *Update) Clone() abstract.PhysicalOperator {
+// Clone implements the Operator interface
+func (u *Update) Clone(inputs []ops.Operator) ops.Operator {
return &Update{
QTable: u.QTable,
VTable: u.VTable,
@@ -71,12 +59,9 @@ func (u *Update) Clone() abstract.PhysicalOperator {
}
}
-// GetQTable implements the IntroducesTable interface
-func (u *Update) GetQTable() *abstract.QueryTable {
- return u.QTable
-}
-
-// GetVTable implements the IntroducesTable interface
-func (u *Update) GetVTable() *vindexes.Table {
- return u.VTable
+func (u *Update) TablesUsed() []string {
+ if u.VTable != nil {
+ return SingleQualifiedIdentifier(u.VTable.Keyspace, u.VTable.Name)
+ }
+ return nil
}
diff --git a/go/vt/vtgate/planbuilder/operators/vindex.go b/go/vt/vtgate/planbuilder/operators/vindex.go
new file mode 100644
index 00000000000..0c0d6976fb5
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/operators/vindex.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operators
+
+import (
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+)
+
+type (
+ Vindex struct {
+ OpCode engine.VindexOpcode
+ Table VindexTable
+ Vindex vindexes.Vindex
+ Solved semantics.TableSet
+ Columns []*sqlparser.ColName
+ Value sqlparser.Expr
+
+ noInputs
+ }
+
+ // VindexTable contains information about the vindex table we want to query
+ VindexTable struct {
+ TableID semantics.TableSet
+ Alias *sqlparser.AliasedTableExpr
+ Table sqlparser.TableName
+ Predicates []sqlparser.Expr
+ VTable *vindexes.Table
+ }
+)
+
+const VindexUnsupported = "WHERE clause for vindex function must be of the form id = or id in(,...)"
+
+// Introduces implements the Operator interface
+func (v *Vindex) Introduces() semantics.TableSet {
+ return v.Solved
+}
+
+// IPhysical implements the PhysicalOperator interface
+func (v *Vindex) IPhysical() {}
+
+// Clone implements the Operator interface
+func (v *Vindex) Clone([]ops.Operator) ops.Operator {
+ clone := *v
+ return &clone
+}
+
+var _ ops.PhysicalOperator = (*Vindex)(nil)
+
+func (v *Vindex) AddColumn(_ *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) {
+ return addColumn(v, expr)
+}
+
+func (v *Vindex) GetColumns() []*sqlparser.ColName {
+ return v.Columns
+}
+func (v *Vindex) AddCol(col *sqlparser.ColName) {
+ v.Columns = append(v.Columns, col)
+}
+
+// checkValid implements the Operator interface
+func (v *Vindex) CheckValid() error {
+ if len(v.Table.Predicates) == 0 {
+ return vterrors.VT12001(VindexUnsupported + " (where clause missing)")
+ }
+
+ return nil
+}
+
+func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) {
+ for _, e := range sqlparser.SplitAndExpression(nil, expr) {
+ deps := ctx.SemTable.RecursiveDeps(e)
+ if deps.NumberOfTables() > 1 {
+ return nil, vterrors.VT12001(VindexUnsupported + " (multiple tables involved)")
+ }
+ // check if we already have a predicate
+ if v.OpCode != engine.VindexNone {
+ return nil, vterrors.VT12001(VindexUnsupported + " (multiple filters)")
+ }
+
+ // check LHS
+ comparison, ok := e.(*sqlparser.ComparisonExpr)
+ if !ok {
+ return nil, vterrors.VT12001(VindexUnsupported + " (not a comparison)")
+ }
+ if comparison.Operator != sqlparser.EqualOp && comparison.Operator != sqlparser.InOp {
+ return nil, vterrors.VT12001(VindexUnsupported + " (not equality)")
+ }
+ colname, ok := comparison.Left.(*sqlparser.ColName)
+ if !ok {
+ return nil, vterrors.VT12001(VindexUnsupported + " (lhs is not a column)")
+ }
+ if !colname.Name.EqualString("id") {
+ return nil, vterrors.VT12001(VindexUnsupported + " (lhs is not id)")
+ }
+
+ // check RHS
+ var err error
+ if sqlparser.IsValue(comparison.Right) || sqlparser.IsSimpleTuple(comparison.Right) {
+ v.Value = comparison.Right
+ } else {
+ return nil, vterrors.VT12001(VindexUnsupported + " (rhs is not a value)")
+ }
+ if err != nil {
+ return nil, vterrors.VT12001(VindexUnsupported+": %v", err)
+ }
+ v.OpCode = engine.VindexMap
+ v.Table.Predicates = append(v.Table.Predicates, e)
+ }
+ return v, nil
+}
+
+// TablesUsed implements the Operator interface.
+// It is not keyspace-qualified.
+func (v *Vindex) TablesUsed() []string {
+ return []string{v.Table.Table.Name.String()}
+}
diff --git a/go/vt/vtgate/planbuilder/ordered_aggregate.go b/go/vt/vtgate/planbuilder/ordered_aggregate.go
index 763aa31362f..9458e85de66 100644
--- a/go/vt/vtgate/planbuilder/ordered_aggregate.go
+++ b/go/vt/vtgate/planbuilder/ordered_aggregate.go
@@ -27,7 +27,6 @@ import (
"vitess.io/vitess/go/sqltypes"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/sqlparser"
@@ -106,7 +105,7 @@ func (pb *primitiveBuilder) checkAggregates(sel *sqlparser.Select) error {
// order by clauses.
if !isRoute {
if hasAggregates {
- return vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard query with aggregates")
+ return vterrors.VT12001("cross-shard query with aggregates")
}
pb.plan = newDistinctV3(pb.plan)
return nil
@@ -270,7 +269,7 @@ func (oa *orderedAggregate) pushAggr(pb *primitiveBuilder, expr *sqlparser.Alias
opcode := origOpcode
if aggrFunc.GetArgs() != nil &&
len(aggrFunc.GetArgs()) != 1 {
- return nil, 0, fmt.Errorf("unsupported: only one expression allowed inside aggregates: %s", sqlparser.String(expr))
+ return nil, 0, vterrors.VT12001(fmt.Sprintf("only one expression is allowed inside aggregates: %s", sqlparser.String(expr)))
}
handleDistinct, innerAliased, err := oa.needDistinctHandling(pb, expr, opcode)
@@ -279,7 +278,7 @@ func (oa *orderedAggregate) pushAggr(pb *primitiveBuilder, expr *sqlparser.Alias
}
if handleDistinct {
if oa.extraDistinct != nil {
- return nil, 0, fmt.Errorf("unsupported: only one distinct aggregation allowed in a select: %s", sqlparser.String(expr))
+ return nil, 0, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation allowed in a SELECT: %s", sqlparser.String(expr)))
}
// Push the expression that's inside the aggregate.
// The column will eventually get added to the group by and order by clauses.
@@ -334,7 +333,7 @@ func (oa *orderedAggregate) needDistinctHandling(pb *primitiveBuilder, expr *sql
aggr, ok := expr.Expr.(sqlparser.AggrFunc)
if !ok {
- return false, nil, fmt.Errorf("syntax error: %s", sqlparser.String(expr))
+ return false, nil, vterrors.VT03012(sqlparser.String(expr))
}
if !aggr.IsDistinct() {
diff --git a/go/vt/vtgate/planbuilder/ordering.go b/go/vt/vtgate/planbuilder/ordering.go
index f41e3ded479..5abf2823e9e 100644
--- a/go/vt/vtgate/planbuilder/ordering.go
+++ b/go/vt/vtgate/planbuilder/ordering.go
@@ -17,7 +17,6 @@ import (
"fmt"
"vitess.io/vitess/go/mysql/collations"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -56,14 +55,14 @@ func planOrdering(pb *primitiveBuilder, input logicalPlan, orderBy v3OrderBy) (l
case *orderedAggregate:
return planOAOrdering(pb, orderBy, node)
case *mergeSort:
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't do ORDER BY on top of ORDER BY")
+ return nil, vterrors.VT12001("ORDER BY on top of ORDER BY")
case *concatenate:
if len(orderBy) == 0 {
return input, nil
}
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't do ORDER BY on top of UNION")
+ return nil, vterrors.VT12001("ORDER BY on top of UNION")
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable %T.ordering", input)
+ return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.ordering", input))
}
func planOAOrdering(pb *primitiveBuilder, orderBy v3OrderBy, oa *orderedAggregate) (logicalPlan, error) {
@@ -104,17 +103,17 @@ func planOAOrdering(pb *primitiveBuilder, orderBy v3OrderBy, oa *orderedAggregat
case *sqlparser.CastExpr:
col, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
- return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
orderByCol = col.Metadata.(*column)
case *sqlparser.ConvertExpr:
col, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
- return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
orderByCol = col.Metadata.(*column)
default:
- return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %v", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %v", sqlparser.String(expr)))
}
// Match orderByCol against the group by columns.
@@ -143,7 +142,7 @@ func planOAOrdering(pb *primitiveBuilder, orderBy v3OrderBy, oa *orderedAggregat
// Build a brand new reference for the key.
col, err := BuildColName(oa.input.ResultColumns(), groupByKey.KeyCol)
if err != nil {
- return nil, vterrors.Wrapf(err, "generating order by clause")
+ return nil, vterrors.Wrapf(err, "generating ORDER BY clause")
}
selOrderBy = append(selOrderBy, &v3Order{
Order: &sqlparser.Order{Expr: col, Direction: sqlparser.AscOrder},
@@ -220,11 +219,11 @@ func planJoinOrdering(pb *primitiveBuilder, orderBy v3OrderBy, node *join) (logi
switch e := in.(type) {
case *sqlparser.ColName:
if e.Metadata.(*column).Origin().Order() > node.Left.Order() {
- return false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: order by spans across shards")
+ return false, vterrors.VT12001("ORDER BY spans across shards")
}
case *sqlparser.Subquery:
// Unreachable because ResolveSymbols perfoms this check up above.
- return false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: order by has subquery")
+ return false, vterrors.VT12001("ORDER BY has subquery")
}
return true, nil
}, order.Expr)
@@ -295,7 +294,7 @@ func planRouteOrdering(orderBy v3OrderBy, node *route) (logicalPlan, error) {
case *sqlparser.UnaryExpr:
col, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
- return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
c := col.Metadata.(*column)
for i, rc := range node.resultColumns {
@@ -305,12 +304,12 @@ func planRouteOrdering(orderBy v3OrderBy, node *route) (logicalPlan, error) {
}
}
default:
- return nil, fmt.Errorf("unsupported: in scatter query: complex order by expression: %s", sqlparser.String(expr))
+ return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr)))
}
// If column is not found, then the order by is referencing
// a column that's not on the select list.
if colNumber == -1 {
- return nil, fmt.Errorf("unsupported: in scatter query: order by must reference a column in the select list: %s", sqlparser.String(order))
+ return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order)))
}
starColFixedIndex := colNumber
if selectStatement, ok := node.Select.(*sqlparser.Select); ok {
@@ -328,7 +327,7 @@ func planRouteOrdering(orderBy v3OrderBy, node *route) (logicalPlan, error) {
tableMeta = tableMap[tableName]
}
if tableMeta == nil || !tableMeta.isAuthoritative {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list")
+ return nil, vterrors.VT12001("in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list")
}
starColFixedIndex += len(tableMeta.columnNames) - 1
}
diff --git a/go/vt/vtgate/planbuilder/physical/apply_join.go b/go/vt/vtgate/planbuilder/physical/apply_join.go
deleted file mode 100644
index d8a66de82f5..00000000000
--- a/go/vt/vtgate/planbuilder/physical/apply_join.go
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-// ApplyJoin is a nested loop join - for each row on the LHS,
-// we'll execute the plan on the RHS, feeding data from left to right
-type ApplyJoin struct {
- LHS, RHS abstract.PhysicalOperator
-
- // Columns stores the column indexes of the columns coming from the left and right side
- // negative value comes from LHS and positive from RHS
- Columns []int
-
- // Vars are the arguments that need to be copied from the LHS to the RHS
- Vars map[string]int
-
- // LeftJoin will be true in the case of an outer join
- LeftJoin bool
-
- // JoinCols are the columns from the LHS used for the join.
- // These are the same columns pushed on the LHS that are now used in the Vars field
- LHSColumns []*sqlparser.ColName
-
- Predicate sqlparser.Expr
-}
-
-var _ abstract.PhysicalOperator = (*ApplyJoin)(nil)
-
-// IPhysical implements the PhysicalOperator interface
-func (a *ApplyJoin) IPhysical() {}
-
-// TableID implements the PhysicalOperator interface
-func (a *ApplyJoin) TableID() semantics.TableSet {
- return a.LHS.TableID().Merge(a.RHS.TableID())
-}
-
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (a *ApplyJoin) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- panic("implement me")
-}
-
-// CheckValid implements the PhysicalOperator interface
-func (a *ApplyJoin) CheckValid() error {
- err := a.LHS.CheckValid()
- if err != nil {
- return err
- }
- return a.RHS.CheckValid()
-}
-
-// Compact implements the PhysicalOperator interface
-func (a *ApplyJoin) Compact(semTable *semantics.SemTable) (abstract.Operator, error) {
- return a, nil
-}
-
-// Cost implements the PhysicalOperator interface
-func (a *ApplyJoin) Cost() int {
- return a.LHS.Cost() + a.RHS.Cost()
-}
-
-// Clone implements the PhysicalOperator interface
-func (a *ApplyJoin) Clone() abstract.PhysicalOperator {
- varsClone := map[string]int{}
- for key, value := range a.Vars {
- varsClone[key] = value
- }
- columnsClone := make([]int, len(a.Columns))
- copy(columnsClone, a.Columns)
- lhsColumns := make([]*sqlparser.ColName, len(a.LHSColumns))
- copy(lhsColumns, a.LHSColumns)
- return &ApplyJoin{
- LHS: a.LHS.Clone(),
- RHS: a.RHS.Clone(),
- Columns: columnsClone,
- Vars: varsClone,
- LeftJoin: a.LeftJoin,
- Predicate: sqlparser.CloneExpr(a.Predicate),
- LHSColumns: lhsColumns,
- }
-}
diff --git a/go/vt/vtgate/planbuilder/physical/correlated_subquery.go b/go/vt/vtgate/planbuilder/physical/correlated_subquery.go
deleted file mode 100644
index 03d27efc5a6..00000000000
--- a/go/vt/vtgate/planbuilder/physical/correlated_subquery.go
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-type (
- CorrelatedSubQueryOp struct {
- Outer, Inner abstract.PhysicalOperator
- Extracted *sqlparser.ExtractedSubquery
-
- // JoinCols are the columns from the LHS used for the join.
- // These are the same columns pushed on the LHS that are now used in the Vars field
- LHSColumns []*sqlparser.ColName
-
- // arguments that need to be copied from the outer to inner
- Vars map[string]int
- }
-
- SubQueryOp struct {
- Outer, Inner abstract.PhysicalOperator
- Extracted *sqlparser.ExtractedSubquery
- }
-
- SubQueryInner struct {
- Inner abstract.LogicalOperator
-
- // ExtractedSubquery contains all information we need about this subquery
- ExtractedSubquery *sqlparser.ExtractedSubquery
- }
-)
-
-var _ abstract.PhysicalOperator = (*SubQueryOp)(nil)
-var _ abstract.PhysicalOperator = (*CorrelatedSubQueryOp)(nil)
-
-// TableID implements the PhysicalOperator interface
-func (s *SubQueryOp) TableID() semantics.TableSet {
- return s.Inner.TableID().Merge(s.Outer.TableID())
-}
-
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (s *SubQueryOp) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return append(s.Outer.UnsolvedPredicates(semTable), s.Inner.UnsolvedPredicates(semTable)...)
-}
-
-// CheckValid implements the PhysicalOperator interface
-func (s *SubQueryOp) CheckValid() error {
- err := s.Inner.CheckValid()
- if err != nil {
- return err
- }
- return s.Outer.CheckValid()
-}
-
-// IPhysical implements the PhysicalOperator interface
-func (s *SubQueryOp) IPhysical() {}
-
-// Cost implements the PhysicalOperator interface
-func (s *SubQueryOp) Cost() int {
- return s.Inner.Cost() + s.Outer.Cost()
-}
-
-// Clone implements the PhysicalOperator interface
-func (s *SubQueryOp) Clone() abstract.PhysicalOperator {
- result := &SubQueryOp{
- Outer: s.Outer.Clone(),
- Inner: s.Inner.Clone(),
- Extracted: s.Extracted,
- }
- return result
-}
-
-func (c *CorrelatedSubQueryOp) TableID() semantics.TableSet {
- return c.Inner.TableID().Merge(c.Outer.TableID())
-}
-
-func (c *CorrelatedSubQueryOp) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return append(c.Outer.UnsolvedPredicates(semTable), c.Inner.UnsolvedPredicates(semTable)...)
-}
-
-func (c *CorrelatedSubQueryOp) CheckValid() error {
- err := c.Inner.CheckValid()
- if err != nil {
- return err
- }
- return c.Outer.CheckValid()
-}
-
-func (c *CorrelatedSubQueryOp) IPhysical() {}
-
-func (c *CorrelatedSubQueryOp) Cost() int {
- return c.Inner.Cost() + c.Outer.Cost()
-}
-
-func (c *CorrelatedSubQueryOp) Clone() abstract.PhysicalOperator {
- columns := make([]*sqlparser.ColName, len(c.LHSColumns))
- copy(columns, c.LHSColumns)
- result := &CorrelatedSubQueryOp{
- Outer: c.Outer.Clone(),
- Inner: c.Inner.Clone(),
- Extracted: c.Extracted,
- LHSColumns: columns,
- }
- return result
-}
diff --git a/go/vt/vtgate/planbuilder/physical/derived.go b/go/vt/vtgate/planbuilder/physical/derived.go
deleted file mode 100644
index d897e699bd8..00000000000
--- a/go/vt/vtgate/planbuilder/physical/derived.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-type Derived struct {
- Source abstract.PhysicalOperator
-
- Query sqlparser.SelectStatement
- Alias string
- ColumnAliases sqlparser.Columns
-
- // Columns needed to feed other plans
- Columns []*sqlparser.ColName
- ColumnsOffset []int
-}
-
-var _ abstract.PhysicalOperator = (*Derived)(nil)
-
-// TableID implements the PhysicalOperator interface
-func (d *Derived) TableID() semantics.TableSet {
- return d.Source.TableID()
-}
-
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (d *Derived) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- return d.Source.UnsolvedPredicates(semTable)
-}
-
-// CheckValid implements the PhysicalOperator interface
-func (d *Derived) CheckValid() error {
- return d.Source.CheckValid()
-}
-
-// IPhysical implements the PhysicalOperator interface
-func (d *Derived) IPhysical() {}
-
-// Cost implements the PhysicalOperator interface
-func (d *Derived) Cost() int {
- return d.Source.Cost()
-}
-
-// Clone implements the PhysicalOperator interface
-func (d *Derived) Clone() abstract.PhysicalOperator {
- clone := *d
- clone.Source = d.Source.Clone()
- clone.ColumnAliases = sqlparser.CloneColumns(d.ColumnAliases)
- clone.Columns = make([]*sqlparser.ColName, 0, len(d.Columns))
- for _, x := range d.Columns {
- clone.Columns = append(clone.Columns, sqlparser.CloneRefOfColName(x))
- }
- clone.ColumnsOffset = make([]int, 0, len(d.ColumnsOffset))
- copy(clone.ColumnsOffset, d.ColumnsOffset)
- return &clone
-}
-
-// findOutputColumn returns the index on which the given name is found in the slice of
-// *sqlparser.SelectExprs of the derivedTree. The *sqlparser.SelectExpr must be of type
-// *sqlparser.AliasedExpr and match the given name.
-// If name is not present but the query's select expressions contain a *sqlparser.StarExpr
-// the function will return no error and an index equal to -1.
-// If name is not present and the query does not have a *sqlparser.StarExpr, the function
-// will return an unknown column error.
-func (d *Derived) findOutputColumn(name *sqlparser.ColName) (int, error) {
- hasStar := false
- for j, exp := range sqlparser.GetFirstSelect(d.Query).SelectExprs {
- switch exp := exp.(type) {
- case *sqlparser.AliasedExpr:
- if !exp.As.IsEmpty() && exp.As.Equal(name.Name) {
- return j, nil
- }
- if exp.As.IsEmpty() {
- col, ok := exp.Expr.(*sqlparser.ColName)
- if !ok {
- return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "complex expression needs column alias: %s", sqlparser.String(exp))
- }
- if name.Name.Equal(col.Name) {
- return j, nil
- }
- }
- case *sqlparser.StarExpr:
- hasStar = true
- }
- }
-
- // we have found a star but no matching *sqlparser.AliasedExpr, thus we return -1 with no error.
- if hasStar {
- return -1, nil
- }
- return 0, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadFieldError, "Unknown column '%s' in 'field list'", name.Name.String())
-}
-
-// IsMergeable is not a great name for this function. Suggestions for a better one are welcome!
-// This function will return false if the derived table inside it has to run on the vtgate side, and so can't be merged with subqueries
-// This logic can also be used to check if this is a derived table that can be had on the left hand side of a vtgate join.
-// Since vtgate joins are always nested loop joins, we can't execute them on the RHS
-// if they do some things, like LIMIT or GROUP BY on wrong columns
-func (d *Derived) IsMergeable(ctx *plancontext.PlanningContext) bool {
- return isMergeable(ctx, d.Query, d)
-}
diff --git a/go/vt/vtgate/planbuilder/physical/filter.go b/go/vt/vtgate/planbuilder/physical/filter.go
deleted file mode 100644
index 4647740faac..00000000000
--- a/go/vt/vtgate/planbuilder/physical/filter.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-type Filter struct {
- Source abstract.PhysicalOperator
- Predicates []sqlparser.Expr
-}
-
-var _ abstract.PhysicalOperator = (*Filter)(nil)
-
-// IPhysical implements the PhysicalOperator interface
-func (f *Filter) IPhysical() {}
-
-// TableID implements the PhysicalOperator interface
-func (f *Filter) TableID() semantics.TableSet {
- return f.Source.TableID()
-}
-
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (f *Filter) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- panic("implement me")
-}
-
-// CheckValid implements the PhysicalOperator interface
-func (f *Filter) CheckValid() error {
- return f.Source.CheckValid()
-}
-
-// Compact implements the PhysicalOperator interface
-func (f *Filter) Compact(semTable *semantics.SemTable) (abstract.Operator, error) {
- return f, nil
-}
-
-// Cost implements the PhysicalOperator interface
-func (f *Filter) Cost() int {
- return f.Source.Cost()
-}
-
-// Clone implements the PhysicalOperator interface
-func (f *Filter) Clone() abstract.PhysicalOperator {
- predicatesClone := make([]sqlparser.Expr, len(f.Predicates))
- copy(predicatesClone, f.Predicates)
- return &Filter{
- Source: f.Source.Clone(),
- Predicates: predicatesClone,
- }
-}
diff --git a/go/vt/vtgate/planbuilder/physical/operator_funcs.go b/go/vt/vtgate/planbuilder/physical/operator_funcs.go
deleted file mode 100644
index 16757c041b1..00000000000
--- a/go/vt/vtgate/planbuilder/physical/operator_funcs.go
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-// PushPredicate is used to push predicates. It pushed it as far down as is possible in the tree.
-// If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts,
-// where data is fetched from the LHS of the join to be used in the evaluation on the RHS
-func PushPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op abstract.PhysicalOperator) (abstract.PhysicalOperator, error) {
- switch op := op.(type) {
- case *Route:
- err := op.UpdateRoutingLogic(ctx, expr)
- if err != nil {
- return nil, err
- }
- newSrc, err := PushPredicate(ctx, expr, op.Source)
- if err != nil {
- return nil, err
- }
- op.Source = newSrc
- return op, err
- case *ApplyJoin:
- deps := ctx.SemTable.RecursiveDeps(expr)
- switch {
- case deps.IsSolvedBy(op.LHS.TableID()):
- newSrc, err := PushPredicate(ctx, expr, op.LHS)
- if err != nil {
- return nil, err
- }
- op.LHS = newSrc
- return op, err
- case deps.IsSolvedBy(op.RHS.TableID()):
- if !op.LeftJoin {
- newSrc, err := PushPredicate(ctx, expr, op.RHS)
- if err != nil {
- return nil, err
- }
- op.RHS = newSrc
- return op, err
- }
-
- // we are looking for predicates like `tbl.col = <>` or `<> = tbl.col`,
- // where tbl is on the rhs of the left outer join
- if cmp, isCmp := expr.(*sqlparser.ComparisonExpr); isCmp && cmp.Operator != sqlparser.NullSafeEqualOp &&
- (sqlparser.IsColName(cmp.Left) && ctx.SemTable.RecursiveDeps(cmp.Left).IsSolvedBy(op.RHS.TableID()) ||
- sqlparser.IsColName(cmp.Right) && ctx.SemTable.RecursiveDeps(cmp.Right).IsSolvedBy(op.RHS.TableID())) {
- // When the predicate we are pushing is using information from an outer table, we can
- // check whether the predicate is "null-intolerant" or not. Null-intolerant in this context means that
- // the predicate will not return true if the table columns are null.
- // Since an outer join is an inner join with the addition of all the rows from the left-hand side that
- // matched no rows on the right-hand, if we are later going to remove all the rows where the right-hand
- // side did not match, we might as well turn the join into an inner join.
-
- // This is based on the paper "Canonical Abstraction for Outerjoin Optimization" by J Rao et al
- op.LeftJoin = false
- newSrc, err := PushPredicate(ctx, expr, op.RHS)
- if err != nil {
- return nil, err
- }
- op.RHS = newSrc
- return op, err
- }
-
- // finally, if we can't turn the outer join into an inner,
- // we need to filter after the join has been evaluated
- return &Filter{
- Source: op,
- Predicates: []sqlparser.Expr{expr},
- }, nil
- case deps.IsSolvedBy(op.TableID()):
- bvName, cols, predicate, err := BreakExpressionInLHSandRHS(ctx, expr, op.LHS.TableID())
- if err != nil {
- return nil, err
- }
- out, idxs, err := PushOutputColumns(ctx, op.LHS, cols...)
- if err != nil {
- return nil, err
- }
- op.LHS = out
- for i, idx := range idxs {
- op.Vars[bvName[i]] = idx
- }
- newSrc, err := PushPredicate(ctx, predicate, op.RHS)
- if err != nil {
- return nil, err
- }
- op.RHS = newSrc
- op.Predicate = sqlparser.AndExpressions(op.Predicate, expr)
- return op, err
- }
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Cannot push predicate: %s", sqlparser.String(expr))
- case *Table:
- // We do not add the predicate to op.qtable because that is an immutable struct that should not be
- // changed by physical operators.
- return &Filter{
- Source: op,
- Predicates: []sqlparser.Expr{expr},
- }, nil
- case *Filter:
- op.Predicates = append(op.Predicates, expr)
- return op, nil
- case *Derived:
- tableInfo, err := ctx.SemTable.TableInfoForExpr(expr)
- if err != nil {
- if err == semantics.ErrMultipleTables {
- return nil, semantics.ProjError{Inner: vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: unable to split predicates to derived table: %s", sqlparser.String(expr))}
- }
- return nil, err
- }
- newExpr, err := semantics.RewriteDerivedTableExpression(expr, tableInfo)
- if err != nil {
- return nil, err
- }
- newSrc, err := PushPredicate(ctx, newExpr, op.Source)
- if err != nil {
- return nil, err
- }
- op.Source = newSrc
- return op, err
- default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "we cannot push predicates into %T", op)
- }
-}
-
-// PushOutputColumns will push the columns to the table they originate from,
-// making sure that intermediate operators pass the data through
-func PushOutputColumns(ctx *plancontext.PlanningContext, op abstract.PhysicalOperator, columns ...*sqlparser.ColName) (abstract.PhysicalOperator, []int, error) {
- switch op := op.(type) {
- case *Route:
- retOp, offsets, err := PushOutputColumns(ctx, op.Source, columns...)
- op.Source = retOp
- return op, offsets, err
- case *ApplyJoin:
- var toTheLeft []bool
- var lhs, rhs []*sqlparser.ColName
- for _, col := range columns {
- col.Qualifier.Qualifier = sqlparser.NewIdentifierCS("")
- if ctx.SemTable.RecursiveDeps(col).IsSolvedBy(op.LHS.TableID()) {
- lhs = append(lhs, col)
- toTheLeft = append(toTheLeft, true)
- } else {
- rhs = append(rhs, col)
- toTheLeft = append(toTheLeft, false)
- }
- }
- out, lhsOffset, err := PushOutputColumns(ctx, op.LHS, lhs...)
- if err != nil {
- return nil, nil, err
- }
- op.LHS = out
- out, rhsOffset, err := PushOutputColumns(ctx, op.RHS, rhs...)
- if err != nil {
- return nil, nil, err
- }
- op.RHS = out
-
- outputColumns := make([]int, len(toTheLeft))
- var l, r int
- for i, isLeft := range toTheLeft {
- outputColumns[i] = len(op.Columns)
- if isLeft {
- op.Columns = append(op.Columns, -lhsOffset[l]-1)
- l++
- } else {
- op.Columns = append(op.Columns, rhsOffset[r]+1)
- r++
- }
- }
- return op, outputColumns, nil
- case *Table:
- var offsets []int
- for _, col := range columns {
- exists := false
- for idx, opCol := range op.Columns {
- if sqlparser.EqualsRefOfColName(col, opCol) {
- exists = true
- offsets = append(offsets, idx)
- break
- }
- }
- if !exists {
- offsets = append(offsets, len(op.Columns))
- op.Columns = append(op.Columns, col)
- }
- }
- return op, offsets, nil
- case *Filter:
- newSrc, ints, err := PushOutputColumns(ctx, op.Source, columns...)
- op.Source = newSrc
- return op, ints, err
- case *Vindex:
- idx, err := op.PushOutputColumns(columns)
- return op, idx, err
- case *Derived:
- var noQualifierNames []*sqlparser.ColName
- var offsets []int
- if len(columns) == 0 {
- return op, nil, nil
- }
- for _, col := range columns {
- i, err := op.findOutputColumn(col)
- if err != nil {
- return nil, nil, err
- }
- var pos int
- op.ColumnsOffset, pos = addToIntSlice(op.ColumnsOffset, i)
- offsets = append(offsets, pos)
- // skip adding to columns as it exists already.
- if i > -1 {
- continue
- }
- op.Columns = append(op.Columns, col)
- noQualifierNames = append(noQualifierNames, sqlparser.NewColName(col.Name.String()))
- }
- if len(noQualifierNames) > 0 {
- _, _, err := PushOutputColumns(ctx, op.Source, noQualifierNames...)
- if err != nil {
- return nil, nil, err
- }
- }
- return op, offsets, nil
-
- default:
- return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "we cannot push output columns into %T", op)
- }
-}
-
-func addToIntSlice(columnOffset []int, valToAdd int) ([]int, int) {
- for idx, val := range columnOffset {
- if val == valToAdd {
- return columnOffset, idx
- }
- }
- columnOffset = append(columnOffset, valToAdd)
- return columnOffset, len(columnOffset) - 1
-}
-
-// RemovePredicate is used when we turn a predicate into a plan operator,
-// and the predicate needs to be removed as an AST construct
-func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op abstract.PhysicalOperator) (abstract.PhysicalOperator, error) {
- switch op := op.(type) {
- case *Route:
- newSrc, err := RemovePredicate(ctx, expr, op.Source)
- if err != nil {
- return nil, err
- }
- op.Source = newSrc
- return op, err
- case *ApplyJoin:
- isRemoved := false
- deps := ctx.SemTable.RecursiveDeps(expr)
- if deps.IsSolvedBy(op.LHS.TableID()) {
- newSrc, err := RemovePredicate(ctx, expr, op.LHS)
- if err != nil {
- return nil, err
- }
- op.LHS = newSrc
- isRemoved = true
- }
-
- if deps.IsSolvedBy(op.RHS.TableID()) {
- newSrc, err := RemovePredicate(ctx, expr, op.RHS)
- if err != nil {
- return nil, err
- }
- op.RHS = newSrc
- isRemoved = true
- }
-
- var keep []sqlparser.Expr
- for _, e := range sqlparser.SplitAndExpression(nil, op.Predicate) {
- if !sqlparser.EqualsExpr(expr, e) {
- keep = append(keep, e)
- isRemoved = true
- }
- }
- op.Predicate = sqlparser.AndExpressions(keep...)
-
- if !isRemoved {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "remove '%s' predicate not supported on cross-shard join query", sqlparser.String(expr))
- }
- return op, nil
- case *Filter:
- idx := -1
- for i, predicate := range op.Predicates {
- if sqlparser.EqualsExpr(predicate, expr) {
- idx = i
- }
- }
- if idx == -1 {
- // the predicate is not here. let's remove it from our source
- newSrc, err := RemovePredicate(ctx, expr, op.Source)
- if err != nil {
- return nil, err
- }
- op.Source = newSrc
- return op, nil
- }
- if len(op.Predicates) == 1 {
- // no predicates left on this operator, so we just remove it
- return op.Source, nil
- }
-
- // remove the predicate from this filter
- op.Predicates = append(op.Predicates[:idx], op.Predicates[idx+1:]...)
- return op, nil
-
- default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "this should not happen - tried to remove predicate from table op")
- }
-}
-
-// BreakExpressionInLHSandRHS takes an expression and
-// extracts the parts that are coming from one of the sides into `ColName`s that are needed
-func BreakExpressionInLHSandRHS(
- ctx *plancontext.PlanningContext,
- expr sqlparser.Expr,
- lhs semantics.TableSet,
-) (bvNames []string, columns []*sqlparser.ColName, rewrittenExpr sqlparser.Expr, err error) {
- rewrittenExpr = sqlparser.CloneExpr(expr)
- _ = sqlparser.Rewrite(rewrittenExpr, nil, func(cursor *sqlparser.Cursor) bool {
- switch node := cursor.Node().(type) {
- case *sqlparser.ColName:
- deps := ctx.SemTable.RecursiveDeps(node)
- if deps.NumberOfTables() == 0 {
- err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown column. has the AST been copied?")
- return false
- }
- if deps.IsSolvedBy(lhs) {
- node.Qualifier.Qualifier = sqlparser.NewIdentifierCS("")
- columns = append(columns, node)
- bvName := node.CompliantName()
- bvNames = append(bvNames, bvName)
- arg := sqlparser.NewArgument(bvName)
- // we are replacing one of the sides of the comparison with an argument,
- // but we don't want to lose the type information we have, so we copy it over
- ctx.SemTable.CopyExprInfo(node, arg)
- cursor.Replace(arg)
- }
- }
- return true
- })
- if err != nil {
- return nil, nil, nil, err
- }
- ctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr)
- return
-}
diff --git a/go/vt/vtgate/planbuilder/physical/route_planning.go b/go/vt/vtgate/planbuilder/physical/route_planning.go
deleted file mode 100644
index caedb1ab184..00000000000
--- a/go/vt/vtgate/planbuilder/physical/route_planning.go
+++ /dev/null
@@ -1,1313 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "vitess.io/vitess/go/mysql/collations"
- "vitess.io/vitess/go/vt/key"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/engine"
- "vitess.io/vitess/go/vt/vtgate/evalengine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
- "vitess.io/vitess/go/vt/vtgate/semantics"
- "vitess.io/vitess/go/vt/vtgate/vindexes"
-
- querypb "vitess.io/vitess/go/vt/proto/query"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
-)
-
-type (
- tableSetPair struct {
- left, right semantics.TableSet
- }
-
- opCacheMap map[tableSetPair]abstract.PhysicalOperator
-)
-
-func CreatePhysicalOperator(ctx *plancontext.PlanningContext, opTree abstract.LogicalOperator) (abstract.PhysicalOperator, error) {
- switch op := opTree.(type) {
- case *abstract.QueryGraph:
- return optimizeQueryGraph(ctx, op)
- case *abstract.Join:
- return optimizeJoin(ctx, op)
- case *abstract.Derived:
- return optimizeDerived(ctx, op)
- case *abstract.SubQuery:
- return optimizeSubQuery(ctx, op)
- case *abstract.Vindex:
- return optimizeVindex(ctx, op)
- case *abstract.Concatenate:
- return optimizeUnion(ctx, op)
- case *abstract.Filter:
- return optimizeFilter(ctx, op)
- case *abstract.Update:
- return createPhysicalOperatorFromUpdate(ctx, op)
- case *abstract.Delete:
- return createPhysicalOperatorFromDelete(ctx, op)
- default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid operator tree: %T", op)
- }
-}
-
-func optimizeFilter(ctx *plancontext.PlanningContext, op *abstract.Filter) (abstract.PhysicalOperator, error) {
- src, err := CreatePhysicalOperator(ctx, op.Source)
- if err != nil {
- return nil, err
- }
-
- filter := &Filter{
- Predicates: op.Predicates,
- }
-
- if route, ok := src.(*Route); ok {
- // let's push the filter into the route
- filter.Source = route.Source
- route.Source = filter
- return route, nil
- }
-
- filter.Source = src
-
- return filter, nil
-}
-
-func optimizeDerived(ctx *plancontext.PlanningContext, op *abstract.Derived) (abstract.PhysicalOperator, error) {
- opInner, err := CreatePhysicalOperator(ctx, op.Inner)
- if err != nil {
- return nil, err
- }
-
- innerRoute, ok := opInner.(*Route)
- if !ok {
- return buildDerivedOp(op, opInner), nil
- }
-
- derived := &Derived{
- Source: innerRoute.Source,
- Query: op.Sel,
- Alias: op.Alias,
- ColumnAliases: op.ColumnAliases,
- }
-
- if innerRoute.RouteOpCode == engine.EqualUnique {
- // no need to check anything if we are sure that we will only hit a single shard
- } else if !derived.IsMergeable(ctx) {
- return buildDerivedOp(op, opInner), nil
- }
-
- innerRoute.Source = derived
- return innerRoute, nil
-}
-
-func buildDerivedOp(op *abstract.Derived, opInner abstract.PhysicalOperator) *Derived {
- return &Derived{
- Source: opInner,
- Query: op.Sel,
- Alias: op.Alias,
- ColumnAliases: op.ColumnAliases,
- }
-}
-
-func optimizeJoin(ctx *plancontext.PlanningContext, op *abstract.Join) (abstract.PhysicalOperator, error) {
- lhs, err := CreatePhysicalOperator(ctx, op.LHS)
- if err != nil {
- return nil, err
- }
- rhs, err := CreatePhysicalOperator(ctx, op.RHS)
- if err != nil {
- return nil, err
- }
-
- return mergeOrJoin(ctx, lhs, rhs, sqlparser.SplitAndExpression(nil, op.Predicate), !op.LeftJoin)
-}
-
-func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *abstract.QueryGraph) (abstract.PhysicalOperator, error) {
- switch {
- case ctx.PlannerVersion == querypb.ExecuteOptions_Gen4Left2Right:
- return leftToRightSolve(ctx, op)
- default:
- return greedySolve(ctx, op)
- }
-}
-
-func createPhysicalOperatorFromUpdate(ctx *plancontext.PlanningContext, op *abstract.Update) (abstract.PhysicalOperator, error) {
- vindexTable, opCode, dest, err := buildVindexTableForDML(ctx, op.TableInfo, op.Table, "update")
- if err != nil {
- return nil, err
- }
-
- vp, cvv, ovq, err := getUpdateVindexInformation(op, vindexTable)
- if err != nil {
- return nil, err
- }
-
- r := &Route{
- Source: &Update{
- QTable: op.Table,
- VTable: vindexTable,
- Assignments: op.Assignments,
- ChangedVindexValues: cvv,
- OwnedVindexQuery: ovq,
- AST: op.AST,
- },
- RouteOpCode: opCode,
- Keyspace: vindexTable.Keyspace,
- VindexPreds: vp,
- TargetDestination: dest,
- }
-
- for _, predicate := range op.Table.Predicates {
- err := r.UpdateRoutingLogic(ctx, predicate)
- if err != nil {
- return nil, err
- }
- }
-
- if r.RouteOpCode == engine.Scatter && op.AST.Limit != nil {
- // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07)
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi shard update with limit is not supported")
- }
-
- return r, nil
-}
-
-func buildVindexTableForDML(ctx *plancontext.PlanningContext, tableInfo semantics.TableInfo, table *abstract.QueryTable, dmlType string) (*vindexes.Table, engine.Opcode, key.Destination, error) {
- vindexTable := tableInfo.GetVindexTable()
- opCode := engine.Unsharded
- if vindexTable.Keyspace.Sharded {
- opCode = engine.Scatter
- }
-
- var dest key.Destination
- var typ topodatapb.TabletType
- var err error
- tblName, ok := table.Alias.Expr.(sqlparser.TableName)
- if ok {
- _, _, _, typ, dest, err = ctx.VSchema.FindTableOrVindex(tblName)
- if err != nil {
- return nil, 0, nil, err
- }
- if dest != nil {
- if typ != topodatapb.TabletType_PRIMARY {
- return nil, 0, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.InnodbReadOnly, "unsupported: %v statement with a replica target", dmlType)
- }
- // we are dealing with an explicitly targeted UPDATE
- opCode = engine.ByDestination
- }
- }
- return vindexTable, opCode, dest, nil
-}
-
-func createPhysicalOperatorFromDelete(ctx *plancontext.PlanningContext, op *abstract.Delete) (*Route, error) {
- var ovq string
- vindexTable, opCode, dest, err := buildVindexTableForDML(ctx, op.TableInfo, op.Table, "delete")
- if err != nil {
- return nil, err
- }
-
- primaryVindex, vindexAndPredicates, err := getVindexInformation(op.TableID(), op.Table.Predicates, vindexTable)
- if err != nil {
- return nil, err
- }
-
- if len(vindexTable.Owned) > 0 {
- tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: op.Table.Alias.As}
- ovq = generateOwnedVindexQuery(tblExpr, op.AST, vindexTable, primaryVindex.Columns)
- }
-
- r := &Route{
- Source: &Delete{
- QTable: op.Table,
- VTable: vindexTable,
- OwnedVindexQuery: ovq,
- AST: op.AST,
- },
- RouteOpCode: opCode,
- Keyspace: vindexTable.Keyspace,
- VindexPreds: vindexAndPredicates,
- TargetDestination: dest,
- }
-
- for _, predicate := range op.Table.Predicates {
- err := r.UpdateRoutingLogic(ctx, predicate)
- if err != nil {
- return nil, err
- }
- }
-
- if r.RouteOpCode == engine.Scatter && op.AST.Limit != nil {
- // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07)
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "multi shard delete with limit is not supported")
- }
- return r, nil
-}
-
-func generateOwnedVindexQuery(tblExpr sqlparser.TableExpr, del *sqlparser.Delete, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) string {
- buf := sqlparser.NewTrackedBuffer(nil)
- for idx, col := range ksidCols {
- if idx == 0 {
- buf.Myprintf("select %v", col)
- } else {
- buf.Myprintf(", %v", col)
- }
- }
- for _, cv := range table.Owned {
- for _, column := range cv.Columns {
- buf.Myprintf(", %v", column)
- }
- }
- buf.Myprintf(" from %v%v%v%v for update", tblExpr, del.Where, del.OrderBy, del.Limit)
- return buf.String()
-}
-
-func getUpdateVindexInformation(op *abstract.Update, vindexTable *vindexes.Table) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, error) {
- if !vindexTable.Keyspace.Sharded {
- return nil, nil, "", nil
- }
- primaryVindex, vindexAndPredicates, err := getVindexInformation(op.TableID(), op.Table.Predicates, vindexTable)
- if err != nil {
- return nil, nil, "", err
- }
-
- changedVindexValues, ownedVindexQuery, err := buildChangedVindexesValues(op.AST, vindexTable, primaryVindex.Columns)
- if err != nil {
- return nil, nil, "", err
- }
- return vindexAndPredicates, changedVindexValues, ownedVindexQuery, nil
-}
-
-/*
- The greedy planner will plan a query by finding first finding the best route plan for every table.
- Then, iteratively, it finds the cheapest join that can be produced between the remaining plans,
- and removes the two inputs to this cheapest plan and instead adds the join.
- As an optimization, it first only considers joining tables that have predicates defined between them
-*/
-func greedySolve(ctx *plancontext.PlanningContext, qg *abstract.QueryGraph) (abstract.PhysicalOperator, error) {
- routeOps, err := seedOperatorList(ctx, qg)
- planCache := opCacheMap{}
- if err != nil {
- return nil, err
- }
-
- op, err := mergeRoutes(ctx, qg, routeOps, planCache, false)
- if err != nil {
- return nil, err
- }
- return op, nil
-}
-
-func leftToRightSolve(ctx *plancontext.PlanningContext, qg *abstract.QueryGraph) (abstract.PhysicalOperator, error) {
- plans, err := seedOperatorList(ctx, qg)
- if err != nil {
- return nil, err
- }
-
- var acc abstract.PhysicalOperator
- for _, plan := range plans {
- if acc == nil {
- acc = plan
- continue
- }
- joinPredicates := qg.GetPredicates(acc.TableID(), plan.TableID())
- acc, err = mergeOrJoin(ctx, acc, plan, joinPredicates, true)
- if err != nil {
- return nil, err
- }
- }
-
- return acc, nil
-}
-
-// seedOperatorList returns a route for each table in the qg
-func seedOperatorList(ctx *plancontext.PlanningContext, qg *abstract.QueryGraph) ([]abstract.PhysicalOperator, error) {
- plans := make([]abstract.PhysicalOperator, len(qg.Tables))
-
- // we start by seeding the table with the single routes
- for i, table := range qg.Tables {
- solves := ctx.SemTable.TableSetFor(table.Alias)
- plan, err := createRoute(ctx, table, solves)
- if err != nil {
- return nil, err
- }
- if qg.NoDeps != nil {
- plan.Source = &Filter{
- Source: plan.Source,
- Predicates: []sqlparser.Expr{qg.NoDeps},
- }
- }
- plans[i] = plan
- }
- return plans, nil
-}
-
-func createRoute(ctx *plancontext.PlanningContext, table *abstract.QueryTable, solves semantics.TableSet) (*Route, error) {
- if table.IsInfSchema {
- return createInfSchemaRoute(ctx, table)
- }
- vschemaTable, _, _, _, target, err := ctx.VSchema.FindTableOrVindex(table.Table)
- if target != nil {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: SELECT with a target destination")
- }
- if err != nil {
- return nil, err
- }
- if vschemaTable.Name.String() != table.Table.Name.String() {
- // we are dealing with a routed table
- name := table.Table.Name
- table.Table.Name = vschemaTable.Name
- astTable, ok := table.Alias.Expr.(sqlparser.TableName)
- if !ok {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] a derived table should never be a routed table")
- }
- realTableName := sqlparser.NewIdentifierCS(vschemaTable.Name.String())
- astTable.Name = realTableName
- if table.Alias.As.IsEmpty() {
- // if the user hasn't specified an alias, we'll insert one here so the old table name still works
- table.Alias.As = sqlparser.NewIdentifierCS(name.String())
- }
- }
- plan := &Route{
- Source: &Table{
- QTable: table,
- VTable: vschemaTable,
- },
- Keyspace: vschemaTable.Keyspace,
- }
-
- for _, columnVindex := range vschemaTable.ColumnVindexes {
- plan.VindexPreds = append(plan.VindexPreds, &VindexPlusPredicates{ColVindex: columnVindex, TableID: solves})
- }
-
- switch {
- case vschemaTable.Type == vindexes.TypeSequence:
- plan.RouteOpCode = engine.Next
- case vschemaTable.Type == vindexes.TypeReference:
- plan.RouteOpCode = engine.Reference
- case !vschemaTable.Keyspace.Sharded:
- plan.RouteOpCode = engine.Unsharded
- case vschemaTable.Pinned != nil:
- // Pinned tables have their keyspace ids already assigned.
- // Use the Binary vindex, which is the identity function
- // for keyspace id.
- plan.RouteOpCode = engine.EqualUnique
- vindex, _ := vindexes.NewBinary("binary", nil)
- plan.Selected = &VindexOption{
- Ready: true,
- Values: []evalengine.Expr{evalengine.NewLiteralString(vschemaTable.Pinned, collations.TypedCollation{})},
- ValueExprs: nil,
- Predicates: nil,
- OpCode: engine.EqualUnique,
- FoundVindex: vindex,
- Cost: Cost{
- OpCode: engine.EqualUnique,
- },
- }
- default:
- plan.RouteOpCode = engine.Scatter
- }
- for _, predicate := range table.Predicates {
- err = plan.UpdateRoutingLogic(ctx, predicate)
- if err != nil {
- return nil, err
- }
- }
-
- if plan.RouteOpCode == engine.Scatter && len(table.Predicates) > 0 {
- // If we have a scatter query, it's worth spending a little extra time seeing if we can't improve it
- for _, pred := range table.Predicates {
- rewritten := tryRewriteOrToIn(pred)
- if rewritten != nil {
- err = plan.UpdateRoutingLogic(ctx, rewritten)
- if err != nil {
- return nil, err
- }
- }
- }
- }
-
- return plan, nil
-}
-
-func tryRewriteOrToIn(expr sqlparser.Expr) sqlparser.Expr {
- rewrote := false
- newPred := sqlparser.Rewrite(sqlparser.CloneExpr(expr), func(cursor *sqlparser.Cursor) bool {
- _, ok := cursor.Node().(*sqlparser.OrExpr)
- return ok
- }, func(cursor *sqlparser.Cursor) bool {
- // we are looking for the pattern WHERE c = 1 or c = 2
- switch or := cursor.Node().(type) {
- case *sqlparser.OrExpr:
- lftCmp, ok := or.Left.(*sqlparser.ComparisonExpr)
- if !ok {
- return true
- }
- rgtCmp, ok := or.Right.(*sqlparser.ComparisonExpr)
- if !ok {
- return true
- }
-
- col, ok := lftCmp.Left.(*sqlparser.ColName)
- if !ok || !sqlparser.EqualsExpr(lftCmp.Left, rgtCmp.Left) {
- return true
- }
-
- var tuple sqlparser.ValTuple
- switch lftCmp.Operator {
- case sqlparser.EqualOp:
- tuple = sqlparser.ValTuple{lftCmp.Right}
- case sqlparser.InOp:
- lft, ok := lftCmp.Right.(sqlparser.ValTuple)
- if !ok {
- return true
- }
- tuple = lft
- default:
- return true
- }
-
- switch rgtCmp.Operator {
- case sqlparser.EqualOp:
- tuple = append(tuple, rgtCmp.Right)
- case sqlparser.InOp:
- lft, ok := rgtCmp.Right.(sqlparser.ValTuple)
- if !ok {
- return true
- }
- tuple = append(tuple, lft...)
- default:
- return true
- }
-
- rewrote = true
- cursor.Replace(&sqlparser.ComparisonExpr{
- Operator: sqlparser.InOp,
- Left: col,
- Right: tuple,
- })
- }
- return true
- })
- if rewrote {
- return newPred.(sqlparser.Expr)
- }
- return nil
-}
-
-func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *abstract.QueryTable) (*Route, error) {
- ks, err := ctx.VSchema.AnyKeyspace()
- if err != nil {
- return nil, err
- }
- var src abstract.PhysicalOperator = &Table{
- QTable: table,
- VTable: &vindexes.Table{
- Name: table.Table.Name,
- Keyspace: ks,
- },
- }
- r := &Route{
- RouteOpCode: engine.DBA,
- Source: src,
- Keyspace: ks,
- }
- for _, pred := range table.Predicates {
- isTableSchema, bvName, out, err := extractInfoSchemaRoutingPredicate(pred, ctx.ReservedVars)
- if err != nil {
- return nil, err
- }
- if out == nil {
- // we didn't find a predicate to use for routing, continue to look for next predicate
- continue
- }
-
- if isTableSchema {
- r.SysTableTableSchema = append(r.SysTableTableSchema, out)
- } else {
- if r.SysTableTableName == nil {
- r.SysTableTableName = map[string]evalengine.Expr{}
- }
- r.SysTableTableName[bvName] = out
- }
- }
- return r, nil
-}
-
-func mergeRoutes(ctx *plancontext.PlanningContext, qg *abstract.QueryGraph, physicalOps []abstract.PhysicalOperator, planCache opCacheMap, crossJoinsOK bool) (abstract.PhysicalOperator, error) {
- if len(physicalOps) == 0 {
- return nil, nil
- }
- for len(physicalOps) > 1 {
- bestTree, lIdx, rIdx, err := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK)
- if err != nil {
- return nil, err
- }
- // if we found a plan, we'll replace the two plans that were joined with the join plan created
- if bestTree != nil {
- // we remove one plan, and replace the other
- if rIdx > lIdx {
- physicalOps = removeAt(physicalOps, rIdx)
- physicalOps = removeAt(physicalOps, lIdx)
- } else {
- physicalOps = removeAt(physicalOps, lIdx)
- physicalOps = removeAt(physicalOps, rIdx)
- }
- physicalOps = append(physicalOps, bestTree)
- } else {
- if crossJoinsOK {
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "should not happen")
- }
- // we will only fail to find a join plan when there are only cross joins left
- // when that happens, we switch over to allow cross joins as well.
- // this way we prioritize joining physicalOps with predicates first
- crossJoinsOK = true
- }
- }
- return physicalOps[0], nil
-}
-
-func removeAt(plans []abstract.PhysicalOperator, idx int) []abstract.PhysicalOperator {
- return append(plans[:idx], plans[idx+1:]...)
-}
-
-func findBestJoin(
- ctx *plancontext.PlanningContext,
- qg *abstract.QueryGraph,
- plans []abstract.PhysicalOperator,
- planCache opCacheMap,
- crossJoinsOK bool,
-) (bestPlan abstract.PhysicalOperator, lIdx int, rIdx int, err error) {
- for i, lhs := range plans {
- for j, rhs := range plans {
- if i == j {
- continue
- }
- joinPredicates := qg.GetPredicates(lhs.TableID(), rhs.TableID())
- if len(joinPredicates) == 0 && !crossJoinsOK {
- // if there are no predicates joining the two tables,
- // creating a join between them would produce a
- // cartesian product, which is almost always a bad idea
- continue
- }
- plan, err := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates)
- if err != nil {
- return nil, 0, 0, err
- }
- if bestPlan == nil || plan.Cost() < bestPlan.Cost() {
- bestPlan = plan
- // remember which plans we based on, so we can remove them later
- lIdx = i
- rIdx = j
- }
- }
- }
- return bestPlan, lIdx, rIdx, nil
-}
-
-func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs abstract.PhysicalOperator, joinPredicates []sqlparser.Expr) (abstract.PhysicalOperator, error) {
- solves := tableSetPair{left: lhs.TableID(), right: rhs.TableID()}
- cachedPlan := cm[solves]
- if cachedPlan != nil {
- return cachedPlan, nil
- }
-
- join, err := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true)
- if err != nil {
- return nil, err
- }
- cm[solves] = join
- return join, nil
-}
-
-// requiresSwitchingSides will return true if any of the operators with the root from the given operator tree
-// is of the type that should not be on the RHS of a join
-func requiresSwitchingSides(ctx *plancontext.PlanningContext, op abstract.PhysicalOperator) bool {
- required := false
-
- _ = VisitOperators(op, func(current abstract.PhysicalOperator) (bool, error) {
- derived, isDerived := current.(*Derived)
-
- if isDerived && !derived.IsMergeable(ctx) {
- required = true
-
- return false, nil
- }
-
- return true, nil
- })
-
- return required
-}
-
-func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs abstract.PhysicalOperator, joinPredicates []sqlparser.Expr, inner bool) (abstract.PhysicalOperator, error) {
- merger := func(a, b *Route) (*Route, error) {
- return createRouteOperatorForJoin(a, b, joinPredicates, inner)
- }
-
- newPlan, _ := tryMerge(ctx, lhs, rhs, joinPredicates, merger)
- if newPlan != nil {
- return newPlan, nil
- }
-
- if len(joinPredicates) > 0 && requiresSwitchingSides(ctx, rhs) {
- if !inner {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: LEFT JOIN not supported for derived tables")
- }
-
- if requiresSwitchingSides(ctx, lhs) {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: JOIN not supported between derived tables")
- }
-
- join := &ApplyJoin{
- LHS: rhs.Clone(),
- RHS: lhs.Clone(),
- Vars: map[string]int{},
- LeftJoin: !inner,
- }
-
- return pushJoinPredicates(ctx, joinPredicates, join)
- }
-
- join := &ApplyJoin{
- LHS: lhs.Clone(),
- RHS: rhs.Clone(),
- Vars: map[string]int{},
- LeftJoin: !inner,
- }
-
- return pushJoinPredicates(ctx, joinPredicates, join)
-}
-
-func createRouteOperatorForJoin(aRoute, bRoute *Route, joinPredicates []sqlparser.Expr, inner bool) (*Route, error) {
- // append system table names from both the routes.
- sysTableName := aRoute.SysTableTableName
- if sysTableName == nil {
- sysTableName = bRoute.SysTableTableName
- } else {
- for k, v := range bRoute.SysTableTableName {
- sysTableName[k] = v
- }
- }
-
- r := &Route{
- RouteOpCode: aRoute.RouteOpCode,
- Keyspace: aRoute.Keyspace,
- VindexPreds: append(aRoute.VindexPreds, bRoute.VindexPreds...),
- SysTableTableSchema: append(aRoute.SysTableTableSchema, bRoute.SysTableTableSchema...),
- SeenPredicates: append(aRoute.SeenPredicates, bRoute.SeenPredicates...),
- SysTableTableName: sysTableName,
- Source: &ApplyJoin{
- LHS: aRoute.Source,
- RHS: bRoute.Source,
- Vars: map[string]int{},
- LeftJoin: !inner,
- Predicate: sqlparser.AndExpressions(joinPredicates...),
- },
- }
-
- if aRoute.SelectedVindex() == bRoute.SelectedVindex() {
- r.Selected = aRoute.Selected
- }
-
- return r, nil
-}
-
-type mergeFunc func(a, b *Route) (*Route, error)
-
-func operatorsToRoutes(a, b abstract.PhysicalOperator) (*Route, *Route) {
- aRoute, ok := a.(*Route)
- if !ok {
- return nil, nil
- }
- bRoute, ok := b.(*Route)
- if !ok {
- return nil, nil
- }
- return aRoute, bRoute
-}
-
-func tryMerge(
- ctx *plancontext.PlanningContext,
- a, b abstract.PhysicalOperator,
- joinPredicates []sqlparser.Expr,
- merger mergeFunc,
-) (abstract.PhysicalOperator, error) {
- aRoute, bRoute := operatorsToRoutes(a.Clone(), b.Clone())
- if aRoute == nil || bRoute == nil {
- return nil, nil
- }
-
- sameKeyspace := aRoute.Keyspace == bRoute.Keyspace
-
- if sameKeyspace || (isDualTable(aRoute) || isDualTable(bRoute)) {
- tree, err := tryMergeReferenceTable(aRoute, bRoute, merger)
- if tree != nil || err != nil {
- return tree, err
- }
- }
-
- switch aRoute.RouteOpCode {
- case engine.Unsharded, engine.DBA:
- if aRoute.RouteOpCode == bRoute.RouteOpCode && sameKeyspace {
- return merger(aRoute, bRoute)
- }
- case engine.EqualUnique:
- // If the two routes fully match, they can be merged together.
- if bRoute.RouteOpCode == engine.EqualUnique {
- aVdx := aRoute.SelectedVindex()
- bVdx := bRoute.SelectedVindex()
- aExpr := aRoute.VindexExpressions()
- bExpr := bRoute.VindexExpressions()
- if aVdx == bVdx && gen4ValuesEqual(ctx, aExpr, bExpr) {
- return merger(aRoute, bRoute)
- }
- }
-
- // If the two routes don't match, fall through to the next case and see if we
- // can merge via join predicates instead.
- fallthrough
-
- case engine.Scatter, engine.IN, engine.None:
- if len(joinPredicates) == 0 {
- // If we are doing two Scatters, we have to make sure that the
- // joins are on the correct vindex to allow them to be merged
- // no join predicates - no vindex
- return nil, nil
- }
-
- if !sameKeyspace {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard correlated subquery")
- }
-
- canMerge := canMergeOnFilters(ctx, aRoute, bRoute, joinPredicates)
- if !canMerge {
- return nil, nil
- }
- r, err := merger(aRoute, bRoute)
- if err != nil {
- return nil, err
- }
-
- // If we have a `None` route opcode, we want to keep it -
- // we only try to find a better Vindex for other route opcodes
- if aRoute.RouteOpCode != engine.None {
- r.PickBestAvailableVindex()
- }
-
- return r, nil
- }
- return nil, nil
-}
-
-func isDualTable(route *Route) bool {
- sources := leaves(route)
- if len(sources) > 1 {
- return false
- }
- src, ok := sources[0].(*Table)
- if !ok {
- return false
- }
- return src.VTable.Name.String() == "dual" && src.QTable.Table.Qualifier.IsEmpty()
-}
-
-func leaves(op abstract.Operator) (sources []abstract.Operator) {
- switch op := op.(type) {
- // these are the leaves
- case *abstract.QueryGraph, *abstract.Vindex, *Table:
- return []abstract.Operator{op}
-
- // logical
- case *abstract.Concatenate:
- for _, source := range op.Sources {
- sources = append(sources, leaves(source)...)
- }
- return
- case *abstract.Derived:
- return []abstract.Operator{op.Inner}
- case *abstract.Join:
- return []abstract.Operator{op.LHS, op.RHS}
- case *abstract.SubQuery:
- sources = []abstract.Operator{op.Outer}
- for _, inner := range op.Inner {
- sources = append(sources, inner.Inner)
- }
- return
- // physical
- case *ApplyJoin:
- return []abstract.Operator{op.LHS, op.RHS}
- case *Filter:
- return []abstract.Operator{op.Source}
- case *Route:
- return []abstract.Operator{op.Source}
- }
-
- panic(fmt.Sprintf("leaves unknown type: %T", op))
-}
-
-func tryMergeReferenceTable(aRoute, bRoute *Route, merger mergeFunc) (*Route, error) {
- var (
- // if either side is a reference table, we can just merge it and use the opcode of the other side
- opCode engine.Opcode
- vindex *VindexOption
- ks *vindexes.Keyspace
- )
-
- switch {
- case aRoute.RouteOpCode == engine.Reference:
- vindex = bRoute.Selected
- opCode = bRoute.RouteOpCode
- ks = bRoute.Keyspace
- case bRoute.RouteOpCode == engine.Reference:
- vindex = aRoute.Selected
- opCode = aRoute.RouteOpCode
- ks = aRoute.Keyspace
- default:
- return nil, nil
- }
-
- r, err := merger(aRoute, bRoute)
- if err != nil {
- return nil, err
- }
- r.RouteOpCode = opCode
- r.Selected = vindex
- r.Keyspace = ks
- return r, nil
-}
-
-func canMergeOnFilter(ctx *plancontext.PlanningContext, a, b *Route, predicate sqlparser.Expr) bool {
- comparison, ok := predicate.(*sqlparser.ComparisonExpr)
- if !ok {
- return false
- }
- if comparison.Operator != sqlparser.EqualOp {
- return false
- }
- left := comparison.Left
- right := comparison.Right
-
- lVindex := findColumnVindex(ctx, a, left)
- if lVindex == nil {
- left, right = right, left
- lVindex = findColumnVindex(ctx, a, left)
- }
- if lVindex == nil || !lVindex.IsUnique() {
- return false
- }
- rVindex := findColumnVindex(ctx, b, right)
- if rVindex == nil {
- return false
- }
- return rVindex == lVindex
-}
-
-func findColumnVindex(ctx *plancontext.PlanningContext, a abstract.PhysicalOperator, exp sqlparser.Expr) vindexes.SingleColumn {
- _, isCol := exp.(*sqlparser.ColName)
- if !isCol {
- return nil
- }
-
- exp = unwrapDerivedTables(ctx, exp)
- if exp == nil {
- return nil
- }
-
- var singCol vindexes.SingleColumn
-
- // for each equality expression that exp has with other column name, we check if it
- // can be solved by any table in our routeTree. If an equality expression can be solved,
- // we check if the equality expression and our table share the same vindex, if they do:
- // the method will return the associated vindexes.SingleColumn.
- for _, expr := range ctx.SemTable.GetExprAndEqualities(exp) {
- col, isCol := expr.(*sqlparser.ColName)
- if !isCol {
- continue
- }
-
- deps := ctx.SemTable.RecursiveDeps(expr)
-
- _ = VisitOperators(a, func(rel abstract.PhysicalOperator) (bool, error) {
- to, isTableOp := rel.(abstract.IntroducesTable)
- if !isTableOp {
- return true, nil
- }
- if deps.IsSolvedBy(to.GetQTable().ID) {
- for _, vindex := range to.GetVTable().ColumnVindexes {
- sC, isSingle := vindex.Vindex.(vindexes.SingleColumn)
- if isSingle && vindex.Columns[0].Equal(col.Name) {
- singCol = sC
- return false, io.EOF
- }
- }
- }
- return false, nil
- })
- if singCol != nil {
- return singCol
- }
- }
-
- return singCol
-}
-
-// unwrapDerivedTables we want to find the bottom layer of derived tables
-// nolint
-func unwrapDerivedTables(ctx *plancontext.PlanningContext, exp sqlparser.Expr) sqlparser.Expr {
- for {
- // if we are dealing with derived tables in derived tables
- tbl, err := ctx.SemTable.TableInfoForExpr(exp)
- if err != nil {
- return nil
- }
- _, ok := tbl.(*semantics.DerivedTable)
- if !ok {
- break
- }
-
- exp, err = semantics.RewriteDerivedTableExpression(exp, tbl)
- if err != nil {
- return nil
- }
- exp = getColName(exp)
- if exp == nil {
- return nil
- }
- }
- return exp
-}
-
-func getColName(exp sqlparser.Expr) *sqlparser.ColName {
- switch exp := exp.(type) {
- case *sqlparser.ColName:
- return exp
- case *sqlparser.Max, *sqlparser.Min:
- aggr := exp.(sqlparser.AggrFunc).GetArg()
- colName, ok := aggr.(*sqlparser.ColName)
- if ok {
- return colName
- }
- }
- // for any other expression than a column, or the extremum of a column, we return nil
- return nil
-}
-
-func canMergeOnFilters(ctx *plancontext.PlanningContext, a, b *Route, joinPredicates []sqlparser.Expr) bool {
- for _, predicate := range joinPredicates {
- for _, expr := range sqlparser.SplitAndExpression(nil, predicate) {
- if canMergeOnFilter(ctx, a, b, expr) {
- return true
- }
- }
- }
- return false
-}
-
-// VisitOperators visits all the operators.
-func VisitOperators(op abstract.PhysicalOperator, f func(tbl abstract.PhysicalOperator) (bool, error)) error {
- kontinue, err := f(op)
- if err != nil {
- return err
- }
- if !kontinue {
- return nil
- }
-
- switch op := op.(type) {
- case *Table, *Vindex, *Update:
- // leaf - no children to visit
- case *Route:
- err := VisitOperators(op.Source, f)
- if err != nil {
- return err
- }
- case *ApplyJoin:
- err := VisitOperators(op.LHS, f)
- if err != nil {
- return err
- }
- err = VisitOperators(op.RHS, f)
- if err != nil {
- return err
- }
- case *Filter:
- err := VisitOperators(op.Source, f)
- if err != nil {
- return err
- }
- case *CorrelatedSubQueryOp:
- err := VisitOperators(op.Outer, f)
- if err != nil {
- return err
- }
- err = VisitOperators(op.Inner, f)
- if err != nil {
- return err
- }
- case *SubQueryOp:
- err := VisitOperators(op.Outer, f)
- if err != nil {
- return err
- }
- err = VisitOperators(op.Inner, f)
- if err != nil {
- return err
- }
- case *Derived:
- err := VisitOperators(op.Source, f)
- if err != nil {
- return err
- }
- case *Union:
- for _, source := range op.Sources {
- err := VisitOperators(source, f)
- if err != nil {
- return err
- }
- }
- default:
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown operator type while visiting - %T", op)
- }
- return nil
-}
-
-func optimizeUnion(ctx *plancontext.PlanningContext, op *abstract.Concatenate) (abstract.PhysicalOperator, error) {
- var sources []abstract.PhysicalOperator
-
- for _, source := range op.Sources {
- qt, err := CreatePhysicalOperator(ctx, source)
- if err != nil {
- return nil, err
- }
-
- sources = append(sources, qt)
- }
- return &Union{
- Sources: sources,
- SelectStmts: op.SelectStmts,
- Distinct: op.Distinct,
- Ordering: op.OrderBy,
- }, nil
-}
-
-func gen4ValuesEqual(ctx *plancontext.PlanningContext, a, b []sqlparser.Expr) bool {
- if len(a) != len(b) {
- return false
- }
-
- // TODO: check SemTable's columnEqualities for better plan
-
- for i, aExpr := range a {
- bExpr := b[i]
- if !gen4ValEqual(ctx, aExpr, bExpr) {
- return false
- }
- }
- return true
-}
-
-func gen4ValEqual(ctx *plancontext.PlanningContext, a, b sqlparser.Expr) bool {
- switch a := a.(type) {
- case *sqlparser.ColName:
- if b, ok := b.(*sqlparser.ColName); ok {
- if !a.Name.Equal(b.Name) {
- return false
- }
-
- return ctx.SemTable.DirectDeps(a) == ctx.SemTable.DirectDeps(b)
- }
- case sqlparser.Argument:
- b, ok := b.(sqlparser.Argument)
- if !ok {
- return false
- }
- return a == b
- case *sqlparser.Literal:
- b, ok := b.(*sqlparser.Literal)
- if !ok {
- return false
- }
- switch a.Type {
- case sqlparser.StrVal:
- switch b.Type {
- case sqlparser.StrVal:
- return a.Val == b.Val
- case sqlparser.HexVal:
- return hexEqual(b, a)
- }
- case sqlparser.HexVal:
- return hexEqual(a, b)
- case sqlparser.IntVal:
- if b.Type == (sqlparser.IntVal) {
- return a.Val == b.Val
- }
- }
- }
- return false
-}
-
-func hexEqual(a, b *sqlparser.Literal) bool {
- v, err := a.HexDecode()
- if err != nil {
- return false
- }
- switch b.Type {
- case sqlparser.StrVal:
- return bytes.Equal(v, b.Bytes())
- case sqlparser.HexVal:
- v2, err := b.HexDecode()
- if err != nil {
- return false
- }
- return bytes.Equal(v, v2)
- }
- return false
-}
-
-func pushJoinPredicates(
- ctx *plancontext.PlanningContext,
- exprs []sqlparser.Expr,
- op abstract.PhysicalOperator,
-) (abstract.PhysicalOperator, error) {
- if len(exprs) == 0 {
- return op, nil
- }
-
- switch op := op.(type) {
- case *ApplyJoin:
- return pushJoinPredicateOnJoin(ctx, exprs, op)
- case *Route:
- return pushJoinPredicateOnRoute(ctx, exprs, op)
- case *Table:
- return PushPredicate(ctx, sqlparser.AndExpressions(exprs...), op)
- case *Derived:
- return pushJoinPredicateOnDerived(ctx, exprs, op)
- case *Filter:
- op.Predicates = append(op.Predicates, exprs...)
- return op, nil
- default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown type %T pushJoinPredicates", op)
- }
-}
-
-func pushJoinPredicateOnRoute(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *Route) (abstract.PhysicalOperator, error) {
- for _, expr := range exprs {
- err := op.UpdateRoutingLogic(ctx, expr)
- if err != nil {
- return nil, err
- }
- }
- newSrc, err := pushJoinPredicates(ctx, exprs, op.Source)
- op.Source = newSrc
- return op, err
-}
-
-func pushJoinPredicateOnJoin(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, node *ApplyJoin) (abstract.PhysicalOperator, error) {
- node = node.Clone().(*ApplyJoin)
- var rhsPreds []sqlparser.Expr
- var lhsPreds []sqlparser.Expr
- var lhsVarsName []string
- for _, expr := range exprs {
- // We find the dependencies for the given expression and if they are solved entirely by one
- // side of the join tree, then we push the predicate there and do not break it into parts.
- // In case a predicate has no dependencies, then it is pushed to both sides so that we can filter
- // rows as early as possible making join cheaper on the vtgate level.
- depsForExpr := ctx.SemTable.RecursiveDeps(expr)
- singleSideDeps := false
- lhsTables := node.LHS.TableID()
- if depsForExpr.IsSolvedBy(lhsTables) {
- lhsPreds = append(lhsPreds, expr)
- singleSideDeps = true
- }
- if depsForExpr.IsSolvedBy(node.RHS.TableID()) {
- rhsPreds = append(rhsPreds, expr)
- singleSideDeps = true
- }
-
- if singleSideDeps {
- continue
- }
-
- bvName, cols, predicate, err := BreakExpressionInLHSandRHS(ctx, expr, lhsTables)
- if err != nil {
- return nil, err
- }
- node.LHSColumns = append(node.LHSColumns, cols...)
- lhsVarsName = append(lhsVarsName, bvName...)
- rhsPreds = append(rhsPreds, predicate)
- }
- if node.LHSColumns != nil && lhsVarsName != nil {
- newNode, offsets, err := PushOutputColumns(ctx, node.LHS, node.LHSColumns...)
- if err != nil {
- return nil, err
- }
- node.LHS = newNode
- for i, idx := range offsets {
- node.Vars[lhsVarsName[i]] = idx
- }
- }
- lhsPlan, err := pushJoinPredicates(ctx, lhsPreds, node.LHS)
- if err != nil {
- return nil, err
- }
-
- rhsPlan, err := pushJoinPredicates(ctx, rhsPreds, node.RHS)
- if err != nil {
- return nil, err
- }
-
- node.LHS = lhsPlan
- node.RHS = rhsPlan
- // If the predicate field is previously non-empty
- // keep that predicate too
- if node.Predicate != nil {
- exprs = append(exprs, node.Predicate)
- }
- node.Predicate = sqlparser.AndExpressions(exprs...)
- return node, nil
-}
-
-func pushJoinPredicateOnDerived(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, node *Derived) (abstract.PhysicalOperator, error) {
- node = node.Clone().(*Derived)
-
- newExpressions := make([]sqlparser.Expr, 0, len(exprs))
- for _, expr := range exprs {
- tblInfo, err := ctx.SemTable.TableInfoForExpr(expr)
- if err != nil {
- return nil, err
- }
- rewritten, err := semantics.RewriteDerivedTableExpression(expr, tblInfo)
- if err != nil {
- return nil, err
- }
- newExpressions = append(newExpressions, rewritten)
- }
-
- newInner, err := pushJoinPredicates(ctx, newExpressions, node.Source)
- if err != nil {
- return nil, err
- }
-
- node.Source = newInner
- return node, nil
-}
diff --git a/go/vt/vtgate/planbuilder/physical/system_tables.go b/go/vt/vtgate/planbuilder/physical/system_tables.go
deleted file mode 100644
index 185f3b26671..00000000000
--- a/go/vt/vtgate/planbuilder/physical/system_tables.go
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- "strings"
-
- "vitess.io/vitess/go/mysql/collations"
- "vitess.io/vitess/go/sqltypes"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/evalengine"
-)
-
-func (rp *Route) findSysInfoRoutingPredicatesGen4(predicates []sqlparser.Expr, reservedVars *sqlparser.ReservedVars) error {
- for _, pred := range predicates {
- isTableSchema, bvName, out, err := extractInfoSchemaRoutingPredicate(pred, reservedVars)
- if err != nil {
- return err
- }
- if out == nil {
- // we didn't find a predicate to use for routing, continue to look for next predicate
- continue
- }
-
- if isTableSchema {
- rp.SysTableTableSchema = append(rp.SysTableTableSchema, out)
- } else {
- if rp.SysTableTableName == nil {
- rp.SysTableTableName = map[string]evalengine.Expr{}
- }
- rp.SysTableTableName[bvName] = out
- }
- }
- return nil
-}
-
-func extractInfoSchemaRoutingPredicate(in sqlparser.Expr, reservedVars *sqlparser.ReservedVars) (bool, string, evalengine.Expr, error) {
- switch cmp := in.(type) {
- case *sqlparser.ComparisonExpr:
- if cmp.Operator == sqlparser.EqualOp {
- isSchemaName, col, other, replaceOther := findOtherComparator(cmp)
- if col != nil && shouldRewrite(other) {
- evalExpr, err := evalengine.Translate(other, ¬ImplementedSchemaInfoConverter{})
- if err != nil {
- if strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) {
- // This just means we can't rewrite this particular expression,
- // not that we have to exit altogether
- return false, "", nil, nil
- }
- return false, "", nil, err
- }
- var name string
- if isSchemaName {
- name = sqltypes.BvSchemaName
- } else {
- name = reservedVars.ReserveColName(col.(*sqlparser.ColName))
- }
- replaceOther(sqlparser.NewArgument(name))
- return isSchemaName, name, evalExpr, nil
- }
- }
- }
- return false, "", nil, nil
-}
-
-func findOtherComparator(cmp *sqlparser.ComparisonExpr) (bool, sqlparser.Expr, sqlparser.Expr, func(arg sqlparser.Argument)) {
- if schema, table := isTableSchemaOrName(cmp.Left); schema || table {
- return schema, cmp.Left, cmp.Right, func(arg sqlparser.Argument) {
- cmp.Right = arg
- }
- }
- if schema, table := isTableSchemaOrName(cmp.Right); schema || table {
- return schema, cmp.Right, cmp.Left, func(arg sqlparser.Argument) {
- cmp.Left = arg
- }
- }
-
- return false, nil, nil, nil
-}
-
-func shouldRewrite(e sqlparser.Expr) bool {
- switch node := e.(type) {
- case *sqlparser.FuncExpr:
- // we should not rewrite database() calls against information_schema
- return !(node.Name.EqualString("database") || node.Name.EqualString("schema"))
- }
- return true
-}
-
-func isTableSchemaOrName(e sqlparser.Expr) (isTableSchema bool, isTableName bool) {
- col, ok := e.(*sqlparser.ColName)
- if !ok {
- return false, false
- }
- return isDbNameCol(col), isTableNameCol(col)
-}
-
-func isDbNameCol(col *sqlparser.ColName) bool {
- return col.Name.EqualString("table_schema") || col.Name.EqualString("constraint_schema") || col.Name.EqualString("schema_name") || col.Name.EqualString("routine_schema")
-}
-
-func isTableNameCol(col *sqlparser.ColName) bool {
- return col.Name.EqualString("table_name")
-}
-
-type notImplementedSchemaInfoConverter struct{}
-
-func (f *notImplementedSchemaInfoConverter) ColumnLookup(*sqlparser.ColName) (int, error) {
- return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Comparing table schema name with a column name not yet supported")
-}
-
-func (f *notImplementedSchemaInfoConverter) CollationForExpr(sqlparser.Expr) collations.ID {
- return collations.Unknown
-}
-
-func (f *notImplementedSchemaInfoConverter) DefaultCollation() collations.ID {
- return collations.Default()
-}
diff --git a/go/vt/vtgate/planbuilder/physical/table.go b/go/vt/vtgate/planbuilder/physical/table.go
deleted file mode 100644
index 24e021ac20b..00000000000
--- a/go/vt/vtgate/planbuilder/physical/table.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/semantics"
- "vitess.io/vitess/go/vt/vtgate/vindexes"
-)
-
-type Table struct {
- QTable *abstract.QueryTable
- VTable *vindexes.Table
- Columns []*sqlparser.ColName
-}
-
-var _ abstract.PhysicalOperator = (*Table)(nil)
-var _ abstract.IntroducesTable = (*Table)(nil)
-
-// IPhysical implements the PhysicalOperator interface
-func (to *Table) IPhysical() {}
-
-// Cost implements the PhysicalOperator interface
-func (to *Table) Cost() int {
- return 0
-}
-
-// Clone implements the PhysicalOperator interface
-func (to *Table) Clone() abstract.PhysicalOperator {
- var columns []*sqlparser.ColName
- for _, name := range to.Columns {
- columns = append(columns, sqlparser.CloneRefOfColName(name))
- }
- return &Table{
- QTable: to.QTable,
- VTable: to.VTable,
- Columns: columns,
- }
-}
-
-// TableID implements the PhysicalOperator interface
-func (to *Table) TableID() semantics.TableSet {
- return to.QTable.ID
-}
-
-// PushPredicate implements the PhysicalOperator interface
-func (to *Table) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) error {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "we should not push Predicates into a Table. It is meant to be immutable")
-}
-
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (to *Table) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
- panic("implement me")
-}
-
-// CheckValid implements the PhysicalOperator interface
-func (to *Table) CheckValid() error {
- return nil
-}
-
-// Compact implements the PhysicalOperator interface
-func (to *Table) Compact(semTable *semantics.SemTable) (abstract.Operator, error) {
- return to, nil
-}
-
-// GetQTable implements the IntroducesTable interface
-func (to *Table) GetQTable() *abstract.QueryTable {
- return to.QTable
-}
-
-// GetVTable implements the IntroducesTable interface
-func (to *Table) GetVTable() *vindexes.Table {
- return to.VTable
-}
diff --git a/go/vt/vtgate/planbuilder/physical/union.go b/go/vt/vtgate/planbuilder/physical/union.go
deleted file mode 100644
index 951314efe28..00000000000
--- a/go/vt/vtgate/planbuilder/physical/union.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-)
-
-type Union struct {
- Sources []abstract.PhysicalOperator
- SelectStmts []*sqlparser.Select
- Distinct bool
-
- // TODO this should be removed. For now it's used to fail queries
- Ordering sqlparser.OrderBy
-}
-
-var _ abstract.PhysicalOperator = (*Union)(nil)
-
-// TableID implements the PhysicalOperator interface
-func (u *Union) TableID() semantics.TableSet {
- ts := semantics.EmptyTableSet()
- for _, source := range u.Sources {
- ts.MergeInPlace(source.TableID())
- }
- return ts
-}
-
-// UnsolvedPredicates implements the PhysicalOperator interface
-func (u *Union) UnsolvedPredicates(*semantics.SemTable) []sqlparser.Expr {
- panic("implement me")
-}
-
-// CheckValid implements the PhysicalOperator interface
-func (u *Union) CheckValid() error {
- return nil
-}
-
-// IPhysical implements the PhysicalOperator interface
-func (u *Union) IPhysical() {}
-
-// Cost implements the PhysicalOperator interface
-func (u *Union) Cost() int {
- cost := 0
- for _, source := range u.Sources {
- cost += source.Cost()
- }
- return cost
-}
-
-// Clone implements the PhysicalOperator interface
-func (u *Union) Clone() abstract.PhysicalOperator {
- newOp := *u
- newOp.Sources = make([]abstract.PhysicalOperator, 0, len(u.Sources))
- for _, source := range u.Sources {
- newOp.Sources = append(newOp.Sources, source.Clone())
- }
- return &newOp
-}
diff --git a/go/vt/vtgate/planbuilder/physical/vindex.go b/go/vt/vtgate/planbuilder/physical/vindex.go
deleted file mode 100644
index 9944d580c1d..00000000000
--- a/go/vt/vtgate/planbuilder/physical/vindex.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2022 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package physical
-
-import (
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vtgate/engine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/abstract"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
- "vitess.io/vitess/go/vt/vtgate/semantics"
- "vitess.io/vitess/go/vt/vtgate/vindexes"
-)
-
-type Vindex struct {
- OpCode engine.VindexOpcode
- Table abstract.VindexTable
- Vindex vindexes.Vindex
- Solved semantics.TableSet
- Columns []*sqlparser.ColName
- Value sqlparser.Expr
-}
-
-// TableID implements the Operator interface
-func (v *Vindex) TableID() semantics.TableSet {
- return v.Solved
-}
-
-// UnsolvedPredicates implements the Operator interface
-func (v *Vindex) UnsolvedPredicates(*semantics.SemTable) []sqlparser.Expr {
- return nil
-}
-
-// CheckValid implements the Operator interface
-func (v *Vindex) CheckValid() error {
- return nil
-}
-
-// IPhysical implements the PhysicalOperator interface
-func (v *Vindex) IPhysical() {}
-
-// Cost implements the PhysicalOperator interface
-func (v *Vindex) Cost() int {
- return int(engine.EqualUnique)
-}
-
-// Clone implements the PhysicalOperator interface
-func (v *Vindex) Clone() abstract.PhysicalOperator {
- clone := *v
- return &clone
-}
-
-var _ abstract.PhysicalOperator = (*Vindex)(nil)
-
-func (v *Vindex) PushOutputColumns(columns []*sqlparser.ColName) ([]int, error) {
- idxs := make([]int, len(columns))
-outer:
- for i, newCol := range columns {
- for j, existingCol := range v.Columns {
- if sqlparser.EqualsExpr(newCol, existingCol) {
- idxs[i] = j
- continue outer
- }
- }
- idxs[i] = len(v.Columns)
- v.Columns = append(v.Columns, newCol)
- }
- return idxs, nil
-}
-
-func optimizeVindex(ctx *plancontext.PlanningContext, op *abstract.Vindex) (abstract.PhysicalOperator, error) {
- solves := ctx.SemTable.TableSetFor(op.Table.Alias)
- return &Vindex{
- OpCode: op.OpCode,
- Table: op.Table,
- Vindex: op.Vindex,
- Solved: solves,
- Value: op.Value,
- }, nil
-}
diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go
index 8ac609dc4e4..283b8874b0d 100644
--- a/go/vt/vtgate/planbuilder/plan_test.go
+++ b/go/vt/vtgate/planbuilder/plan_test.go
@@ -17,219 +17,44 @@ limitations under the License.
package planbuilder
import (
- "bufio"
- "context"
+ "bytes"
"encoding/json"
- "errors"
"fmt"
- "io"
"math/rand"
"os"
+ "path/filepath"
"runtime/debug"
"strings"
"testing"
- vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
-
- "vitess.io/vitess/go/test/utils"
- vschemapb "vitess.io/vitess/go/vt/proto/vschema"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "github.com/nsf/jsondiff"
+ "github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql/collations"
- "vitess.io/vitess/go/vt/vtgate/semantics"
-
- "github.com/google/go-cmp/cmp"
-
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/vterrors"
-
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/key"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ "vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
"vitess.io/vitess/go/vt/vtgate/vindexes"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
-)
-
-// hashIndex is a functional, unique Vindex.
-type hashIndex struct{ name string }
-
-func (v *hashIndex) String() string { return v.name }
-func (*hashIndex) Cost() int { return 1 }
-func (*hashIndex) IsUnique() bool { return true }
-func (*hashIndex) NeedsVCursor() bool { return false }
-func (*hashIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
- return []bool{}, nil
-}
-func (*hashIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
- return nil, nil
-}
-
-func newHashIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
- return &hashIndex{name: name}, nil
-}
-
-// lookupIndex is a unique Vindex, and satisfies Lookup.
-type lookupIndex struct{ name string }
-
-func (v *lookupIndex) String() string { return v.name }
-func (*lookupIndex) Cost() int { return 2 }
-func (*lookupIndex) IsUnique() bool { return true }
-func (*lookupIndex) NeedsVCursor() bool { return false }
-func (*lookupIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
- return []bool{}, nil
-}
-func (*lookupIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
- return nil, nil
-}
-func (*lookupIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error {
- return nil
-}
-func (*lookupIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error {
- return nil
-}
-func (*lookupIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error {
- return nil
-}
-
-func newLookupIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
- return &lookupIndex{name: name}, nil
-}
-
-var _ vindexes.Lookup = (*lookupIndex)(nil)
-
-// nameLkpIndex satisfies Lookup, NonUnique.
-type nameLkpIndex struct{ name string }
-
-func (v *nameLkpIndex) String() string { return v.name }
-func (*nameLkpIndex) Cost() int { return 3 }
-func (*nameLkpIndex) IsUnique() bool { return false }
-func (*nameLkpIndex) NeedsVCursor() bool { return false }
-func (*nameLkpIndex) AllowBatch() bool { return true }
-func (*nameLkpIndex) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL }
-func (*nameLkpIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
- return []bool{}, nil
-}
-func (*nameLkpIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
- return nil, nil
-}
-func (*nameLkpIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error {
- return nil
-}
-func (*nameLkpIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error {
- return nil
-}
-func (*nameLkpIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error {
- return nil
-}
-func (v *nameLkpIndex) Query() (string, []string) {
- return "select name, keyspace_id from name_user_vdx where name in ::name", []string{"name"}
-}
-func (*nameLkpIndex) MapResult([]sqltypes.Value, []*sqltypes.Result) ([]key.Destination, error) {
- return nil, nil
-}
-
-func newNameLkpIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
- return &nameLkpIndex{name: name}, nil
-}
-
-var _ vindexes.Vindex = (*nameLkpIndex)(nil)
-var _ vindexes.Lookup = (*nameLkpIndex)(nil)
-var _ vindexes.LookupPlanable = (*nameLkpIndex)(nil)
-
-// costlyIndex satisfies Lookup, NonUnique.
-type costlyIndex struct{ name string }
-
-func (v *costlyIndex) String() string { return v.name }
-func (*costlyIndex) Cost() int { return 10 }
-func (*costlyIndex) IsUnique() bool { return false }
-func (*costlyIndex) NeedsVCursor() bool { return false }
-func (*costlyIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
- return []bool{}, nil
-}
-func (*costlyIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
- return nil, nil
-}
-func (*costlyIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error {
- return nil
-}
-func (*costlyIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error {
- return nil
-}
-func (*costlyIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error {
- return nil
-}
-
-func newCostlyIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
- return &costlyIndex{name: name}, nil
-}
-
-var _ vindexes.Vindex = (*costlyIndex)(nil)
-var _ vindexes.Lookup = (*costlyIndex)(nil)
-
-// multiColIndex satisfies multi column vindex.
-type multiColIndex struct {
- name string
-}
-
-func newMultiColIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
- return &multiColIndex{name: name}, nil
-}
-
-var _ vindexes.MultiColumn = (*multiColIndex)(nil)
-
-func (m *multiColIndex) String() string { return m.name }
-
-func (m *multiColIndex) Cost() int { return 1 }
-
-func (m *multiColIndex) IsUnique() bool { return true }
-
-func (m *multiColIndex) NeedsVCursor() bool { return false }
-
-func (m *multiColIndex) Map(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value) ([]key.Destination, error) {
- return nil, nil
-}
-
-func (m *multiColIndex) Verify(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte) ([]bool, error) {
- return []bool{}, nil
-}
-
-func (m *multiColIndex) PartialVindex() bool {
- return true
-}
-
-func init() {
- vindexes.Register("hash_test", newHashIndex)
- vindexes.Register("lookup_test", newLookupIndex)
- vindexes.Register("name_lkp_test", newNameLkpIndex)
- vindexes.Register("costly", newCostlyIndex)
- vindexes.Register("multiCol_test", newMultiColIndex)
-}
-
-const (
- samePlanMarker = "Gen4 plan same as above\n"
- gen4ErrorPrefix = "Gen4 error: "
)
func makeTestOutput(t *testing.T) string {
testOutputTempDir := utils.MakeTestOutput(t, "testdata", "plan_test")
- t.Cleanup(func() {
- if !t.Failed() {
- _ = os.RemoveAll(testOutputTempDir)
- } else {
- t.Logf("Errors found in plantests. If the output is correct, run `cp %s/* testdata/` to update test expectations", testOutputTempDir)
- }
- })
-
return testOutputTempDir
}
func TestPlan(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
sysVarEnabled: true,
}
testOutputTempDir := makeTestOutput(t)
@@ -240,126 +65,147 @@ func TestPlan(t *testing.T) {
// the column is named as Id. This is to make sure that
// column names are case-preserved, but treated as
// case-insensitive even if they come from the vschema.
- testFile(t, "aggr_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "dml_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "from_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "filter_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "postprocess_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "select_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "symtab_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "unsupported_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "vindex_func_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "wireup_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "memory_sort_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "use_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "set_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "union_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "transaction_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "lock_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "large_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "ddl_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "flush_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "show_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "stream_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "systemtables_cases.txt", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "aggr_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "dml_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "from_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "filter_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "postprocess_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "select_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "symtab_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "unsupported_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "vindex_func_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "wireup_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "memory_sort_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "use_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "set_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "union_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "large_union_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "transaction_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "lock_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "large_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "ddl_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "flush_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "show_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "stream_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "info_schema80_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "reference_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "vexplain_cases.json", testOutputTempDir, vschemaWrapper, false)
+}
+
+func TestSystemTables57(t *testing.T) {
+ // first we move everything to use 5.7 logic
+ servenv.SetMySQLServerVersionForTest("5.7")
+ defer servenv.SetMySQLServerVersionForTest("")
+ vschemaWrapper := &vschemaWrapper{v: loadSchema(t, "vschemas/schema.json", true)}
+ testOutputTempDir := makeTestOutput(t)
+ testFile(t, "info_schema57_cases.json", testOutputTempDir, vschemaWrapper, false)
}
func TestSysVarSetDisabled(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
sysVarEnabled: false,
}
- testFile(t, "set_sysvar_disabled_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "set_sysvar_disabled_cases.json", makeTestOutput(t), vschemaWrapper, false)
+}
+
+func TestViews(t *testing.T) {
+ vschemaWrapper := &vschemaWrapper{
+ v: loadSchema(t, "vschemas/schema.json", true),
+ enableViews: true,
+ }
+
+ testFile(t, "view_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestOne(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithMainAsDefault(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
},
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithSecondUserAsDefault(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "second_user",
Sharded: true,
},
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithUserAsDefault(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "user",
Sharded: true,
},
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithTPCHVSchema(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "tpch_schema_test.json", true),
+ v: loadSchema(t, "vschemas/tpch_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestRubyOnRailsQueries(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "rails_schema_test.json", true),
+ v: loadSchema(t, "vschemas/rails_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "rails_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "rails_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestOLTP(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "oltp_schema_test.json", true),
+ v: loadSchema(t, "vschemas/oltp_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "oltp_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "oltp_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestTPCC(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "tpcc_schema_test.json", true),
+ v: loadSchema(t, "vschemas/tpcc_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "tpcc_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "tpcc_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestTPCH(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "tpch_schema_test.json", true),
+ v: loadSchema(t, "vschemas/tpch_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "tpch_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "tpch_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func BenchmarkOLTP(b *testing.B) {
@@ -376,14 +222,11 @@ func BenchmarkTPCH(b *testing.B) {
func benchmarkWorkload(b *testing.B, name string) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(b, name+"_schema_test.json", true),
+ v: loadSchema(b, "vschemas/"+name+"_schema.json", true),
sysVarEnabled: true,
}
- var testCases []testCase
- for tc := range iterateExecFile(name + "_cases.txt") {
- testCases = append(testCases, tc)
- }
+ testCases := readJSONTests(name + "_cases.json")
b.ResetTimer()
for _, version := range plannerVersions {
b.Run(version.String(), func(b *testing.B) {
@@ -394,7 +237,7 @@ func benchmarkWorkload(b *testing.B, name string) {
func TestBypassPlanningShardTargetFromFile(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -402,13 +245,13 @@ func TestBypassPlanningShardTargetFromFile(t *testing.T) {
tabletType: topodatapb.TabletType_PRIMARY,
dest: key.DestinationShard("-80")}
- testFile(t, "bypass_shard_cases.txt", makeTestOutput(t), vschema, false)
+ testFile(t, "bypass_shard_cases.json", makeTestOutput(t), vschema, false)
}
func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) {
keyRange, _ := key.ParseShardingSpec("-")
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -417,13 +260,13 @@ func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) {
dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]},
}
- testFile(t, "bypass_keyrange_cases.txt", makeTestOutput(t), vschema, false)
+ testFile(t, "bypass_keyrange_cases.json", makeTestOutput(t), vschema, false)
}
func TestWithDefaultKeyspaceFromFile(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -432,18 +275,18 @@ func TestWithDefaultKeyspaceFromFile(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "alterVschema_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "ddl_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "migration_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "flush_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "show_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "call_cases.txt", testOutputTempDir, vschema, false)
+ testFile(t, "alterVschema_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "ddl_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "migration_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "flush_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "show_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "call_cases.json", testOutputTempDir, vschema, false)
}
func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "second_user",
Sharded: true,
@@ -452,13 +295,13 @@ func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "select_cases_with_default.txt", testOutputTempDir, vschema, false)
+ testFile(t, "select_cases_with_default.json", testOutputTempDir, vschema, false)
}
func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "user",
Sharded: true,
@@ -467,24 +310,24 @@ func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "select_cases_with_user_as_default.txt", testOutputTempDir, vschema, false)
+ testFile(t, "select_cases_with_user_as_default.json", testOutputTempDir, vschema, false)
}
func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{Name: "information_schema"},
tabletType: topodatapb.TabletType_PRIMARY,
}
- testFile(t, "sysschema_default.txt", makeTestOutput(t), vschema, false)
+ testFile(t, "sysschema_default.json", makeTestOutput(t), vschema, false)
}
func TestOtherPlanningFromFile(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -493,8 +336,8 @@ func TestOtherPlanningFromFile(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "other_read_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "other_admin_cases.txt", testOutputTempDir, vschema, false)
+ testFile(t, "other_read_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "other_admin_cases.json", testOutputTempDir, vschema, false)
}
func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSchema {
@@ -511,6 +354,15 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch
t.Fatal(ks.Error)
}
+ // adding view in user keyspace
+ if ks.Keyspace.Name == "user" {
+ if err = vschema.AddView(ks.Keyspace.Name,
+ "user_details_view",
+ "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id"); err != nil {
+ t.Fatal(err)
+ }
+ }
+
// setting a default value to all the text columns in the tables of this keyspace
// so that we can "simulate" a real case scenario where the vschema is aware of
// columns' collations.
@@ -536,6 +388,7 @@ type vschemaWrapper struct {
dest key.Destination
sysVarEnabled bool
version plancontext.PlannerVersion
+ enableViews bool
}
func (vw *vschemaWrapper) IsShardRoutingEnabled() bool {
@@ -561,7 +414,7 @@ func (vw *vschemaWrapper) GetSrvVschema() *vschemapb.SrvVSchema {
}
func (vw *vschemaWrapper) ConnCollation() collations.ID {
- return collations.Unknown
+ return collations.CollationUtf8ID
}
func (vw *vschemaWrapper) PlannerWarning(_ string) {
@@ -573,7 +426,7 @@ func (vw *vschemaWrapper) ForeignKeyMode() string {
func (vw *vschemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) {
if vw.keyspace == nil {
- return nil, errors.New("keyspace not available")
+ return nil, vterrors.VT13001("keyspace not available")
}
return []*vindexes.Keyspace{vw.keyspace}, nil
}
@@ -581,7 +434,7 @@ func (vw *vschemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) {
// FindKeyspace implements the VSchema interface
func (vw *vschemaWrapper) FindKeyspace(keyspace string) (*vindexes.Keyspace, error) {
if vw.keyspace == nil {
- return nil, errors.New("keyspace not available")
+ return nil, vterrors.VT13001("keyspace not available")
}
if vw.keyspace.Name == keyspace {
return vw.keyspace, nil
@@ -622,11 +475,11 @@ func (vw *vschemaWrapper) TargetDestination(qualifier string) (key.Destination,
keyspaceName = qualifier
}
if keyspaceName == "" {
- return nil, nil, 0, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified")
+ return nil, nil, 0, vterrors.VT03007()
}
keyspace := vw.v.Keyspaces[keyspaceName]
if keyspace == nil {
- return nil, nil, 0, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Unknown database '%s' in vschema", keyspaceName)
+ return nil, nil, 0, vterrors.VT05003(keyspaceName)
}
return vw.dest, keyspace.Keyspace, vw.tabletType, nil
@@ -652,6 +505,14 @@ func (vw *vschemaWrapper) FindTable(tab sqlparser.TableName) (*vindexes.Table, s
return table, destKeyspace, destTabletType, destTarget, nil
}
+func (vw *vschemaWrapper) FindView(tab sqlparser.TableName) sqlparser.SelectStatement {
+ destKeyspace, _, _, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY)
+ if err != nil {
+ return nil
+ }
+ return vw.v.FindView(destKeyspace, tab.Name.String())
+}
+
func (vw *vschemaWrapper) FindTableOrVindex(tab sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) {
destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY)
if err != nil {
@@ -720,14 +581,42 @@ func (vw *vschemaWrapper) FindRoutedShard(keyspace, shard string) (string, error
return "", nil
}
+func (vw *vschemaWrapper) IsViewsEnabled() bool {
+ return vw.enableViews
+}
+
+type (
+ planTest struct {
+ Comment string `json:"comment,omitempty"`
+ Query string `json:"query,omitempty"`
+ Plan json.RawMessage `json:"plan,omitempty"`
+ V3Plan json.RawMessage `json:"v3-plan,omitempty"`
+ Gen4Plan json.RawMessage `json:"gen4-plan,omitempty"`
+ }
+)
+
func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, render bool) {
+ opts := jsondiff.DefaultConsoleOptions()
+
t.Run(filename, func(t *testing.T) {
- expected := &strings.Builder{}
+ var expected []planTest
var outFirstPlanner string
- for tcase := range iterateExecFile(filename) {
- t.Run(fmt.Sprintf("%d V3: %s", tcase.lineno, tcase.comments), func(t *testing.T) {
+ for _, tcase := range readJSONTests(filename) {
+ if tcase.V3Plan == nil {
+ tcase.V3Plan = tcase.Plan
+ tcase.Gen4Plan = tcase.Plan
+ }
+ current := planTest{}
+ testName := tcase.Comment
+ if testName == "" {
+ testName = tcase.Query
+ }
+ if tcase.Query == "" {
+ continue
+ }
+ t.Run(fmt.Sprintf("V3: %s", testName), func(t *testing.T) {
vschema.version = V3
- plan, err := TestBuilder(tcase.input, vschema, vschema.currentDb())
+ plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb())
if render && plan != nil {
viz, err := engine.GraphViz(plan.Instructions)
if err == nil {
@@ -736,24 +625,19 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, r
}
out := getPlanOrErrorOutput(err, plan)
- if out != tcase.output {
- t.Errorf("V3 - %s:%d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output, out), tcase.output, out)
+ compare, s := jsondiff.Compare(tcase.V3Plan, []byte(out), &opts)
+ if compare != jsondiff.FullMatch {
+ t.Errorf("V3 - %s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.V3Plan, out)
}
- if err != nil {
- out = `"` + out + `"`
- }
- outFirstPlanner = out
- expected.WriteString(tcase.comments)
- encoder := json.NewEncoder(expected)
- encoder.Encode(tcase.input)
- expected.WriteString(fmt.Sprintf("%s\n", out))
+ outFirstPlanner = out
+ current.Comment = testName
+ current.Query = tcase.Query
})
vschema.version = Gen4
out, err := getPlanOutput(tcase, vschema, render)
- if err != nil && tcase.output2ndPlanner == "" && strings.HasPrefix(err.Error(), "gen4 does not yet support") {
- expected.WriteString("\n")
+ if err != nil && len(tcase.Gen4Plan) == 0 && strings.HasPrefix(err.Error(), "gen4 does not yet support") {
continue
}
@@ -764,43 +648,58 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, r
// this is shown by not having any info at all after the result for the V3 planner
// with this last expectation, it is an error if the Gen4 planner
// produces the same plan as the V3 planner does
- testName := fmt.Sprintf("%d Gen4: %s", tcase.lineno, tcase.comments)
- t.Run(testName, func(t *testing.T) {
- if out != tcase.output2ndPlanner {
- t.Errorf("Gen4 - %s:%d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output2ndPlanner, out), tcase.output2ndPlanner, out)
- }
- if err != nil {
- out = `"` + out + `"`
+ t.Run(fmt.Sprintf("Gen4: %s", testName), func(t *testing.T) {
+ compare, s := jsondiff.Compare(tcase.Gen4Plan, []byte(out), &opts)
+ if compare != jsondiff.FullMatch {
+ t.Errorf("Gen4 - %s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.Gen4Plan, out)
}
if outFirstPlanner == out {
- expected.WriteString(samePlanMarker)
+ current.Plan = []byte(out)
} else {
- if err != nil {
- out = out[1 : len(out)-1] // remove the double quotes
- expected.WriteString(fmt.Sprintf("Gen4 error: %s\n", out))
- } else {
- expected.WriteString(fmt.Sprintf("%s\n", out))
- }
+ current.V3Plan = []byte(outFirstPlanner)
+ current.Gen4Plan = []byte(out)
}
})
- expected.WriteString("\n")
+ expected = append(expected, current)
}
-
if tempDir != "" {
- gotFile := fmt.Sprintf("%s/%s", tempDir, filename)
- _ = os.WriteFile(gotFile, []byte(strings.TrimSpace(expected.String())+"\n"), 0644)
+ name := strings.TrimSuffix(filename, filepath.Ext(filename))
+ name = filepath.Join(tempDir, name+".json")
+ file, err := os.Create(name)
+ require.NoError(t, err)
+ enc := json.NewEncoder(file)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", " ")
+ err = enc.Encode(expected)
+ if err != nil {
+ require.NoError(t, err)
+ }
}
})
}
-func getPlanOutput(tcase testCase, vschema *vschemaWrapper, render bool) (out string, err error) {
+func readJSONTests(filename string) []planTest {
+ var output []planTest
+ file, err := os.Open(locateFile(filename))
+ if err != nil {
+ panic(err)
+ }
+ dec := json.NewDecoder(file)
+ err = dec.Decode(&output)
+ if err != nil {
+ panic(err)
+ }
+ return output
+}
+
+func getPlanOutput(tcase planTest, vschema *vschemaWrapper, render bool) (out string, err error) {
defer func() {
if r := recover(); r != nil {
out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack()))
}
}()
- plan, err := TestBuilder(tcase.input, vschema, vschema.currentDb())
+ plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb())
if render && plan != nil {
viz, err := engine.GraphViz(plan.Instructions)
if err == nil {
@@ -813,135 +712,32 @@ func getPlanOutput(tcase testCase, vschema *vschemaWrapper, render bool) (out st
func getPlanOrErrorOutput(err error, plan *engine.Plan) string {
if err != nil {
- return err.Error()
+ return "\"" + err.Error() + "\""
}
- bout, _ := json.MarshalIndent(plan, "", " ")
- return string(bout)
-}
-
-type testCase struct {
- file string
- lineno int
- input string
- output string
- output2ndPlanner string
- comments string
-}
-
-func iterateExecFile(name string) (testCaseIterator chan testCase) {
- name = locateFile(name)
- fd, err := os.OpenFile(name, os.O_RDONLY, 0)
+ b := new(bytes.Buffer)
+ enc := json.NewEncoder(b)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", " ")
+ err = enc.Encode(plan)
if err != nil {
- panic(fmt.Sprintf("Could not open file %s", name))
+ panic(err)
}
- testCaseIterator = make(chan testCase)
- var comments string
- go func() {
- defer close(testCaseIterator)
-
- r := bufio.NewReader(fd)
- lineno := 0
- for {
- binput, err := r.ReadBytes('\n')
- if err != nil {
- if err != io.EOF {
- panic(fmt.Errorf("error reading file %s: line %d: %s", name, lineno, err.Error()))
- }
- break
- }
- lineno++
- input := string(binput)
- if input == "" || input == "\n" || strings.HasPrefix(input, "Length:") {
- continue
- }
- if input[0] == '#' {
- comments = comments + input
- continue
- }
- err = json.Unmarshal(binput, &input)
- if err != nil {
- panic(fmt.Sprintf("Line: %d, input: %s, error: %v\n", lineno, binput, err))
- }
- input = strings.Trim(input, "\"")
- var output []byte
- for {
- l, err := r.ReadBytes('\n')
- lineno++
- if err != nil {
- panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error()))
- }
- output = append(output, l...)
- if l[0] == '}' {
- output = output[:len(output)-1]
- break
- }
- if l[0] == '"' {
- output = output[1 : len(output)-2]
- break
- }
- }
-
- binput, err = r.ReadBytes('\n')
- lineno++
- var output2Planner []byte
- if err != nil && err != io.EOF {
- panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error()))
- }
- nextLine := string(binput)
- switch {
- case nextLine == samePlanMarker:
- output2Planner = output
- case strings.HasPrefix(nextLine, "{"):
- output2Planner = append(output2Planner, binput...)
- for {
- l, err := r.ReadBytes('\n')
- lineno++
- if err != nil {
- panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error()))
- }
- output2Planner = append(output2Planner, l...)
- if l[0] == '}' {
- output2Planner = output2Planner[:len(output2Planner)-1]
- break
- }
- if l[0] == '"' {
- output2Planner = output2Planner[1 : len(output2Planner)-2]
- break
- }
- }
- case strings.HasPrefix(nextLine, gen4ErrorPrefix):
- output2Planner = []byte(nextLine[len(gen4ErrorPrefix) : len(nextLine)-1])
- }
- testCaseIterator <- testCase{
- file: name,
- lineno: lineno,
- input: input,
- output: string(output),
- output2ndPlanner: string(output2Planner),
- comments: comments,
- }
- comments = ""
- }
- }()
- return testCaseIterator
+ return b.String()
}
func locateFile(name string) string {
return "testdata/" + name
}
-var benchMarkFiles = []string{"from_cases.txt", "filter_cases.txt", "large_cases.txt", "aggr_cases.txt", "select_cases.txt", "union_cases.txt"}
+var benchMarkFiles = []string{"from_cases.json", "filter_cases.json", "large_cases.json", "aggr_cases.json", "select_cases.json", "union_cases.json"}
func BenchmarkPlanner(b *testing.B) {
vschema := &vschemaWrapper{
- v: loadSchema(b, "schema_test.json", true),
+ v: loadSchema(b, "vschemas/schema.json", true),
sysVarEnabled: true,
}
for _, filename := range benchMarkFiles {
- var testCases []testCase
- for tc := range iterateExecFile(filename) {
- testCases = append(testCases, tc)
- }
+ testCases := readJSONTests(filename)
b.Run(filename+"-v3", func(b *testing.B) {
benchmarkPlanner(b, V3, testCases, vschema)
})
@@ -956,14 +752,14 @@ func BenchmarkPlanner(b *testing.B) {
func BenchmarkSemAnalysis(b *testing.B) {
vschema := &vschemaWrapper{
- v: loadSchema(b, "schema_test.json", true),
+ v: loadSchema(b, "vschemas/schema.json", true),
sysVarEnabled: true,
}
for i := 0; i < b.N; i++ {
for _, filename := range benchMarkFiles {
- for tc := range iterateExecFile(filename) {
- exerciseAnalyzer(tc.input, vschema.currentDb(), vschema)
+ for _, tc := range readJSONTests(filename) {
+ exerciseAnalyzer(tc.Query, vschema.currentDb(), vschema)
}
}
}
@@ -989,23 +785,13 @@ func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) {
func BenchmarkSelectVsDML(b *testing.B) {
vschema := &vschemaWrapper{
- v: loadSchema(b, "schema_test.json", true),
+ v: loadSchema(b, "vschemas/schema.json", true),
sysVarEnabled: true,
version: V3,
}
- var dmlCases []testCase
- var selectCases []testCase
-
- for tc := range iterateExecFile("dml_cases.txt") {
- dmlCases = append(dmlCases, tc)
- }
-
- for tc := range iterateExecFile("select_cases.txt") {
- if tc.output2ndPlanner != "" {
- selectCases = append(selectCases, tc)
- }
- }
+ dmlCases := readJSONTests("dml_cases.json")
+ selectCases := readJSONTests("select_cases.json")
rand.Shuffle(len(dmlCases), func(i, j int) {
dmlCases[i], dmlCases[j] = dmlCases[j], dmlCases[i]
@@ -1024,13 +810,13 @@ func BenchmarkSelectVsDML(b *testing.B) {
})
}
-func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []testCase, vschema *vschemaWrapper) {
+func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemaWrapper) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
for _, tcase := range testCases {
- if tcase.output2ndPlanner != "" {
+ if len(tcase.Gen4Plan) > 0 {
vschema.version = version
- _, _ = TestBuilder(tcase.input, vschema, vschema.currentDb())
+ _, _ = TestBuilder(tcase.Query, vschema, vschema.currentDb())
}
}
}
diff --git a/go/vt/vtgate/planbuilder/plan_test_vindex.go b/go/vt/vtgate/planbuilder/plan_test_vindex.go
new file mode 100644
index 00000000000..432ef7b8479
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/plan_test_vindex.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package planbuilder
+
+import (
+ "context"
+ "strconv"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/key"
+ vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+)
+
+// hashIndex is a functional, unique Vindex.
+type hashIndex struct{ name string }
+
+func (v *hashIndex) String() string { return v.name }
+func (*hashIndex) Cost() int { return 1 }
+func (*hashIndex) IsUnique() bool { return true }
+func (*hashIndex) NeedsVCursor() bool { return false }
+func (*hashIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
+ return []bool{}, nil
+}
+func (*hashIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
+ return nil, nil
+}
+func newHashIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
+ return &hashIndex{name: name}, nil
+}
+
+// lookupIndex is a unique Vindex, and satisfies Lookup.
+type lookupIndex struct{ name string }
+
+func (v *lookupIndex) String() string { return v.name }
+func (*lookupIndex) Cost() int { return 2 }
+func (*lookupIndex) IsUnique() bool { return true }
+func (*lookupIndex) NeedsVCursor() bool { return false }
+func (*lookupIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
+ return []bool{}, nil
+}
+func (*lookupIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
+ return nil, nil
+}
+func (*lookupIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error {
+ return nil
+}
+func (*lookupIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error {
+ return nil
+}
+func (*lookupIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error {
+ return nil
+}
+func newLookupIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
+ return &lookupIndex{name: name}, nil
+}
+
+var _ vindexes.Lookup = (*lookupIndex)(nil)
+
+// nameLkpIndex satisfies Lookup, NonUnique.
+type nameLkpIndex struct{ name string }
+
+func (v *nameLkpIndex) String() string { return v.name }
+func (*nameLkpIndex) Cost() int { return 3 }
+func (*nameLkpIndex) IsUnique() bool { return false }
+func (*nameLkpIndex) NeedsVCursor() bool { return false }
+func (*nameLkpIndex) AllowBatch() bool { return true }
+func (*nameLkpIndex) AutoCommitEnabled() bool { return false }
+func (*nameLkpIndex) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL }
+func (*nameLkpIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
+ return []bool{}, nil
+}
+func (*nameLkpIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
+ return nil, nil
+}
+func (*nameLkpIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error {
+ return nil
+}
+func (*nameLkpIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error {
+ return nil
+}
+func (*nameLkpIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error {
+ return nil
+}
+func (*nameLkpIndex) Query() (string, []string) {
+ return "select name, keyspace_id from name_user_vdx where name in ::name", []string{"name"}
+}
+func (*nameLkpIndex) MapResult([]sqltypes.Value, []*sqltypes.Result) ([]key.Destination, error) {
+ return nil, nil
+}
+func newNameLkpIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
+ return &nameLkpIndex{name: name}, nil
+}
+
+var _ vindexes.Vindex = (*nameLkpIndex)(nil)
+var _ vindexes.Lookup = (*nameLkpIndex)(nil)
+var _ vindexes.LookupPlanable = (*nameLkpIndex)(nil)
+
+// costlyIndex satisfies Lookup, NonUnique.
+type costlyIndex struct{ name string }
+
+func (v *costlyIndex) String() string { return v.name }
+func (*costlyIndex) Cost() int { return 10 }
+func (*costlyIndex) IsUnique() bool { return false }
+func (*costlyIndex) NeedsVCursor() bool { return false }
+func (*costlyIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
+ return []bool{}, nil
+}
+func (*costlyIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
+ return nil, nil
+}
+func (*costlyIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error {
+ return nil
+}
+func (*costlyIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error {
+ return nil
+}
+func (*costlyIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error {
+ return nil
+}
+func newCostlyIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
+ return &costlyIndex{name: name}, nil
+}
+
+var _ vindexes.Vindex = (*costlyIndex)(nil)
+var _ vindexes.Lookup = (*costlyIndex)(nil)
+
+// multiColIndex satisfies multi column vindex.
+type multiColIndex struct{ name string }
+
+func (m *multiColIndex) String() string { return m.name }
+func (*multiColIndex) Cost() int { return 1 }
+func (*multiColIndex) IsUnique() bool { return true }
+func (*multiColIndex) NeedsVCursor() bool { return false }
+func (*multiColIndex) Map(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value) ([]key.Destination, error) {
+ return nil, nil
+}
+func (*multiColIndex) Verify(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte) ([]bool, error) {
+ return []bool{}, nil
+}
+func (*multiColIndex) PartialVindex() bool { return true }
+func newMultiColIndex(name string, _ map[string]string) (vindexes.Vindex, error) {
+ return &multiColIndex{name: name}, nil
+}
+
+var _ vindexes.MultiColumn = (*multiColIndex)(nil)
+
+// unqLkpVdxBackfill satisfies Lookup, Unique.
+type unqLkpVdxBackfill struct {
+ name string
+ inBackfill bool
+ cost int
+}
+
+func (u *unqLkpVdxBackfill) String() string { return u.name }
+func (u *unqLkpVdxBackfill) Cost() int { return u.cost }
+func (*unqLkpVdxBackfill) IsUnique() bool { return false }
+func (*unqLkpVdxBackfill) NeedsVCursor() bool { return false }
+func (*unqLkpVdxBackfill) AllowBatch() bool { return true }
+func (*unqLkpVdxBackfill) AutoCommitEnabled() bool { return false }
+func (*unqLkpVdxBackfill) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL }
+func (*unqLkpVdxBackfill) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
+ return []bool{}, nil
+}
+func (*unqLkpVdxBackfill) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) {
+ return nil, nil
+}
+func (*unqLkpVdxBackfill) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error {
+ return nil
+}
+func (*unqLkpVdxBackfill) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error {
+ return nil
+}
+func (*unqLkpVdxBackfill) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error {
+ return nil
+}
+func (*unqLkpVdxBackfill) Query() (string, []string) {
+ return "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::unq_key", []string{"unq_key"}
+}
+func (*unqLkpVdxBackfill) MapResult([]sqltypes.Value, []*sqltypes.Result) ([]key.Destination, error) {
+ return nil, nil
+}
+func (u *unqLkpVdxBackfill) IsBackfilling() bool { return u.inBackfill }
+
+func newUnqLkpVdxBackfill(name string, m map[string]string) (vindexes.Vindex, error) {
+ vdx := &unqLkpVdxBackfill{name: name}
+ if val, ok := m["write_only"]; ok {
+ vdx.inBackfill = val == "true"
+ }
+ if val, ok := m["cost"]; ok {
+ vdx.cost, _ = strconv.Atoi(val)
+ }
+ return vdx, nil
+}
+
+var _ vindexes.Vindex = (*unqLkpVdxBackfill)(nil)
+var _ vindexes.Lookup = (*unqLkpVdxBackfill)(nil)
+var _ vindexes.LookupPlanable = (*unqLkpVdxBackfill)(nil)
+var _ vindexes.LookupBackfill = (*unqLkpVdxBackfill)(nil)
+
+func init() {
+ vindexes.Register("hash_test", newHashIndex)
+ vindexes.Register("lookup_test", newLookupIndex)
+ vindexes.Register("name_lkp_test", newNameLkpIndex)
+ vindexes.Register("costly", newCostlyIndex)
+ vindexes.Register("multiCol_test", newMultiColIndex)
+ vindexes.Register("unq_lkp_test", newUnqLkpVdxBackfill)
+}
diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context.go b/go/vt/vtgate/planbuilder/plancontext/planning_context.go
index 4b78f5a4686..a57c9528f01 100644
--- a/go/vt/vtgate/planbuilder/plancontext/planning_context.go
+++ b/go/vt/vtgate/planbuilder/plancontext/planning_context.go
@@ -55,7 +55,7 @@ func (c PlanningContext) IsSubQueryToReplace(e sqlparser.Expr) bool {
return false
}
for _, extractedSubq := range c.SemTable.GetSubqueryNeedingRewrite() {
- if extractedSubq.NeedsRewrite && sqlparser.EqualsRefOfSubquery(extractedSubq.Subquery, ext) {
+ if extractedSubq.NeedsRewrite && c.SemTable.EqualsExpr(extractedSubq.Subquery, ext) {
return true
}
}
diff --git a/go/vt/vtgate/planbuilder/plancontext/vschema.go b/go/vt/vtgate/planbuilder/plancontext/vschema.go
index 46aa544f33d..281a3bd9eb0 100644
--- a/go/vt/vtgate/planbuilder/plancontext/vschema.go
+++ b/go/vt/vtgate/planbuilder/plancontext/vschema.go
@@ -3,6 +3,7 @@ package plancontext
import (
"strings"
+ "vitess.io/vitess/go/vt/log"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
"vitess.io/vitess/go/mysql/collations"
@@ -21,6 +22,7 @@ type PlannerVersion = querypb.ExecuteOptions_PlannerVersion
// info about tables.
type VSchema interface {
FindTable(tablename sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error)
+ FindView(name sqlparser.TableName) sqlparser.SelectStatement
FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error)
DefaultKeyspace() (*vindexes.Keyspace, error)
TargetString() string
@@ -63,12 +65,17 @@ type VSchema interface {
// IsShardRoutingEnabled returns true if partial shard routing is enabled
IsShardRoutingEnabled() bool
+
+ // IsViewsEnabled returns true if Vitess manages the views.
+ IsViewsEnabled() bool
}
// PlannerNameToVersion returns the numerical representation of the planner
func PlannerNameToVersion(s string) (PlannerVersion, bool) {
+ deprecationMessage := "The V3 planner is deprecated and will be removed in V17 of Vitess"
switch strings.ToLower(s) {
case "v3":
+ log.Warning(deprecationMessage)
return querypb.ExecuteOptions_V3, true
case "gen4":
return querypb.ExecuteOptions_Gen4, true
@@ -79,6 +86,7 @@ func PlannerNameToVersion(s string) (PlannerVersion, bool) {
case "gen4fallback":
return querypb.ExecuteOptions_Gen4WithFallback, true
case "gen4comparev3":
+ log.Warning(deprecationMessage)
return querypb.ExecuteOptions_Gen4CompareV3, true
}
return 0, false
diff --git a/go/vt/vtgate/planbuilder/primitive_builder.go b/go/vt/vtgate/planbuilder/primitive_builder.go
index 29655e81e41..b7c557518e5 100644
--- a/go/vt/vtgate/planbuilder/primitive_builder.go
+++ b/go/vt/vtgate/planbuilder/primitive_builder.go
@@ -17,6 +17,7 @@ limitations under the License.
package planbuilder
import (
+ "vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
)
@@ -29,6 +30,15 @@ type primitiveBuilder struct {
jt *jointab
plan logicalPlan
st *symtab
+ stmt sqlparser.Statement
+}
+
+func newStmtAwarePrimitiveBuilder(vschema plancontext.VSchema, jt *jointab, stmt sqlparser.Statement) *primitiveBuilder {
+ return &primitiveBuilder{
+ vschema: vschema,
+ jt: jt,
+ stmt: stmt,
+ }
}
func newPrimitiveBuilder(vschema plancontext.VSchema, jt *jointab) *primitiveBuilder {
diff --git a/go/vt/vtgate/planbuilder/primitive_wrapper.go b/go/vt/vtgate/planbuilder/primitive_wrapper.go
index 46c54b140ad..b4ed7c8aa39 100644
--- a/go/vt/vtgate/planbuilder/primitive_wrapper.go
+++ b/go/vt/vtgate/planbuilder/primitive_wrapper.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -44,7 +43,7 @@ func (p *primitiveWrapper) Inputs() []logicalPlan {
}
func (p *primitiveWrapper) Rewrite(...logicalPlan) error {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "can't rewrite")
+ return vterrors.VT13001("cannot rewrite")
}
func (p *primitiveWrapper) ContainsTables() semantics.TableSet {
diff --git a/go/vt/vtgate/planbuilder/project.go b/go/vt/vtgate/planbuilder/project.go
index d53ab81b19b..6dfea3fcec2 100644
--- a/go/vt/vtgate/planbuilder/project.go
+++ b/go/vt/vtgate/planbuilder/project.go
@@ -17,14 +17,14 @@ limitations under the License.
package planbuilder
import (
- "errors"
+ "fmt"
"strings"
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
)
// planProjection pushes the select expression to the specified
@@ -47,7 +47,7 @@ func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.Aliase
} else {
// Pushing of non-trivial expressions not allowed for RHS of left joins.
if _, ok := expr.Expr.(*sqlparser.ColName); !ok && node.ejoin.Opcode == engine.LeftJoin {
- return nil, nil, 0, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard left join and column expressions")
+ return nil, nil, 0, vterrors.VT12001("cross-shard LEFT JOIN and column expressions")
}
newRight, col, colNumber, err := planProjection(pb, node.Right, expr, origin)
@@ -85,7 +85,7 @@ func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.Aliase
// Ensure that there are no aggregates in the expression.
if sqlparser.ContainsAggregation(expr.Expr) {
- return nil, nil, 0, errors.New("unsupported: in scatter query: complex aggregate expression")
+ return nil, nil, 0, vterrors.VT12001("in scatter query: complex aggregate expression")
}
newInput, innerRC, _, err := planProjection(pb, node.input, expr, origin)
@@ -136,7 +136,7 @@ func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.Aliase
case *simpleProjection:
col, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
- return nil, nil, 0, errors.New("unsupported: expression on results of a cross-shard subquery")
+ return nil, nil, 0, vterrors.VT12001("expression on results of a cross-shard subquery")
}
// colNumber should already be set for subquery columns.
@@ -152,11 +152,11 @@ func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.Aliase
// Catch the case where no where clause was specified. If so, the opcode
// won't be set.
if node.eVindexFunc.Opcode == engine.VindexNone {
- return nil, nil, 0, errors.New("unsupported: where clause for vindex function must be of the form id = or id in(,...) (where clause missing)")
+ return nil, nil, 0, vterrors.VT12001(operators.VindexUnsupported + " (where clause missing)")
}
col, ok := expr.Expr.(*sqlparser.ColName)
if !ok {
- return nil, nil, 0, errors.New("unsupported: expression on results of a vindex function")
+ return nil, nil, 0, vterrors.VT12001("expression on results of a vindex function")
}
rc := newResultColumn(expr, node)
node.resultColumns = append(node.resultColumns, rc)
@@ -168,5 +168,5 @@ func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.Aliase
return node, rc, len(node.resultColumns) - 1, nil
}
- return nil, nil, 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "[BUG] unreachable %T.projection", in)
+ return nil, nil, 0, vterrors.VT13001(fmt.Sprintf("unreachable %T.projection", in))
}
diff --git a/go/vt/vtgate/planbuilder/projection.go b/go/vt/vtgate/planbuilder/projection.go
index ad051b8f531..6c942037490 100644
--- a/go/vt/vtgate/planbuilder/projection.go
+++ b/go/vt/vtgate/planbuilder/projection.go
@@ -17,7 +17,8 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -65,7 +66,7 @@ func (p *projection) Inputs() []logicalPlan {
// Rewrite implements the logicalPlan interface
func (p *projection) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 1 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of inputs")
+ return vterrors.VT13001(fmt.Sprintf("wrong number of inputs, got: %d; expected: %d", len(inputs), 1))
}
p.source = inputs[0]
return nil
@@ -108,7 +109,7 @@ func (p *projection) addColumn(idx *int, column sqlparser.Expr, columnName strin
offset = *idx
}
if p.columnNames[offset] != "" || p.columns[offset] != nil {
- return -1, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "overwriting columns in projection is not permitted")
+ return -1, vterrors.VT13001("overwriting columns in projection is not permitted")
}
p.columns[offset] = column
p.columnNames[offset] = columnName
diff --git a/go/vt/vtgate/planbuilder/projection_pushing.go b/go/vt/vtgate/planbuilder/projection_pushing.go
index c48b3b57f8a..e770ef1c9bd 100644
--- a/go/vt/vtgate/planbuilder/projection_pushing.go
+++ b/go/vt/vtgate/planbuilder/projection_pushing.go
@@ -17,11 +17,12 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/physical"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
@@ -55,7 +56,7 @@ func pushProjection(
case *concatenateGen4:
return pushProjectionIntoConcatenate(ctx, expr, hasAggregation, node, inner, reuseCol)
default:
- return 0, false, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "[BUG] push projection does not yet support: %T", node)
+ return 0, false, vterrors.VT13001(fmt.Sprintf("push projection does not yet support: %T", node))
}
}
@@ -70,14 +71,14 @@ func pushProjectionIntoVindexFunc(node *vindexFunc, expr *sqlparser.AliasedExpr,
func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenateGen4, inner bool, reuseCol bool) (int, bool, error) {
if hasAggregation {
- return 0, false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: aggregation on unions")
+ return 0, false, vterrors.VT12001("aggregation on UNIONs")
}
offset, added, err := pushProjection(ctx, expr, node.sources[0], inner, reuseCol, hasAggregation)
if err != nil {
return 0, false, err
}
- if added && ctx.SemTable.DirectDeps(expr.Expr).NumberOfTables() > 0 {
- return 0, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr))
+ if added && ctx.SemTable.DirectDeps(expr.Expr).NonEmpty() {
+ return 0, false, vterrors.VT13001(fmt.Sprintf("pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr)))
}
if added {
for _, source := range node.sources[1:] {
@@ -120,7 +121,7 @@ func pushProjectionIntoSemiJoin(
func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, node *orderedAggregate, inner, hasAggregation bool) (int, bool, error) {
colName, isColName := expr.Expr.(*sqlparser.ColName)
for _, aggregate := range node.aggregates {
- if sqlparser.EqualsExpr(aggregate.Expr, expr.Expr) {
+ if ctx.SemTable.EqualsExpr(aggregate.Expr, expr.Expr) {
return aggregate.Col, false, nil
}
if isColName && colName.Name.EqualString(aggregate.Alias) {
@@ -128,7 +129,7 @@ func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.Alia
}
}
for _, key := range node.groupByKeys {
- if sqlparser.EqualsExpr(key.Expr, expr.Expr) {
+ if ctx.SemTable.EqualsExpr(key.Expr, expr.Expr) {
return key.KeyCol, false, nil
}
}
@@ -202,10 +203,10 @@ func pushProjectionIntoJoin(
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
- return 0, false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard query with aggregates")
+ return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
// now we break the expression into left and right side dependencies and rewrite the left ones to bind variables
- bvName, cols, rewrittenExpr, err := physical.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves)
+ bvName, cols, rewrittenExpr, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves)
if err != nil {
return 0, false, err
}
@@ -276,9 +277,9 @@ func pushProjectionIntoHashJoin(
// for example an expression like count(*) will have dependencies on both sides, but we should not push it
// instead we should return an error
if hasAggregation {
- return 0, false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard query with aggregates")
+ return 0, false, vterrors.VT12001("cross-shard query with aggregates")
}
- return 0, false, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: hash join with projection from both sides of the join")
+ return 0, false, vterrors.VT12001("hash join with projection from both sides of the join")
}
if reuseCol && !appended {
for idx, col := range node.Cols {
@@ -299,10 +300,10 @@ func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *routeGen4, expr
return i, false, nil
}
}
- expr.Expr = sqlparser.RemoveKeyspaceFromColName(expr.Expr)
+ sqlparser.RemoveKeyspaceFromColName(expr.Expr)
sel, isSel := rb.Select.(*sqlparser.Select)
if !isSel {
- return 0, false, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "unsupported: pushing projection '%s' on %T", sqlparser.String(expr), rb.Select)
+ return 0, false, vterrors.VT12001(fmt.Sprintf("pushing projection '%s' on %T", sqlparser.String(expr), rb.Select))
}
if ctx.RewriteDerivedExpr {
@@ -322,15 +323,12 @@ func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *routeGen4, expr
func rewriteProjectionOfDerivedTable(expr *sqlparser.AliasedExpr, semTable *semantics.SemTable) error {
ti, err := semTable.TableInfoForExpr(expr.Expr)
- if err != nil && err != semantics.ErrMultipleTables {
+ if err != nil && err != semantics.ErrNotSingleTable {
return err
}
_, isDerivedTable := ti.(*semantics.DerivedTable)
if isDerivedTable {
- expr.Expr, err = semantics.RewriteDerivedTableExpression(expr.Expr, ti)
- if err != nil {
- return err
- }
+ expr.Expr = semantics.RewriteDerivedTableExpression(expr.Expr, ti)
}
return nil
}
diff --git a/go/vt/vtgate/planbuilder/pullout_subquery.go b/go/vt/vtgate/planbuilder/pullout_subquery.go
index dd33bb4a30a..a70fb5efdc4 100644
--- a/go/vt/vtgate/planbuilder/pullout_subquery.go
+++ b/go/vt/vtgate/planbuilder/pullout_subquery.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -118,7 +117,7 @@ func (ps *pulloutSubquery) SupplyWeightString(colNumber int, alsoAddToGroupBy bo
// Rewrite implements the logicalPlan interface
func (ps *pulloutSubquery) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 2 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "pulloutSubquery: wrong number of inputs")
+ return vterrors.VT13001("pulloutSubquery: wrong number of inputs")
}
ps.underlying = inputs[0]
ps.subquery = inputs[1]
diff --git a/go/vt/vtgate/planbuilder/rewrite.go b/go/vt/vtgate/planbuilder/rewrite.go
index d1b6a789a1b..73f5ca43c82 100644
--- a/go/vt/vtgate/planbuilder/rewrite.go
+++ b/go/vt/vtgate/planbuilder/rewrite.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -76,7 +75,7 @@ func (r *rewriter) rewriteDown(cursor *sqlparser.Cursor) bool {
}
tableName := node.Expr.(sqlparser.TableName)
// if the table name matches what the original is, then we do not need to rewrite
- if sqlparser.EqualsIdentifierCS(vindexTable.Name, tableName.Name) {
+ if sqlparser.Equals.IdentifierCS(vindexTable.Name, tableName.Name) {
break
}
// if there is no as clause, then move the routed table to the as clause.
@@ -89,6 +88,8 @@ func (r *rewriter) rewriteDown(cursor *sqlparser.Cursor) bool {
// replace the table name with the original table
tableName.Name = vindexTable.Name
node.Expr = tableName
+ case *sqlparser.ExtractedSubquery:
+ return false
case *sqlparser.Subquery:
err := rewriteSubquery(cursor, r, node)
if err != nil {
@@ -114,7 +115,7 @@ func rewriteInSubquery(cursor *sqlparser.Cursor, r *rewriter, node *sqlparser.Co
semTableSQ, found := r.semTable.SubqueryRef[subq]
if !found {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: came across subquery that was not in the subq map")
+ return vterrors.VT13001("got subquery that was not in the subq map")
}
r.inSubquery++
@@ -128,7 +129,7 @@ func rewriteInSubquery(cursor *sqlparser.Cursor, r *rewriter, node *sqlparser.Co
func rewriteSubquery(cursor *sqlparser.Cursor, r *rewriter, node *sqlparser.Subquery) error {
semTableSQ, found := r.semTable.SubqueryRef[node]
if !found {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: came across subquery that was not in the subq map")
+ return vterrors.VT13001("got subquery that was not in the subq map")
}
if semTableSQ.GetArgName() != "" || engine.PulloutOpcode(semTableSQ.OpCode) != engine.PulloutValue {
return nil
@@ -143,7 +144,7 @@ func rewriteSubquery(cursor *sqlparser.Cursor, r *rewriter, node *sqlparser.Subq
func (r *rewriter) rewriteExistsSubquery(cursor *sqlparser.Cursor, node *sqlparser.ExistsExpr) error {
semTableSQ, found := r.semTable.SubqueryRef[node.Subquery]
if !found {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: came across subquery that was not in the subq map")
+ return vterrors.VT13001("got subquery that was not in the subq map")
}
r.inSubquery++
@@ -174,33 +175,40 @@ func rewriteHavingClause(node *sqlparser.Select) {
exprs := sqlparser.SplitAndExpression(nil, node.Having.Expr)
node.Having = nil
for _, expr := range exprs {
- var hasAggr bool
- sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
- switch x := cursor.Node().(type) {
- case *sqlparser.ColName:
- if !x.Qualifier.IsEmpty() {
- return false
- }
- originalExpr, isInMap := selectExprMap[x.Name.Lowered()]
- if isInMap {
- if sqlparser.ContainsAggregation(originalExpr) {
+ hasAggr := sqlparser.ContainsAggregation(expr)
+ if !hasAggr {
+ sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
+ visitColName(cursor.Node(), selectExprMap, func(original sqlparser.Expr) {
+ if sqlparser.ContainsAggregation(original) {
hasAggr = true
- } else {
- cursor.Replace(originalExpr)
}
- }
- return false
- default:
- _, isAggregate := x.(sqlparser.AggrFunc)
- hasAggr = hasAggr || isAggregate
- }
- return true
- }, nil)
-
+ })
+ return true
+ }, nil)
+ }
if hasAggr {
node.AddHaving(expr)
} else {
+ sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
+ visitColName(cursor.Node(), selectExprMap, func(original sqlparser.Expr) {
+ cursor.Replace(original)
+ })
+ return true
+ }, nil)
node.AddWhere(expr)
}
}
}
+func visitColName(cursor sqlparser.SQLNode, selectExprMap map[string]sqlparser.Expr, f func(original sqlparser.Expr)) {
+ switch x := cursor.(type) {
+ case *sqlparser.ColName:
+ if !x.Qualifier.IsEmpty() {
+ return
+ }
+ originalExpr, isInMap := selectExprMap[x.Name.Lowered()]
+ if isInMap {
+ f(originalExpr)
+ }
+ return
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/route.go b/go/vt/vtgate/planbuilder/route.go
index 559466f68fd..a7429417cc6 100644
--- a/go/vt/vtgate/planbuilder/route.go
+++ b/go/vt/vtgate/planbuilder/route.go
@@ -17,10 +17,10 @@ limitations under the License.
package planbuilder
import (
+ "fmt"
"strconv"
"vitess.io/vitess/go/mysql/collations"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vtgate/semantics"
@@ -321,12 +321,12 @@ func (rb *route) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weigh
rc := rb.resultColumns[colNumber]
s, ok := rb.Select.(*sqlparser.Select)
if !ok {
- return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected AST struct for query")
+ return 0, vterrors.VT13001("unexpected AST struct for query")
}
aliasExpr, ok := s.SelectExprs[colNumber].(*sqlparser.AliasedExpr)
if !ok {
- return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected AST struct for query %T", s.SelectExprs[colNumber])
+ return 0, vterrors.VT13001(fmt.Sprintf("unexpected AST struct for query %T", s.SelectExprs[colNumber]))
}
weightStringExpr := &sqlparser.FuncExpr{
Name: sqlparser.NewIdentifierCI("weight_string"),
@@ -342,7 +342,7 @@ func (rb *route) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weigh
if alsoAddToGroupBy {
sel, isSelect := rb.Select.(*sqlparser.Select)
if !isSelect {
- return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot add weight string in %T", rb.Select)
+ return 0, vterrors.VT13001(fmt.Sprintf("cannot add weight string in %T", rb.Select))
}
sel.AddGroupBy(weightStringExpr)
}
@@ -363,7 +363,7 @@ func (rb *route) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weigh
// Rewrite implements the logicalPlan interface
func (rb *route) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 0 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "route: wrong number of inputs")
+ return vterrors.VT13001("route: wrong number of inputs")
}
return nil
}
diff --git a/go/vt/vtgate/planbuilder/routeGen4.go b/go/vt/vtgate/planbuilder/routeGen4.go
index c4d60944b33..a5b6982319e 100644
--- a/go/vt/vtgate/planbuilder/routeGen4.go
+++ b/go/vt/vtgate/planbuilder/routeGen4.go
@@ -18,7 +18,6 @@ package planbuilder
import (
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
@@ -186,7 +185,7 @@ func (rb *routeGen4) generateFieldQuery(sel sqlparser.SelectStatement, jt *joint
// Rewrite implements the logicalPlan interface
func (rb *routeGen4) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 0 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "route: wrong number of inputs")
+ return vterrors.VT13001("route: wrong number of inputs")
}
return nil
}
diff --git a/go/vt/vtgate/planbuilder/route_test.go b/go/vt/vtgate/planbuilder/route_test.go
index b3c4e48bdf5..9f4c8fa3b97 100644
--- a/go/vt/vtgate/planbuilder/route_test.go
+++ b/go/vt/vtgate/planbuilder/route_test.go
@@ -66,11 +66,11 @@ func TestJoinCanMerge(t *testing.T) {
for left, vals := range testcases {
for right, val := range vals {
name := fmt.Sprintf("%s:%s", engine.Opcode(left).String(), engine.Opcode(right).String())
- t.Run(name, func(t *testing.T) {
- if left == int(engine.SubShard) || right == int(engine.SubShard) {
- t.Skip("not used by v3")
- }
+ if left == int(engine.SubShard) || right == int(engine.SubShard) {
+ continue // not used by v3
+ }
+ t.Run(name, func(t *testing.T) {
lRoute := &route{
// Setting condition will make SelectEqualUnique match itself.
condition: &sqlparser.ColName{},
diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go
index 77ecd41be07..7ba5e27dc07 100644
--- a/go/vt/vtgate/planbuilder/select.go
+++ b/go/vt/vtgate/planbuilder/select.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- "errors"
"fmt"
"vitess.io/vitess/go/vt/log"
@@ -26,7 +25,6 @@ import (
"vitess.io/vitess/go/vt/key"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/evalengine"
@@ -39,7 +37,7 @@ func buildSelectPlan(query string) stmtPlanner {
return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) {
sel := stmt.(*sqlparser.Select)
if sel.With != nil {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: with expression in select statement")
+ return nil, vterrors.VT12001("WITH expression in SELECT statement")
}
p, err := handleDualSelects(sel, vschema)
@@ -66,7 +64,7 @@ func buildSelectPlan(query string) stmtPlanner {
return nil, err
}
- if shouldRetryWithCNFRewriting(plan) {
+ if shouldRetryAfterPredicateRewriting(plan) {
// by transforming the predicates to CNF, the planner will sometimes find better plans
primitive := rewriteToCNFAndReplan(stmt, getPlan)
if primitive != nil {
@@ -89,12 +87,12 @@ func buildSelectPlan(query string) stmtPlanner {
}
func rewriteToCNFAndReplan(stmt sqlparser.Statement, getPlan func(sel *sqlparser.Select) (logicalPlan, error)) engine.Primitive {
- rewritten := sqlparser.RewriteToCNF(stmt)
+ rewritten := sqlparser.RewritePredicate(stmt)
sel2, isSelect := rewritten.(*sqlparser.Select)
if isSelect {
log.Infof("retrying plan after cnf: %s", sqlparser.String(sel2))
plan2, err := getPlan(sel2)
- if err == nil && !shouldRetryWithCNFRewriting(plan2) {
+ if err == nil && !shouldRetryAfterPredicateRewriting(plan2) {
// we only use this new plan if it's better than the old one we got
return plan2.Primitive()
}
@@ -102,7 +100,7 @@ func rewriteToCNFAndReplan(stmt sqlparser.Statement, getPlan func(sel *sqlparser
return nil
}
-func shouldRetryWithCNFRewriting(plan logicalPlan) bool {
+func shouldRetryAfterPredicateRewriting(plan logicalPlan) bool {
// if we have a I_S query, but have not found table_schema or table_name, let's try CNF
var opcode engine.Opcode
var sysTableTableName map[string]evalengine.Expr
@@ -126,9 +124,6 @@ func shouldRetryWithCNFRewriting(plan logicalPlan) bool {
len(sysTableTableSchema) == 0
}
-var errSQLCalcFoundRows = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CantUseOptionHere, "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'")
-var errInto = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CantUseOptionHere, "Incorrect usage/placement of 'INTO'")
-
// processSelect builds a primitive tree for the given query or subquery.
// The tree built by this function has the following general structure:
//
@@ -168,16 +163,16 @@ func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, reservedVars *s
// Check and error if there is any locking function present in select expression.
for _, expr := range sel.SelectExprs {
if aExpr, ok := expr.(*sqlparser.AliasedExpr); ok && sqlparser.IsLockingFunc(aExpr.Expr) {
- return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%v allowed only with dual", sqlparser.String(aExpr))
+ return vterrors.VT12001(fmt.Sprintf("%v is allowed only with dual", sqlparser.String(aExpr)))
}
}
if sel.SQLCalcFoundRows {
if outer != nil || query == "" {
- return errSQLCalcFoundRows
+ return vterrors.VT03008("SQL_CALC_FOUND_ROWS")
}
sel.SQLCalcFoundRows = false
if sel.Limit != nil {
- plan, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, pb.vschema, planSelectV3)
+ plan, _, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, pb.vschema, planSelectV3)
if err != nil {
return err
}
@@ -188,7 +183,7 @@ func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, reservedVars *s
// Into is not supported in subquery.
if sel.Into != nil && (outer != nil || query == "") {
- return errInto
+ return vterrors.VT03008("INTO")
}
var where sqlparser.Expr
@@ -204,7 +199,7 @@ func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, reservedVars *s
directives := sel.Comments.Directives()
rb.eroute.QueryTimeout = queryTimeout(directives)
if rb.eroute.TargetDestination != nil {
- return errors.New("unsupported: SELECT with a target destination")
+ return vterrors.VT12001("SELECT with a target destination")
}
if directives.IsSet(sqlparser.DirectiveScatterErrorsAsWarnings) {
rb.eroute.ScatterErrorsAsWarnings = true
@@ -271,7 +266,7 @@ func copyCommentsAndLocks(statement sqlparser.SelectStatement, sel *sqlparser.Se
query.Lock = sel.Lock
if sel.Into != nil {
if opcode != engine.Unsharded {
- return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "INTO is not supported on sharded keyspace")
+ return vterrors.VT12001("INTO on sharded keyspace")
}
query.Into = sel.Into
}
@@ -283,16 +278,16 @@ func buildSQLCalcFoundRowsPlan(
sel *sqlparser.Select,
reservedVars *sqlparser.ReservedVars,
vschema plancontext.VSchema,
- planSelect func(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, error),
-) (logicalPlan, error) {
- ljt, limitPlan, err := planSelect(reservedVars, vschema, sel)
+ planSelect func(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error),
+) (logicalPlan, []string, error) {
+ ljt, limitPlan, _, err := planSelect(reservedVars, vschema, sel)
if err != nil {
- return nil, err
+ return nil, nil, err
}
statement2, reserved2, err := sqlparser.Parse2(originalQuery)
if err != nil {
- return nil, err
+ return nil, nil, err
}
sel2 := statement2.(*sqlparser.Select)
@@ -325,18 +320,18 @@ func buildSQLCalcFoundRowsPlan(
reservedVars2 := sqlparser.NewReservedVars("vtg", reserved2)
- cjt, countPlan, err := planSelect(reservedVars2, vschema, sel2)
+ cjt, countPlan, tablesUsed, err := planSelect(reservedVars2, vschema, sel2)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- return &sqlCalcFoundRows{LimitQuery: limitPlan, CountQuery: countPlan, ljt: ljt, cjt: cjt}, nil
+ return &sqlCalcFoundRows{LimitQuery: limitPlan, CountQuery: countPlan, ljt: ljt, cjt: cjt}, tablesUsed, nil
}
-func planSelectV3(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, error) {
+func planSelectV3(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error) {
ljt := newJointab(reservedVars)
frpb := newPrimitiveBuilder(vschema, ljt)
err := frpb.processSelect(sel, reservedVars, nil, "")
- return ljt, frpb.plan, err
+ return ljt, frpb.plan, nil, err
}
func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engine.Primitive, error) {
@@ -367,7 +362,7 @@ func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engi
continue
}
if len(lockFunctions) > 0 {
- return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: lock function and other expression in same select query")
+ return nil, vterrors.VT12001(fmt.Sprintf("LOCK function and other expression: [%s] in same select query", sqlparser.String(expr)))
}
exprs[i], err = evalengine.Translate(expr.Expr, evalengine.LookupDefaultCollation(vschema.ConnCollation()))
if err != nil {
@@ -522,7 +517,7 @@ func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs,
// We'll allow select * for simple routes.
rb, ok := pb.plan.(*route)
if !ok {
- return nil, errors.New("unsupported: '*' expression in cross-shard query")
+ return nil, vterrors.VT12001("'*' expression in cross-shard query")
}
// Validate keyspace reference if any.
if !node.TableName.IsEmpty() {
@@ -535,15 +530,15 @@ func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs,
rb, ok := pb.plan.(*route)
if !ok {
// This code is unreachable because the parser doesn't allow joins for next val statements.
- return nil, errors.New("unsupported: SELECT NEXT query in cross-shard query")
+ return nil, vterrors.VT12001("SELECT NEXT query in cross-shard query")
}
if rb.eroute.Opcode != engine.Next {
- return nil, errors.New("NEXT used on a non-sequence table")
+ return nil, vterrors.VT03018()
}
rb.eroute.Opcode = engine.Next
resultColumns = append(resultColumns, rb.PushAnonymous(node))
default:
- return nil, fmt.Errorf("BUG: unexpected select expression type: %T", node)
+ return nil, vterrors.VT13001(fmt.Sprintf("unexpected SELECT expression type: %T", node))
}
}
return resultColumns, nil
diff --git a/go/vt/vtgate/planbuilder/semi_join.go b/go/vt/vtgate/planbuilder/semi_join.go
index 0204a0f9911..44d99942fe4 100644
--- a/go/vt/vtgate/planbuilder/semi_join.go
+++ b/go/vt/vtgate/planbuilder/semi_join.go
@@ -17,7 +17,6 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -74,7 +73,7 @@ func (ps *semiJoin) WireupGen4(ctx *plancontext.PlanningContext) error {
// Rewrite implements the logicalPlan interface
func (ps *semiJoin) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 2 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "semiJoin: wrong number of inputs")
+ return vterrors.VT13001("semiJoin: wrong number of inputs")
}
ps.lhs = inputs[0]
ps.rhs = inputs[1]
diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go
index 9a6d13ec676..8508a791d41 100644
--- a/go/vt/vtgate/planbuilder/set.go
+++ b/go/vt/vtgate/planbuilder/set.go
@@ -21,13 +21,13 @@ import (
"strconv"
"strings"
+ "vitess.io/vitess/go/vt/sysvars"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vtgate/vindexes"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/key"
@@ -47,6 +47,7 @@ type (
// SET transaction_mode = two_pc => SET transaction_mode = 'two_pc'
identifierAsString bool
supportSetVar bool
+ storageCase sysvars.StorageCase
}
)
@@ -78,7 +79,7 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult
Expr: evalExpr,
}
setOps = append(setOps, setOp)
- case sqlparser.SessionScope:
+ case sqlparser.NextTxScope, sqlparser.SessionScope:
planFunc, err := sysvarPlanningFuncs.Get(expr)
if err != nil {
return nil, err
@@ -88,6 +89,12 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult
return nil, err
}
setOps = append(setOps, setOp)
+ if expr.Var.Scope == sqlparser.NextTxScope {
+ // This is to keep the backward compatibility.
+ // 'transaction_isolation' was added as a reserved connection system variable, so it used to change the setting at session level already.
+ // logging warning now to
+ vschema.PlannerWarning("converted 'next transaction' scope to 'session' scope")
+ }
case sqlparser.VitessMetadataScope:
value, err := getValueFor(expr)
if err != nil {
@@ -95,13 +102,13 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult
}
val, ok := value.(string)
if !ok {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "unexpected value type for '%s': %v", expr.Var.Name, value)
+ return nil, vterrors.VT03009(expr.Var.Name, value)
}
setOps = append(setOps,
&engine.VitessMetadata{Name: expr.Var.Name.Lowered(), Value: val})
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: undefined set type: %v", expr.Var.Scope.ToString())
+ return nil, vterrors.VT13001(fmt.Sprintf("undefined set type: %v", expr.Var.Scope.ToString()))
}
}
@@ -118,13 +125,13 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult
func buildSetOpReadOnly(setting) planFunc {
return func(expr *sqlparser.SetExpr, schema plancontext.VSchema, _ *expressionConverter) (engine.SetOp, error) {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.IncorrectGlobalLocalVar, "variable '%s' is a read only variable", expr.Var.Name)
+ return nil, vterrors.VT03010(expr.Var.Name)
}
}
func buildNotSupported(setting) planFunc {
return func(expr *sqlparser.SetExpr, schema plancontext.VSchema, _ *expressionConverter) (engine.SetOp, error) {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%s: system setting is not supported", expr.Var.Name)
+ return nil, vterrors.VT12001(fmt.Sprintf("system setting: %s", expr.Var.Name))
}
}
@@ -179,6 +186,8 @@ func buildSetOpReservedConn(s setting) planFunc {
return nil, err
}
+ value = provideAppliedCase(value, s.storageCase)
+
return &engine.SysVarReservedConn{
Name: expr.Var.Name.Lowered(),
Keyspace: ks,
@@ -189,7 +198,17 @@ func buildSetOpReservedConn(s setting) planFunc {
}
}
-const defaultNotSupportedErrFmt = "DEFAULT not supported for @@%s"
+func provideAppliedCase(value string, storageCase sysvars.StorageCase) string {
+ switch storageCase {
+ case sysvars.SCUpper:
+ return strings.ToUpper(value)
+ case sysvars.SCLower:
+ return strings.ToLower(value)
+ }
+ return value
+}
+
+const defaultNotSupportedErrFmt = "DEFAULT for @@%s"
func buildSetOpVitessAware(s setting) planFunc {
return func(astExpr *sqlparser.SetExpr, vschema plancontext.VSchema, ec *expressionConverter) (engine.SetOp, error) {
@@ -199,7 +218,7 @@ func buildSetOpVitessAware(s setting) planFunc {
_, isDefault := astExpr.Expr.(*sqlparser.Default)
if isDefault {
if s.defaultValue == nil {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, defaultNotSupportedErrFmt, astExpr.Var.Name)
+ return nil, vterrors.VT12001(fmt.Sprintf(defaultNotSupportedErrFmt, astExpr.Var.Name))
}
runtimeExpr = s.defaultValue
} else {
@@ -252,7 +271,7 @@ func extractValue(expr *sqlparser.SetExpr, boolean bool) (string, error) {
return fmt.Sprintf("'%s'", sqlparser.String(expr.Expr)), nil
case *sqlparser.Default:
- return "", vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, defaultNotSupportedErrFmt, expr.Var.Name)
+ return "", vterrors.VT12001(defaultNotSupportedErrFmt, expr.Var.Name)
}
return sqlparser.String(expr.Expr), nil
@@ -277,7 +296,7 @@ func getValueFor(expr *sqlparser.SetExpr) (any, error) {
}
return num, nil
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid value type: %v", sqlparser.String(expr))
+ return nil, vterrors.VT03011(sqlparser.String(expr))
}
case sqlparser.BoolVal:
var val int64
@@ -292,6 +311,6 @@ func getValueFor(expr *sqlparser.SetExpr) (any, error) {
case *sqlparser.Default:
return "default", nil
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid syntax: %s", sqlparser.String(expr))
+ return nil, vterrors.VT03012(sqlparser.String(expr))
}
}
diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go
index 4b80f74e6bf..49208af21f0 100644
--- a/go/vt/vtgate/planbuilder/show.go
+++ b/go/vt/vtgate/planbuilder/show.go
@@ -33,7 +33,6 @@ import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/key"
querypb "vitess.io/vitess/go/vt/proto/query"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -61,7 +60,7 @@ func buildShowPlan(sql string, stmt *sqlparser.Show, _ *sqlparser.ReservedVars,
case *sqlparser.ShowOther:
prim, err = buildShowOtherPlan(sql, vschema)
default:
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: undefined show type: %T", stmt.Internal)
+ return nil, vterrors.VT13001(fmt.Sprintf("undefined SHOW type: %T", stmt.Internal))
}
if err != nil {
return nil, err
@@ -123,7 +122,7 @@ func buildShowBasicPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema)
case sqlparser.VschemaVindexes:
return buildVschemaVindexesPlan(show, vschema)
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unknown show query type %s", show.Command.ToString())
+ return nil, vterrors.VT13001(fmt.Sprintf("unknown SHOW query type %s", show.Command.ToString()))
}
@@ -192,7 +191,7 @@ func buildShowTblPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (e
return nil, err
}
if table == nil {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.UnknownTable, "Table '%s' doesn't exist", show.Tbl.Name.String())
+ return nil, vterrors.VT05004(show.Tbl.Name.String())
}
// Update the table.
show.Tbl.Qualifier = sqlparser.NewIdentifierCS("")
@@ -255,11 +254,11 @@ func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema plancontext.VSc
return nil, err
}
if ks == nil {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoDB, "No database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)")
+ return nil, vterrors.VT09005()
}
if tabletType != topodatapb.TabletType_PRIMARY {
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "show vitess_migrations works only on primary tablet")
+ return nil, vterrors.VT09006("SHOW")
}
if dest == nil {
@@ -365,19 +364,19 @@ func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([
} else {
cmpExp, ok := showFilter.Filter.(*sqlparser.ComparisonExpr)
if !ok {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "expect a 'LIKE' or '=' expression")
+ return nil, vterrors.VT12001("expect a 'LIKE' or '=' expression")
}
left, ok := cmpExp.Left.(*sqlparser.ColName)
if !ok {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "expect left side to be 'charset'")
+ return nil, vterrors.VT12001("expect left side to be 'charset'")
}
leftOk := left.Name.EqualString(charset)
if leftOk {
literal, ok := cmpExp.Right.(*sqlparser.Literal)
if !ok {
- return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "we expect the right side to be a string")
+ return nil, vterrors.VT12001("we expect the right side to be a string")
}
rightString := literal.Val
@@ -449,7 +448,7 @@ func buildShowCreatePlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema
case sqlparser.CreateTbl:
return buildCreateTblPlan(show, vschema)
}
- return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unknown show query type %s", show.Command.ToString())
+ return nil, vterrors.VT13001("unknown SHOW query type %s", show.Command.ToString())
}
func buildCreateDbPlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema) (engine.Primitive, error) {
@@ -496,7 +495,7 @@ func buildCreateTblPlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema)
return nil, err
}
if tbl == nil {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.UnknownTable, "Table '%s' doesn't exist", sqlparser.String(show.Op))
+ return nil, vterrors.VT05004(sqlparser.String(show.Op))
}
ks = tbl.Keyspace
if destKs != nil {
@@ -653,7 +652,7 @@ func buildVschemaTablesPlan(vschema plancontext.VSchema) (engine.Primitive, erro
}
schemaKs, ok := vs.Keyspaces[ks.Name]
if !ok {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadDb, "Unknown database '%s' in vschema", ks.Name)
+ return nil, vterrors.VT05003(ks.Name)
}
var tables []string
@@ -688,7 +687,7 @@ func buildVschemaVindexesPlan(show *sqlparser.ShowBasic, vschema plancontext.VSc
tableName := show.Tbl.Name.String()
schemaTbl, ok := schemaKs.Tables[tableName]
if !ok {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.NoSuchTable, "table '%s' does not exist in keyspace '%s'", tableName, ks.Name)
+ return nil, vterrors.VT05005(tableName, ks.Name)
}
tbl = schemaTbl
}
diff --git a/go/vt/vtgate/planbuilder/simple_projection.go b/go/vt/vtgate/planbuilder/simple_projection.go
index 9cb1f7575d2..fb9894a89e9 100644
--- a/go/vt/vtgate/planbuilder/simple_projection.go
+++ b/go/vt/vtgate/planbuilder/simple_projection.go
@@ -20,6 +20,7 @@ import (
"fmt"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
)
@@ -57,7 +58,7 @@ func newSimpleProjection(alias sqlparser.IdentifierCS, plan logicalPlan) (*simpl
// Create column symbols based on the result column names.
for _, rc := range plan.ResultColumns() {
if _, ok := t.columns[rc.alias.Lowered()]; ok {
- return nil, nil, fmt.Errorf("duplicate column names in subquery: %s", sqlparser.String(rc.alias))
+ return nil, nil, vterrors.VT12001(fmt.Sprintf("duplicate column names in subquery: %s", sqlparser.String(rc.alias)))
}
t.addColumn(rc.alias, &column{origin: sq})
}
diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go
index b72af989f3b..057fb5ab136 100644
--- a/go/vt/vtgate/planbuilder/simplifier_test.go
+++ b/go/vt/vtgate/planbuilder/simplifier_test.go
@@ -38,12 +38,12 @@ import (
func TestSimplifyBuggyQuery(t *testing.T) {
query := "(select id from unsharded union select id from unsharded_auto) union (select id from user union select name from unsharded)"
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
version: Gen4,
}
stmt, reserved, err := sqlparser.Parse2(query)
require.NoError(t, err)
- rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil)
+ rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil)
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
simplified := simplifier.SimplifyStatement(
@@ -60,12 +60,12 @@ func TestSimplifyPanic(t *testing.T) {
t.Skip("not needed to run")
query := "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
version: Gen4,
}
stmt, reserved, err := sqlparser.Parse2(query)
require.NoError(t, err)
- rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil)
+ rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil)
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
simplified := simplifier.SimplifyStatement(
@@ -81,21 +81,21 @@ func TestSimplifyPanic(t *testing.T) {
func TestUnsupportedFile(t *testing.T) {
t.Skip("run manually to see if any queries can be simplified")
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
version: Gen4,
}
fmt.Println(vschema)
- for tcase := range iterateExecFile("unsupported_cases.txt") {
- t.Run(fmt.Sprintf("%d:%s", tcase.lineno, tcase.input), func(t *testing.T) {
- log.Errorf("%s:%d - %s", tcase.file, tcase.lineno, tcase.input)
- stmt, reserved, err := sqlparser.Parse2(tcase.input)
+ for _, tcase := range readJSONTests("unsupported_cases.txt") {
+ t.Run(tcase.Query, func(t *testing.T) {
+ log.Errorf("unsupported_cases.txt - %s", tcase.Query)
+ stmt, reserved, err := sqlparser.Parse2(tcase.Query)
require.NoError(t, err)
_, ok := stmt.(sqlparser.SelectStatement)
if !ok {
t.Skip()
return
}
- rewritten, err := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil)
+ rewritten, err := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil)
if err != nil {
t.Skip()
}
@@ -104,12 +104,12 @@ func TestUnsupportedFile(t *testing.T) {
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
ast := rewritten.AST
origQuery := sqlparser.String(ast)
- stmt, _, _ = sqlparser.Parse2(tcase.input)
+ stmt, _, _ = sqlparser.Parse2(tcase.Query)
simplified := simplifier.SimplifyStatement(
stmt.(sqlparser.SelectStatement),
vschema.currentDb(),
vschema,
- keepSameError(tcase.input, reservedVars, vschema, rewritten.BindVarNeeds),
+ keepSameError(tcase.Query, reservedVars, vschema, rewritten.BindVarNeeds),
)
if simplified == nil {
@@ -129,7 +129,7 @@ func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *
if err != nil {
panic(err)
}
- rewritten, _ := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil)
+ rewritten, _ := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil)
ast := rewritten.AST
_, expected := BuildFromStmt(query, ast, reservedVars, vschema, rewritten.BindVarNeeds, true, true)
if expected == nil {
diff --git a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go
index 868f96cee89..80d3b0fba11 100644
--- a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go
+++ b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go
@@ -20,6 +20,7 @@ import (
"sort"
"strings"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/sqlparser"
@@ -28,9 +29,9 @@ import (
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
-func unshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, ks *vindexes.Keyspace) (logicalPlan, error) {
+func unshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, ks *vindexes.Keyspace) (logicalPlan, []string, error) {
// this method is used when the query we are handling has all tables in the same unsharded keyspace
- sqlparser.Rewrite(stmt, func(cursor *sqlparser.Cursor) bool {
+ sqlparser.SafeRewrite(stmt, nil, func(cursor *sqlparser.Cursor) bool {
switch node := cursor.Node().(type) {
case sqlparser.SelectExpr:
removeKeyspaceFromSelectExpr(node)
@@ -40,11 +41,11 @@ func unshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectSt
})
}
return true
- }, nil)
+ })
tableNames, err := getTableNames(ctx.SemTable)
if err != nil {
- return nil, err
+ return nil, nil, err
}
plan := &routeGen4{
eroute: &engine.Route{
@@ -52,19 +53,27 @@ func unshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectSt
Opcode: engine.Unsharded,
Keyspace: ks,
},
- TableName: strings.Join(tableNames, ", "),
+ TableName: strings.Join(escapedTableNames(tableNames), ", "),
},
Select: stmt,
}
if err := plan.WireupGen4(ctx); err != nil {
- return nil, err
+ return nil, nil, err
}
- return plan, nil
+ return plan, operators.QualifiedTableNames(ks, tableNames), nil
}
-func getTableNames(semTable *semantics.SemTable) ([]string, error) {
- tableNameMap := map[string]any{}
+func escapedTableNames(tableNames []sqlparser.TableName) []string {
+ escaped := make([]string, len(tableNames))
+ for i, tableName := range tableNames {
+ escaped[i] = sqlparser.String(tableName)
+ }
+ return escaped
+}
+
+func getTableNames(semTable *semantics.SemTable) ([]sqlparser.TableName, error) {
+ tableNameMap := make(map[string]sqlparser.TableName)
for _, tableInfo := range semTable.Tables {
tblObj := tableInfo.GetVindexTable()
@@ -72,19 +81,24 @@ func getTableNames(semTable *semantics.SemTable) ([]string, error) {
// probably a derived table
continue
}
- var name string
if tableInfo.IsInfSchema() {
- name = "tableName"
+ tableNameMap["tableName"] = sqlparser.TableName{
+ Name: sqlparser.NewIdentifierCS("tableName"),
+ }
} else {
- name = sqlparser.String(tblObj.Name)
+ tableNameMap[sqlparser.String(tblObj.Name)] = sqlparser.TableName{
+ Name: tblObj.Name,
+ }
}
- tableNameMap[name] = nil
}
-
- var tableNames []string
- for name := range tableNameMap {
- tableNames = append(tableNames, name)
+ var keys []string
+ for k := range tableNameMap {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ var tableNames []sqlparser.TableName
+ for _, k := range keys {
+ tableNames = append(tableNames, tableNameMap[k])
}
- sort.Strings(tableNames)
return tableNames, nil
}
diff --git a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go
index 41dcd1df45e..72850361a9e 100644
--- a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go
+++ b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go
@@ -17,7 +17,8 @@ limitations under the License.
package planbuilder
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -106,7 +107,7 @@ func (s *sqlCalcFoundRows) SupplyWeightString(int, bool) (weightcolNumber int, e
// Rewrite implements the logicalPlan interface
func (s *sqlCalcFoundRows) Rewrite(inputs ...logicalPlan) error {
if len(inputs) != 2 {
- return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] wrong number of inputs for SQL_CALC_FOUND_ROWS: %d", len(inputs))
+ return vterrors.VT13001(fmt.Sprintf("wrong number of inputs for SQL_CALC_FOUND_ROWS: %d", len(inputs)))
}
s.LimitQuery = inputs[0]
s.CountQuery = inputs[1]
diff --git a/go/vt/vtgate/planbuilder/stream.go b/go/vt/vtgate/planbuilder/stream.go
index bc9d5c2d312..e464d39a7da 100644
--- a/go/vt/vtgate/planbuilder/stream.go
+++ b/go/vt/vtgate/planbuilder/stream.go
@@ -19,7 +19,6 @@ package planbuilder
import (
"vitess.io/vitess/go/vt/key"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -32,7 +31,7 @@ func buildStreamPlan(stmt *sqlparser.Stream, vschema plancontext.VSchema) (*plan
return nil, err
}
if destTabletType != topodatapb.TabletType_PRIMARY {
- return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "stream is supported only for primary tablet type, current type: %v", destTabletType)
+ return nil, vterrors.VT09009(destTabletType)
}
if dest == nil {
dest = key.DestinationExactKeyRange{}
diff --git a/go/vt/vtgate/planbuilder/subquery_op.go b/go/vt/vtgate/planbuilder/subquery_op.go
index 6191e5228c9..060c0ecfebd 100644
--- a/go/vt/vtgate/planbuilder/subquery_op.go
+++ b/go/vt/vtgate/planbuilder/subquery_op.go
@@ -19,12 +19,11 @@ package planbuilder
import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- "vitess.io/vitess/go/vt/vtgate/planbuilder/physical"
)
-func transformSubQueryPlan(ctx *plancontext.PlanningContext, op *physical.SubQueryOp) (logicalPlan, error) {
+func transformSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.SubQueryOp) (logicalPlan, error) {
innerPlan, err := transformToLogicalPlan(ctx, op.Inner, false)
if err != nil {
return nil, err
@@ -50,7 +49,7 @@ func transformSubQueryPlan(ctx *plancontext.PlanningContext, op *physical.SubQue
return plan, err
}
-func transformCorrelatedSubQueryPlan(ctx *plancontext.PlanningContext, op *physical.CorrelatedSubQueryOp) (logicalPlan, error) {
+func transformCorrelatedSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.CorrelatedSubQueryOp) (logicalPlan, error) {
outer, err := transformToLogicalPlan(ctx, op.Outer, false)
if err != nil {
return nil, err
@@ -62,7 +61,7 @@ func transformCorrelatedSubQueryPlan(ctx *plancontext.PlanningContext, op *physi
return newSemiJoin(outer, inner, op.Vars, op.LHSColumns), nil
}
-func mergeSubQueryOpPlan(ctx *plancontext.PlanningContext, inner, outer logicalPlan, n *physical.SubQueryOp) logicalPlan {
+func mergeSubQueryOpPlan(ctx *plancontext.PlanningContext, inner, outer logicalPlan, n *operators.SubQueryOp) logicalPlan {
iroute, ok := inner.(*routeGen4)
if !ok {
return nil
diff --git a/go/vt/vtgate/planbuilder/symtab.go b/go/vt/vtgate/planbuilder/symtab.go
index f3f3bf6997a..7853899b4f6 100644
--- a/go/vt/vtgate/planbuilder/symtab.go
+++ b/go/vt/vtgate/planbuilder/symtab.go
@@ -17,12 +17,10 @@ limitations under the License.
package planbuilder
import (
- "errors"
"fmt"
"strconv"
"strings"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/sqlparser"
@@ -31,8 +29,6 @@ import (
querypb "vitess.io/vitess/go/vt/proto/query"
)
-var errNoTable = errors.New("no table info")
-
// symtab represents the symbol table for a SELECT statement
// or a subquery. The symtab evolves over time.
// As a query is analyzed, multiple independent
@@ -172,7 +168,7 @@ func (st *symtab) AddTable(t *table) error {
st.singleRoute = nil
}
if _, ok := st.tables[t.alias]; ok {
- return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.NonUniqTable, "Not unique table/alias: '%s'", t.alias.Name.String())
+ return vterrors.VT03013(t.alias.Name.String())
}
st.tables[t.alias] = t
st.tableNames = append(st.tableNames, t.alias)
@@ -214,7 +210,7 @@ func (st *symtab) AllVschemaTableNames() ([]*vindexes.Table, error) {
for _, tname := range st.tableNames {
t, ok := st.tables[tname]
if !ok {
- return nil, fmt.Errorf("table %v not found", sqlparser.String(tname))
+ return nil, vterrors.VT05004(sqlparser.String(tname))
}
if t.vschemaTable != nil {
tables = append(tables, t.vschemaTable)
@@ -233,11 +229,11 @@ func (st *symtab) FindTable(tname sqlparser.TableName) (*table, error) {
if st.tableNames == nil {
// Unreachable because current code path checks for this condition
// before invoking this function.
- return nil, errNoTable
+ return nil, vterrors.VT05007()
}
t, ok := st.tables[tname]
if !ok {
- return nil, fmt.Errorf("table %v not found", sqlparser.String(tname))
+ return nil, vterrors.VT05004(sqlparser.String(tname))
}
return t, nil
}
@@ -308,14 +304,14 @@ func (st *symtab) Find(col *sqlparser.ColName) (origin logicalPlan, isLocal bool
}
if st.Outer == nil {
- return nil, false, fmt.Errorf("symbol %s not found", sqlparser.String(col))
+ return nil, false, vterrors.VT03019(sqlparser.String(col))
}
// Search is not continued if ResultColumns already has values:
// select a ... having ... (select b ... having a...). In this case,
// a (in having) should not match the outer-most 'a'. This is to
// match MySQL's behavior.
if len(st.ResultColumns) != 0 {
- return nil, false, fmt.Errorf("symbol %s not found in subquery", sqlparser.String(col))
+ return nil, false, vterrors.VT03020(sqlparser.String(col))
}
if origin, _, err = st.Outer.Find(col); err != nil {
@@ -331,7 +327,7 @@ func (st *symtab) searchResultColumn(col *sqlparser.ColName) (c *column, err err
for _, rc := range st.ResultColumns {
if rc.alias.Equal(col.Name) {
if cursym != nil {
- return nil, fmt.Errorf("ambiguous symbol reference: %v", sqlparser.String(col))
+ return nil, vterrors.VT03021(sqlparser.String(col))
}
cursym = rc
}
@@ -369,7 +365,7 @@ func (st *symtab) searchTables(col *sqlparser.ColName) (*column, error) {
return &column{origin: st.singleRoute, st: st}, nil
default:
// If none of the above, the symbol is unresolvable.
- return nil, fmt.Errorf("symbol %s not found", sqlparser.String(col))
+ return nil, vterrors.VT03019(sqlparser.String(col))
}
} else {
var ok bool
@@ -384,7 +380,7 @@ func (st *symtab) searchTables(col *sqlparser.ColName) (*column, error) {
if !ok {
// We know all the column names of a subquery. Might as well return an error if it's not found.
if t.isAuthoritative {
- return nil, fmt.Errorf("symbol %s not found in table or subquery", sqlparser.String(col))
+ return nil, vterrors.VT03019(sqlparser.String(col))
}
c = &column{
origin: t.Origin(),
@@ -399,14 +395,14 @@ func (st *symtab) searchTables(col *sqlparser.ColName) (*column, error) {
// order expression.
func ResultFromNumber(rcs []*resultColumn, val *sqlparser.Literal, caller string) (int, error) {
if val.Type != sqlparser.IntVal {
- return 0, errors.New("column number is not an int")
+ return 0, vterrors.VT13001("column number is not an INT")
}
- num, err := strconv.ParseInt(string(val.Val), 0, 64)
+ num, err := strconv.ParseInt(val.Val, 0, 64)
if err != nil {
- return 0, fmt.Errorf("error parsing column number: %s", sqlparser.String(val))
+ return 0, vterrors.VT13001(fmt.Sprintf("error parsing column number: %s", sqlparser.String(val)))
}
if num < 1 || num > int64(len(rcs)) {
- return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "Unknown column '%d' in '%s'", num, caller)
+ return 0, vterrors.VT03014(num, caller)
}
return int(num - 1), nil
}
@@ -437,14 +433,14 @@ func (st *symtab) Vindex(expr sqlparser.Expr, scope *route) vindexes.SingleColum
func BuildColName(rcs []*resultColumn, index int) (*sqlparser.ColName, error) {
alias := rcs[index].alias
if alias.IsEmpty() {
- return nil, errors.New("cannot reference a complex expression")
+ return nil, vterrors.VT12001("reference a complex expression")
}
for i, rc := range rcs {
if i == index {
continue
}
if rc.alias.Equal(alias) {
- return nil, fmt.Errorf("ambiguous symbol reference: %v", alias)
+ return nil, vterrors.VT03021(alias)
}
}
return &sqlparser.ColName{
@@ -458,14 +454,14 @@ func BuildColName(rcs []*resultColumn, index int) (*sqlparser.ColName, error) {
// If a symbol cannot be resolved or if the expression contains
// a subquery, an error is returned.
func (st *symtab) ResolveSymbols(node sqlparser.SQLNode) error {
- return sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
- switch node := node.(type) {
+ return sqlparser.Walk(func(currNode sqlparser.SQLNode) (kontinue bool, err error) {
+ switch currNode := currNode.(type) {
case *sqlparser.ColName:
- if _, _, err := st.Find(node); err != nil {
+ if _, _, err := st.Find(currNode); err != nil {
return false, err
}
case *sqlparser.Subquery:
- return false, errors.New("unsupported: subqueries disallowed in GROUP or ORDER BY")
+ return false, vterrors.VT12001(fmt.Sprintf("subqueries disallowed in %T", node))
}
return true, nil
}, node)
@@ -509,7 +505,7 @@ func (t *table) mergeColumn(alias sqlparser.IdentifierCI, c *column) (*column, e
return col, nil
}
if t.isAuthoritative {
- return nil, fmt.Errorf("column %v not found in %v", sqlparser.String(alias), sqlparser.String(t.alias))
+ return nil, vterrors.VT03022(sqlparser.String(alias), sqlparser.String(t.alias))
}
c.colNumber = len(t.columnNames)
t.columns[lowered] = c
@@ -617,5 +613,5 @@ func GetReturnType(input sqlparser.Expr) (querypb.Type, error) {
case *sqlparser.Count, *sqlparser.CountStar:
return querypb.Type_INT64, nil
}
- return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot evaluate return type for %T", input)
+ return 0, vterrors.VT12001(fmt.Sprintf("evaluate return type for %T", input))
}
diff --git a/go/vt/vtgate/planbuilder/system_tables.go b/go/vt/vtgate/planbuilder/system_tables.go
index 7b5ed2ddcc1..ba061af909f 100644
--- a/go/vt/vtgate/planbuilder/system_tables.go
+++ b/go/vt/vtgate/planbuilder/system_tables.go
@@ -21,7 +21,6 @@ import (
"vitess.io/vitess/go/mysql/collations"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/sqltypes"
@@ -32,7 +31,7 @@ import (
type notImplementedSchemaInfoConverter struct{}
func (f *notImplementedSchemaInfoConverter) ColumnLookup(*sqlparser.ColName) (int, error) {
- return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Comparing table schema name with a column name not yet supported")
+ return 0, vterrors.VT12001("comparing table schema name with a column name")
}
func (f *notImplementedSchemaInfoConverter) CollationForExpr(sqlparser.Expr) collations.ID {
@@ -88,41 +87,60 @@ func isTableSchemaOrName(e sqlparser.Expr) (isTableSchema bool, isTableName bool
return isDbNameCol(col), isTableNameCol(col)
}
+var schemaColumns = map[string]any{
+ "table_schema": nil,
+ "constraint_schema": nil,
+ "schema_name": nil,
+ "routine_schema": nil,
+ "specific_schema": nil,
+ "event_schema": nil,
+ "referenced_table_schema": nil,
+ "index_schema": nil,
+ "trigger_schema": nil,
+ "event_object_schema": nil,
+}
+
func isDbNameCol(col *sqlparser.ColName) bool {
- return col.Name.EqualString("table_schema") || col.Name.EqualString("constraint_schema") || col.Name.EqualString("schema_name") || col.Name.EqualString("routine_schema")
+ _, found := schemaColumns[col.Name.Lowered()]
+ return found
}
func isTableNameCol(col *sqlparser.ColName) bool {
- return col.Name.EqualString("table_name")
+ return col.Name.EqualString("table_name") || col.Name.EqualString("referenced_table_name")
}
-func extractInfoSchemaRoutingPredicate(in sqlparser.Expr, reservedVars *sqlparser.ReservedVars) (bool, string, evalengine.Expr, error) {
- switch cmp := in.(type) {
- case *sqlparser.ComparisonExpr:
- if cmp.Operator == sqlparser.EqualOp {
- isSchemaName, col, other, replaceOther := findOtherComparator(cmp)
- if col != nil && shouldRewrite(other) {
- evalExpr, err := evalengine.Translate(other, ¬ImplementedSchemaInfoConverter{})
- if err != nil {
- if strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) {
- // This just means we can't rewrite this particular expression,
- // not that we have to exit altogether
- return false, "", nil, nil
- }
- return false, "", nil, err
- }
- var name string
- if isSchemaName {
- name = sqltypes.BvSchemaName
- } else {
- name = reservedVars.ReserveColName(col.(*sqlparser.ColName))
- }
- replaceOther(sqlparser.NewArgument(name))
- return isSchemaName, name, evalExpr, nil
- }
+func extractInfoSchemaRoutingPredicate(
+ in sqlparser.Expr,
+ reservedVars *sqlparser.ReservedVars,
+) (isSchemaName bool, name string, evalExpr evalengine.Expr, err error) {
+ cmp, ok := in.(*sqlparser.ComparisonExpr)
+ if !ok || cmp.Operator != sqlparser.EqualOp {
+ return
+ }
+
+ isSchemaName, col, other, replaceOther := findOtherComparator(cmp)
+ if col == nil || !shouldRewrite(other) {
+ return
+ }
+
+ evalExpr, err = evalengine.Translate(other, ¬ImplementedSchemaInfoConverter{})
+ if err != nil {
+ if strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) {
+ // This just means we can't rewrite this particular expression,
+ // not that we have to exit altogether
+ err = nil
+ return
}
+ return false, "", nil, err
+ }
+
+ if isSchemaName {
+ name = sqltypes.BvSchemaName
+ } else {
+ name = reservedVars.ReserveColName(col.(*sqlparser.ColName))
}
- return false, "", nil, nil
+ replaceOther(sqlparser.NewArgument(name))
+ return isSchemaName, name, evalExpr, nil
}
func shouldRewrite(e sqlparser.Expr) bool {
diff --git a/go/vt/vtgate/planbuilder/system_variables.go b/go/vt/vtgate/planbuilder/system_variables.go
index a810496387d..eccb263c65a 100644
--- a/go/vt/vtgate/planbuilder/system_variables.go
+++ b/go/vt/vtgate/planbuilder/system_variables.go
@@ -20,7 +20,6 @@ import (
"fmt"
"sync"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/sysvars"
"vitess.io/vitess/go/vt/vterrors"
@@ -43,6 +42,7 @@ func (pc *sysvarPlanCache) initForSettings(systemVariables []sysvars.SystemVaria
boolean: sysvar.IsBoolean,
identifierAsString: sysvar.IdentifierAsString,
supportSetVar: sysvar.SupportSetVar,
+ storageCase: sysvar.Case,
}
if sysvar.Default != "" {
@@ -84,7 +84,7 @@ func (pc *sysvarPlanCache) Get(expr *sqlparser.SetExpr) (planFunc, error) {
pc.init()
pf, ok := pc.funcs[expr.Var.Name.Lowered()]
if !ok {
- return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.UnknownSystemVariable, "Unknown system variable '%s'", sqlparser.String(expr))
+ return nil, vterrors.VT05006(sqlparser.String(expr))
}
return pf, nil
}
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
new file mode 100644
index 00000000000..d1dedaa8d88
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
@@ -0,0 +1,5140 @@
+[
+ {
+ "comment": "group by a unique vindex should revert to simple route, and having clause should find the correct symbols.",
+ "query": "select id, count(*) c from user group by id having max(col) > 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) c from user group by id having max(col) > 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
+ "Query": "select id, count(*) as c from `user` group by id having max(col) > 10",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) c from user group by id having max(col) > 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
+ "Query": "select id, count(*) as c from `user` group by id having max(col) > 10",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate in a subquery",
+ "query": "select a from (select count(*) as a from user) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with non-aggregate expressions.",
+ "query": "select id, count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(1) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate using distinctdistinct",
+ "query": "select distinct col from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select distinct col from `user` order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select distinct col from `user` order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by select col",
+ "query": "select col from user group by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct group by unique vindex",
+ "query": "select id, count(distinct col) from user group by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(distinct col) from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
+ "Query": "select id, count(distinct col) from `user` group by id",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(distinct col) from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
+ "Query": "select id, count(distinct col) from `user` group by id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct unique vindex",
+ "query": "select col, count(distinct id) from user group by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct id) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(distinct id) from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct id) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_distinct(1) AS count(distinct id)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(distinct id) from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct no unique vindex",
+ "query": "select col1, count(distinct col2) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS count(distinct col2)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|3) AS count(distinct col2)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct no unique vindex and no group by",
+ "query": "select count(distinct col2) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct col2) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count_distinct_count(0) AS count(distinct col2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct col2) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count_distinct(0|1) AS count(distinct col2)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct no unique vindex, count expression aliased",
+ "query": "select col1, count(distinct col2) c2 from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) c2 from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS c2",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) c2 from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|3) AS c2",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sum with distinct no unique vindex",
+ "query": "select col1, sum(distinct col2) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, sum(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_distinct_sum(1) AS sum(distinct col2)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, sum(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_distinct(1|3) AS sum(distinct col2)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "min with distinct no unique vindex. distinct is ignored.",
+ "query": "select col1, min(distinct col2) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, min(distinct col2), weight_string(col1) from `user` where 1 != 1 group by col1, weight_string(col1)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select col1, min(distinct col2), weight_string(col1) from `user` group by col1, weight_string(col1) order by col1 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1|3) AS min(distinct col2)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by count distinct",
+ "query": "select col1, count(distinct col2) k from user group by col1 order by k",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS k",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|3) AS k",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by aggregate function",
+ "query": "select count(*) b from user group by b",
+ "v3-plan": "VT03005: cannot group on 'b'",
+ "gen4-plan": "VT03005: cannot group on 'count(*)'"
+ },
+ {
+ "comment": "scatter aggregate multiple group by (columns)",
+ "query": "select a, b, count(*) from user group by b, a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
+ "OrderBy": "(1|3) ASC, (0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3), (1|4)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate multiple group by (numbers)",
+ "query": "select a, b, count(*) from user group by 2, 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by 2, 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by 2, 1, weight_string(b), weight_string(a)",
+ "OrderBy": "(1|3) ASC, (0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by 2, 1, weight_string(b), weight_string(a) order by b asc, a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by 2, 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3), (1|4)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate multiple group by columns inverse order",
+ "query": "select a, b, count(*) from user group by b, a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
+ "OrderBy": "(1|3) ASC, (0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3), (1|4)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by column number",
+ "query": "select col from user group by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by 1 order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by invalid column number",
+ "query": "select col from user group by 2",
+ "v3-plan": "VT03014: unknown column '2' in 'group statement'",
+ "gen4-plan": "Unknown column '2' in 'group statement'"
+ },
+ {
+ "comment": "scatter aggregate order by null",
+ "query": "select count(*) from user order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with numbered order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "0, 1, 2",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by 1 asc, 2 asc, 3 asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
+ "GroupBy": "(0|5), (1|6), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with named order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "0, 1, 2",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
+ "GroupBy": "(0|5), (1|6), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with jumbled order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "0, 1, 2, 3",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
+ "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(4) AS count(*)",
+ "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
+ "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with jumbled group by and order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "2, 1, 0, 3",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
+ "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(4) AS count(*)",
+ "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
+ "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with some descending order by cols",
+ "query": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(3) AS count",
+ "GroupBy": "2, 1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b)",
+ "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC",
+ "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b) order by 1 desc, 3 desc, b asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(3) AS count(*)",
+ "GroupBy": "(0|4), (2|6), (1|5)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
+ "OrderBy": "(0|4) DESC, (2|6) DESC, (1|5) ASC",
+ "Query": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a desc, c desc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "invalid order by column numner for scatter",
+ "query": "select col, count(*) from user group by col order by 5 limit 10",
+ "v3-plan": "VT03014: unknown column '5' in 'order clause'",
+ "gen4-plan": "Unknown column '5' in 'order clause'"
+ },
+ {
+ "comment": "aggregate with limit",
+ "query": "select col, count(*) from user group by col limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from user group by col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from user group by col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group by with collate operator",
+ "query": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
+ "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
+ "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for aggregates",
+ "query": "select id, count(*) from route2 group by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from route2 group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
+ "Query": "select id, count(*) from unsharded as route2 group by id",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from route2 group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
+ "Query": "select id, count(*) from unsharded as route2 group by id",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "order by on a reference table",
+ "query": "select col from ref order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref order by col asc",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref order by col asc",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "distinct and aggregate functions missing group by",
+ "query": "select distinct a, count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select a, count(*), weight_string(a) from `user` order by a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0, 1",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS a, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*) from `user` where 1 != 1",
+ "Query": "select a, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "distinct and aggregate functions",
+ "query": "select distinct a, count(*) from user group by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc, a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), 1",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group by invalid column number (code is duplicated from symab).",
+ "query": "select id from user group by 1.1",
+ "v3-plan": "VT13001: [BUG] column number is not an INT",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user group by 1.1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(0) AS id",
+ "GroupBy": "1",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, 1.1 from `user` where 1 != 1 group by 1.1",
+ "OrderBy": "1 ASC",
+ "Query": "select id, 1.1 from `user` group by 1.1 order by 1.1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group by out of range column number (code is duplicated from symab).",
+ "query": "select id from user group by 2",
+ "v3-plan": "VT03014: unknown column '2' in 'group statement'",
+ "gen4-plan": "Unknown column '2' in 'group statement'"
+ },
+ {
+ "comment": "here it is safe to remove the order by on the derived table since it will not influence the output of the count(*)",
+ "query": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
+ "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
+ "Table": "`user`, user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "order by inside derived tables can be ignored",
+ "query": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id order by user_extra.extra asc",
+ "ResultColumns": 2,
+ "Table": "`user`, user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
+ "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "here we keep the order since the column is visible on the outside, and used by the orderedAggregate",
+ "query": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a group by col order by col asc",
+ "Table": "`user`, user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "optimize group by when using distinct with no aggregation",
+ "query": "select distinct col1, col2 from user group by col1, col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col1, col2 from user group by col1, col2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0, 1, 0, 1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC, (1|3) ASC",
+ "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc, col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col1, col2 from user group by col1, col2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), (1|3)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "do not use distinct when using only aggregates and no group by",
+ "query": "select distinct count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct count(*) from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Grouping on join",
+ "query": "select user.a from user join user_extra group by user.a",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a from user join user_extra group by user.a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|1)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Cannot have more than one aggr(distinct...",
+ "query": "select count(distinct a), count(distinct b) from user",
+ "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: count(distinct b)",
+ "gen4-plan": "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: count(distinct b)"
+ },
+ {
+ "comment": "multiple distinct functions with grouping.",
+ "query": "select col1, count(distinct col2), sum(distinct col2) from user group by col1",
+ "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct col2)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2), sum(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|4) AS count(distinct col2), sum_distinct(2|4) AS sum(distinct col2)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregate query with order by aggregate column along with NULL",
+ "query": "select col, count(*) k from user group by col order by null, k",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: null",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) k from user group by col order by null, k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS k",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as k from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregate query with order by NULL",
+ "query": "select col, count(*) k from user group by col order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) k from user group by col order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as k from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) k from user group by col order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS k",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as k from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join query on sharding key with group by a unique vindex with having clause.",
+ "query": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, count(*) as c from `user`, user_extra where 1 != 1 group by `user`.id",
+ "Query": "select `user`.id, count(*) as c from `user`, user_extra where `user`.id = user_extra.user_id group by `user`.id having max(`user`.col) > 10",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery on sharding key with group by a unique vindex with having clause.",
+ "query": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10 limit 1)",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10 limit 1)",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation filtering by having on a route",
+ "query": "select id from user group by id having count(id) = 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user group by id having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 group by id",
+ "Query": "select id from `user` group by id having count(id) = 10",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user group by id having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 group by id",
+ "Query": "select id from `user` group by id having count(id) = 10",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "weight_string addition to group by",
+ "query": "select lower(textcol1) as v, count(*) from user group by v",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select lower(textcol1) as v, count(*) from user group by v",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select lower(textcol1) as v, count(*) from user group by v",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "weight_string addition to group by when also there in order by",
+ "query": "select char_length(texcol1) as a, count(*) from user group by a order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by inside and outside parenthesis select",
+ "query": "(select id from user order by 1 desc) order by 1 asc limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by 1 asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery in exists clause with an ordering",
+ "query": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id",
+ "v3-plan": "VT12001: unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "ProjectedIndexes": "-2,-1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, col, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select `user`.id, col, weight_string(id) from `user` order by id asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Column and Literal equality filter on scatter aggregates",
+ "query": "select count(*) a from user having a = 10",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a = 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Equality filtering with column and string literal on scatter aggregates",
+ "query": "select count(*) a from user having a = '1'",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a = '1'",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = '1'",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column and Literal not equal filter on scatter aggregates",
+ "query": "select count(*) a from user having a != 10",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a != 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 != 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Not equal filter with column and string literal on scatter aggregates",
+ "query": "select count(*) a from user having a != '1'",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a != '1'",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 != '1'",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Greater than filter on scatter aggregates",
+ "query": "select count(*) a from user having a > 10",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a > 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 > 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Greater Equal filter on scatter aggregates",
+ "query": "select count(*) a from user having a >= 10",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a >= 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 >= 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Less than filter on scatter aggregates",
+ "query": "select count(*) a from user having a < 10",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a < 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 < 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Less Equal filter on scatter aggregates",
+ "query": "select count(*) a from user having a <= 10",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a <= 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 <= 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Less Equal filter on scatter with grouping",
+ "query": "select col, count(*) a from user group by col having a <= 10",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) a from user group by col having a <= 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":1 <= 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS a",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as a from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as a from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "We should be able to find grouping keys on ordered aggregates",
+ "query": "select count(*) as a, val1 from user group by val1 having a = 1.00",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) as a, val1 from user group by val1 having a = 1.00",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = 1.00",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(0) AS a",
+ "GroupBy": "(1|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "distinct on text column with collation",
+ "query": "select col, count(distinct textcol1) from user group by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct textcol1) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS count(distinct textcol1)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, textcol1, weight_string(textcol1) from `user` where 1 != 1 group by col, textcol1, weight_string(textcol1)",
+ "OrderBy": "0 ASC, (1|2) ASC",
+ "Query": "select col, textcol1, weight_string(textcol1) from `user` group by col, textcol1, weight_string(textcol1) order by col asc, textcol1 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct textcol1) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1 COLLATE latin1_swedish_ci) AS count(distinct textcol1)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, textcol1 from `user` where 1 != 1 group by col, textcol1",
+ "OrderBy": "0 ASC, 1 ASC COLLATE latin1_swedish_ci",
+ "Query": "select col, textcol1 from `user` group by col, textcol1 order by col asc, textcol1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation filtering by having on a route with no group by with non-unique vindex filter",
+ "query": "select 1 from user having count(id) = 10 and name = 'a'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10 and name = 'a'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` having count(id) = 10 and `name` = 'a'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"a\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10 and name = 'a'",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"a\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
+ "Query": "select 1, count(id) from `user` where `name` = 'a'",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregates and joins",
+ "query": "select count(*) from user join user_extra",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation filtering by having on a route with no group by",
+ "query": "select 1 from user having count(id) = 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` having count(id) = 10",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
+ "Query": "select 1, count(id) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregate on join",
+ "query": "select user.a, count(*) from user join user_extra group by user.a",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a, count(*) from user join user_extra group by user.a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(*)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregate on other table in join",
+ "query": "select user.a, count(user_extra.a) from user join user_extra group by user.a",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a, count(user_extra.a) from user join user_extra group by user.a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count(user_extra.a)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(user_extra.a)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(user_extra.a) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(user_extra.a) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation spread out across three routes",
+ "query": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count(u.textcol1), sum_count(1) AS count(ue.foo)",
+ "GroupBy": "(2|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1)) as count(u.textcol1)",
+ "([COLUMN 5] * COALESCE([COLUMN 6], INT64(1))) * COALESCE([COLUMN 7], INT64(1)) as count(ue.foo)",
+ "[COLUMN 0] as bar",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:1,R:2,R:3,L:2,R:4,R:5",
+ "JoinVars": {
+ "u_foo": 0
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u where 1 != 1 group by u.foo, weight_string(u.foo)",
+ "Query": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u group by u.foo, weight_string(u.foo)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
+ "JoinVars": {
+ "ue_bar": 0
+ },
+ "TableName": "user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where 1 != 1 group by ue.bar, weight_string(ue.bar)",
+ "Query": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where ue.bar = :u_foo group by ue.bar, weight_string(ue.bar)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where 1 != 1 group by us.bar, weight_string(us.bar)",
+ "Query": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where us.baz = :ue_bar group by us.bar, weight_string(us.bar)",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "using two distinct columns - min with distinct vindex, sum with distinct without vindex",
+ "query": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1), sum_distinct_sum(2) AS sum(distinct col3)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(col3)",
+ "OrderBy": "(0|3) ASC, (2|4) ASC",
+ "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, col3, weight_string(col1), weight_string(col3) order by col1 asc, col3 asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1) AS min(distinct id), sum_distinct(2|4) AS sum(distinct col3)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, weight_string(col1), col3, weight_string(col3)",
+ "OrderBy": "(0|3) ASC, (2|4) ASC",
+ "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, weight_string(col1), col3, weight_string(col3) order by col1 asc, col3 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation on top of semijoin",
+ "query": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)",
+ "v3-plan": "VT12001: unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 1] as count(*)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_apa": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` where 1 != 1 group by `user`.apa, weight_string(`user`.apa)",
+ "Query": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` group by `user`.apa, weight_string(`user`.apa)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.bar = :user_apa",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "we have to track the order of distinct aggregation expressions",
+ "query": "select val2, count(distinct val1), count(*) from user group by val2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select val2, count(distinct val1), count(*) from user group by val2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS count(distinct val1), sum_count(2) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select val2, count(distinct val1), count(*) from user group by val2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|4) AS count(distinct val1), sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, weight_string(val2), val1, weight_string(val1)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, weight_string(val2), val1, weight_string(val1) order by val2 asc, val1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "group by column alias",
+ "query": "select ascii(val1) as a, count(*) from user group by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ascii(val1) as a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ascii(val1) as a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple distinct aggregations on the same column is allowed",
+ "query": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1",
+ "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct tcol2)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|4) AS count(distinct tcol2), sum_distinct(2|4) AS sum(distinct tcol2)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2) order by tcol1 asc, tcol2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple distinct aggregations on the same column in different positions",
+ "query": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1",
+ "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct tcol2)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(0|4) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|4) AS sum(distinct tcol2)",
+ "GroupBy": "(1|5)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` where 1 != 1 group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1)",
+ "OrderBy": "(1|5) ASC, (0|4) ASC",
+ "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1) order by tcol1 asc, tcol2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "distinct aggregation will 3 table join query",
+ "query": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|2) AS count(distinct u.val2)",
+ "GroupBy": "0 COLLATE latin1_swedish_ci",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as textcol1",
+ "[COLUMN 1] as val2",
+ "[COLUMN 2]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:2,L:3,L:5",
+ "JoinVars": {
+ "u2_val2": 0
+ },
+ "TableName": "`user`_`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:0,L:2,L:0,R:1,L:1",
+ "JoinVars": {
+ "u_val2": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u where 1 != 1 group by u.val2, weight_string(u.val2), u.textcol1",
+ "OrderBy": "2 ASC COLLATE latin1_swedish_ci, (0|1) ASC",
+ "Query": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u group by u.val2, weight_string(u.val2), u.textcol1 order by u.textcol1 asc, u.val2 asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u2.val2, weight_string(u2.val2) from `user` as u2 where 1 != 1 group by u2.val2, weight_string(u2.val2)",
+ "Query": "select u2.val2, weight_string(u2.val2) from `user` as u2 where u2.id = :u_val2 group by u2.val2, weight_string(u2.val2)",
+ "Table": "`user`",
+ "Values": [
+ ":u_val2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.id = :u2_val2",
+ "Table": "music",
+ "Values": [
+ ":u2_val2"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "interleaving grouping, aggregation and join",
+ "query": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1) AS min(user_extra.foo), max(3) AS max(user_extra.bar)",
+ "GroupBy": "0, (2|4)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as col",
+ "[COLUMN 3] as min(user_extra.foo)",
+ "[COLUMN 1] as bar",
+ "[COLUMN 4] as max(user_extra.bar)",
+ "[COLUMN 2]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,R:1,R:2",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1 group by `user`.col, `user`.bar, weight_string(`user`.bar)",
+ "OrderBy": "0 ASC, (1|2) ASC",
+ "Query": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` group by `user`.col, `user`.bar, weight_string(`user`.bar) order by `user`.col asc, `user`.bar asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where user_extra.bar = :user_col group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "group_concat on single shards",
+ "query": "select group_concat(user_id order by name), id from user group by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select group_concat(user_id order by name), id from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
+ "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select group_concat(user_id order by name), id from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
+ "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select count(distinct user_id, name) from unsharded",
+ "query": "select count(distinct user_id, name) from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct user_id, name) from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
+ "Query": "select count(distinct user_id, `name`) from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct user_id, name) from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
+ "Query": "select count(distinct user_id, `name`) from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select count(distinct user_id, name) from user",
+ "query": "select count(distinct user_id, name) from user",
+ "v3-plan": "VT12001: unsupported: only one expression is allowed inside aggregates: count(distinct user_id, `name`)",
+ "gen4-plan": "VT03001: aggregate functions take a single argument 'count(distinct user_id, `name`)'"
+ },
+ {
+ "comment": "select sum(col) from (select user.col as col, 32 from user join user_extra) t",
+ "query": "select sum(col) from (select user.col as col, 32 from user join user_extra) t",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(col) from (select user.col as col, 32 from user join user_extra) t",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS sum(col)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as sum(col)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col as col, 32, sum(col) from `user` where 1 != 1",
+ "Query": "select `user`.col as col, 32, sum(col) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "find aggregation expression and use column offset in filter",
+ "query": "select foo, count(*) from user group by foo having count(*) = 3",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo, count(*) from user group by foo having count(*) = 3",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 3",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "find aggregation expression and use column offset in filter times two",
+ "query": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1,
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 + :2 = 42",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "find aggregation expression and use column offset in filter times three",
+ "query": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1,
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 + :2 = 42",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "having should be able to add new aggregation expressions in having",
+ "query": "select foo from user group by foo having count(*) = 3",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo from user group by foo having count(*) = 3",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 3",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3",
+ "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 3",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count(u.`name`)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as id",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(u.`name`)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
+ "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
+ "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
+ "query": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id where 1 != 1 group by u.id",
+ "Query": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id group by u.id having count(u.`name`) = 3",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u, user_extra as ue where 1 != 1 group by u.id",
+ "Query": "select u.id from `user` as u, user_extra as ue where ue.user_id = u.id group by u.id having count(u.`name`) = 3",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "only extract the aggregation once, even if used twice",
+ "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 < 3 and :1 > 5",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as id",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(*)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
+ "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
+ "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue",
+ "query": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue",
+ "v3-plan": "VT03020: symbol ue.col not found in subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 > 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS 1, sum_count(1) AS count(ue.col)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(ue.col) from `user` as u where 1 != 1",
+ "Query": "select 1, count(ue.col) from `user` as u",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from user_extra as ue where 1 != 1",
+ "Query": "select :__sq1 from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "group by and ',' joins with condition",
+ "query": "select user.col from user join user_extra on user_extra.col = user.col group by user.id",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user_extra.col = user.col group by user.id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(0) AS col",
+ "GroupBy": "(2|1)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] as col",
+ "[COLUMN 1]",
+ "[COLUMN 0] as id"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by `user`.col, `user`.id, weight_string(`user`.id)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` group by `user`.col, `user`.id, weight_string(`user`.id) order by `user`.id asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1 group by 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_col group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate symtab lookup error",
+ "query": "select id, b as id, count(*) from user order by id",
+ "v3-plan": "VT03021: ambiguous symbol reference: id",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, b as id, count(*) from user order by id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, random(1) AS id, sum_count_star(2) AS count(*)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, b as id, count(*), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select id, b as id, count(*), weight_string(b) from `user` order by id asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggr and non-aggr without group by (with query does not give useful result out)",
+ "query": "select id, count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(1) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "group by and ',' joins",
+ "query": "select user.id from user, user_extra group by id",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user, user_extra group by id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|1)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as id",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 group by id, weight_string(id)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` group by id, weight_string(id) order by id asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "count on column from LIMIT",
+ "query": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count(0) AS count(city)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] as count(city)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select phone, id, city from `user` where 1 != 1",
+ "Query": "select phone, id, city from `user` where id > 12 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count(*) on column from LIMIT",
+ "query": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as count(*)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select phone, id, city from `user` where 1 != 1",
+ "Query": "select phone, id, city from `user` where id > 12 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count non-null columns incoming from outer joins should work well",
+ "query": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count(0) AS count(col)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as count(col)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col as col from user_extra where 1 != 1",
+ "Query": "select user_extra.col as col from user_extra where user_extra.id = :user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "grouping on data from derived table",
+ "query": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 1] as val1",
+ "[COLUMN 0] as count(*)",
+ "[COLUMN 2]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, val1, weight_string(val1) from `user` where 1 != 1",
+ "OrderBy": "(1|2) ASC, (1|2) ASC",
+ "Query": "select id, val1, weight_string(val1) from `user` where val2 < 4 order by val1 asc, val1 asc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Can't inline derived table when it has HAVING with aggregation function",
+ "query": "select * from (select id from user having count(*) = 1) s",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select id from user having count(*) = 1) s",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select id from `user` where 1 != 1) as s where 1 != 1",
+ "Query": "select * from (select id from `user` having count(*) = 1) as s",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select id from user having count(*) = 1) s",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 1",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group By X Order By X",
+ "query": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: count(`user`.intcol)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count(`user`.intcol)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.intcol, count(`user`.intcol) from `user` where 1 != 1 group by `user`.intcol",
+ "OrderBy": "0 ASC",
+ "Query": "select `user`.intcol, count(`user`.intcol) from `user` group by `user`.intcol order by `user`.intcol asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "AggregateRandom in non full group by query",
+ "query": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS name, sum_count(2) AS count(m.predef1)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as id",
+ "[COLUMN 2] as name",
+ "[COLUMN 3] * COALESCE([COLUMN 4], INT64(1)) as count(m.predef1)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:2,R:3,R:0,L:1,R:1",
+ "JoinVars": {
+ "m_order": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select m.`order`, count(m.predef1), weight_string(m.`order`) from user_extra as m where 1 != 1 group by m.`order`, weight_string(m.`order`)",
+ "Query": "select m.`order`, count(m.predef1), weight_string(m.`order`) from user_extra as m group by m.`order`, weight_string(m.`order`)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
+ "Query": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where u.id = :m_order group by u.id, weight_string(u.id)",
+ "Table": "`user`",
+ "Values": [
+ ":m_order"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregation in a left join query",
+ "query": "select count (u.id) from user u left join user_extra ue on u.col = ue.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count (u.id) from user u left join user_extra ue on u.col = ue.col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count(u.id)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(u.id)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1,R:1",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, count(u.id) from `user` as u where 1 != 1 group by u.col",
+ "Query": "select u.col, count(u.id) from `user` as u group by u.col",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra as ue where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra as ue where ue.col = :u_col group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregations from derived table used in arithmetic outside derived table",
+ "query": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A",
+ "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 1] as b",
+ "[COLUMN 0] / [COLUMN 1] as d"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS a, sum(1) AS b",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(a) as a, sum(b) as b from `user` where 1 != 1",
+ "Query": "select sum(a) as a, sum(b) as b from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "when pushing predicates into derived tables, make sure to put them in HAVING when they contain aggregations",
+ "query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where 1 != 1 group by user_id, flowId) as t1 where 1 != 1",
+ "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where `count` >= :v2",
+ "Table": "user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where 1 != 1 group by user_id, flowId) as t1 where 1 != 1",
+ "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where `count` >= :v2",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation, where and derived tables - we can push extremums",
+ "query": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "max(1) AS bazo",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, max(baz) as bazo, weight_string(foo) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select foo, max(baz) as bazo, weight_string(foo) from (select foo, baz from `user` having max(baz) between 100 and 200) as f group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation, where and derived tables - we can't push aggregations that might need a second layer of aggregation",
+ "query": "SELECT foo FROM (SELECT foo, count(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT foo FROM (SELECT foo, count(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "bazo between 100 and 200",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS bazo",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user`) as f group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt b/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt
deleted file mode 100644
index d1c54cb1b2d..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt
+++ /dev/null
@@ -1,5776 +0,0 @@
-# Test cases in this file follow the code in ordered_aggregate.go.
-#
-# Aggregate on unsharded
-"select count(*), col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*), col from unsharded where 1 != 1",
- "Query": "select count(*), col from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*), col from unsharded where 1 != 1",
- "Query": "select count(*), col from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Aggregate on unique sharded
-"select count(*), col from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), col from `user` where 1 != 1",
- "Query": "select count(*), col from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), col from `user` where 1 != 1",
- "Query": "select count(*), col from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Aggregate detection (non-aggregate function)
-"select fun(1), col from user"
-{
- "QueryType": "SELECT",
- "Original": "select fun(1), col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select fun(1), col from `user` where 1 != 1",
- "Query": "select fun(1), col from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select fun(1), col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select fun(1), col from `user` where 1 != 1",
- "Query": "select fun(1), col from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select distinct with unique vindex for scatter route.
-"select distinct col1, id from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1",
- "Query": "select distinct col1, id from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1",
- "Query": "select distinct col1, id from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct and group by together for single route - group by is redundant
-"select distinct col1, id from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user group by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1 group by col1",
- "Query": "select distinct col1, id from `user` group by col1",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user group by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1 group by col1",
- "Query": "select distinct col1, id from `user` group by col1",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter group by a text column
-"select count(*), a, textcol1, b from user group by a, textcol1, b"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), a, textcol1, b from user group by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1, 4, 3",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b)",
- "OrderBy": "(1|5) ASC, (2|4) ASC, (3|6) ASC",
- "Query": "select count(*), a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) order by a asc, textcol1 asc, b asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), a, textcol1, b from user group by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "GroupBy": "(1|4), 2 COLLATE latin1_swedish_ci, (3|5)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), textcol1, b, weight_string(b)",
- "OrderBy": "(1|4) ASC, 2 ASC COLLATE latin1_swedish_ci, (3|5) ASC",
- "Query": "select count(*), a, textcol1, b, weight_string(a), weight_string(b) from `user` group by a, weight_string(a), textcol1, b, weight_string(b) order by a asc, textcol1 asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter group by a integer column. Do not add weight strings for this.
-"select count(*), intcol from user group by intcol"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), intcol from user group by intcol",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), intcol from `user` where 1 != 1 group by intcol",
- "OrderBy": "1 ASC",
- "Query": "select count(*), intcol from `user` group by intcol order by intcol asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), intcol from user group by intcol",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), intcol from `user` where 1 != 1 group by intcol",
- "OrderBy": "1 ASC",
- "Query": "select count(*), intcol from `user` group by intcol order by intcol asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter group by a text column, reuse existing weight_string
-"select count(*) k, a, textcol1, b from user group by a, textcol1, b order by k, textcol1"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) k, a, textcol1, b from user group by a, textcol1, b order by k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC, (2|4) ASC",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1, 4, 3",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as k, a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b)",
- "OrderBy": "(2|4) ASC, (1|5) ASC, (3|6) ASC",
- "Query": "select count(*) as k, a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) order by textcol1 asc, a asc, b asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) k, a, textcol1, b from user group by a, textcol1, b order by k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC, 2 ASC COLLATE latin1_swedish_ci",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS k",
- "GroupBy": "(1|4), 2 COLLATE latin1_swedish_ci, (3|5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as k, a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), textcol1, b, weight_string(b)",
- "OrderBy": "(1|4) ASC, 2 ASC COLLATE latin1_swedish_ci, (3|5) ASC",
- "Query": "select count(*) as k, a, textcol1, b, weight_string(a), weight_string(b) from `user` group by a, weight_string(a), textcol1, b, weight_string(b) order by a asc, textcol1 asc, b asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count aggregate
-"select count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# sum aggregate
-"select sum(col) from user"
-{
- "QueryType": "SELECT",
- "Original": "select sum(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select sum(col) from `user` where 1 != 1",
- "Query": "select sum(col) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sum(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS sum(col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select sum(col) from `user` where 1 != 1",
- "Query": "select sum(col) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# min aggregate
-"select min(col) from user"
-{
- "QueryType": "SELECT",
- "Original": "select min(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "min(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select min(col) from `user` where 1 != 1",
- "Query": "select min(col) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select min(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "min(0) AS min(col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select min(col) from `user` where 1 != 1",
- "Query": "select min(col) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# max aggregate
-"select max(col) from user"
-{
- "QueryType": "SELECT",
- "Original": "select max(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(col) from `user` where 1 != 1",
- "Query": "select max(col) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select max(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0) AS max(col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(col) from `user` where 1 != 1",
- "Query": "select max(col) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct and group by together for scatter route
-"select distinct col1, col2 from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1) order by col1 asc, col2 asc, col1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|3)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1 order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregate on RHS subquery (tests symbol table merge)
-"select user.a, t.b from user join (select count(*) b from unsharded) as t"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, t.b from user join (select count(*) b from unsharded) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.b from (select count(*) as b from unsharded where 1 != 1) as t where 1 != 1",
- "Query": "select t.b from (select count(*) as b from unsharded) as t",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.a, t.b from user join (select count(*) b from unsharded) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.b from (select count(*) as b from unsharded where 1 != 1) as t where 1 != 1",
- "Query": "select t.b from (select count(*) as b from unsharded) as t",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# group by a unique vindex should use a simple route
-"select id, count(*) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex and other column should use a simple route
-"select id, col, count(*) from user group by id, col"
-{
- "QueryType": "SELECT",
- "Original": "select id, col, count(*) from user group by id, col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, col, count(*) from `user` where 1 != 1 group by id, col",
- "Query": "select id, col, count(*) from `user` group by id, col",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, col, count(*) from user group by id, col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, col, count(*) from `user` where 1 != 1 group by id, col",
- "Query": "select id, col, count(*) from `user` group by id, col",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a non-vindex column should use an OrderdAggregate primitive
-"select col, count(*) from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by must only reference expressions in the select list
-"select col, count(*) from user group by col, baz"
-"unsupported: in scatter query: group by column must reference column in SELECT list"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col, baz",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0, (2|3)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*), baz, weight_string(baz) from `user` where 1 != 1 group by col, baz, weight_string(baz)",
- "OrderBy": "0 ASC, (2|3) ASC",
- "Query": "select col, count(*), baz, weight_string(baz) from `user` group by col, baz, weight_string(baz) order by col asc, baz asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a non-unique vindex column should use an OrderedAggregate primitive
-"select name, count(*) from user group by name"
-{
- "QueryType": "SELECT",
- "Original": "select name, count(*) from user group by name",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, count(*), weight_string(`name`) from `user` where 1 != 1 group by `name`, weight_string(`name`)",
- "OrderBy": "(0|2) ASC",
- "Query": "select `name`, count(*), weight_string(`name`) from `user` group by `name`, weight_string(`name`) order by `name` asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select name, count(*) from user group by name",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, count(*), weight_string(`name`) from `user` where 1 != 1 group by `name`, weight_string(`name`)",
- "OrderBy": "(0|2) ASC",
- "Query": "select `name`, count(*), weight_string(`name`) from `user` group by `name`, weight_string(`name`) order by `name` asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex should use a simple route, even if aggr is complex
-"select id, 1+count(*) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, 1+count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, 1 + count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, 1 + count(*) from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, 1+count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, 1 + count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, 1 + count(*) from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex where alias from select list is used
-"select id as val, 1+count(*) from user group by val"
-{
- "QueryType": "SELECT",
- "Original": "select id as val, 1+count(*) from user group by val",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as val, 1 + count(*) from `user` where 1 != 1 group by val",
- "Query": "select id as val, 1 + count(*) from `user` group by val",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as val, 1+count(*) from user group by val",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as val, 1 + count(*) from `user` where 1 != 1 group by val",
- "Query": "select id as val, 1 + count(*) from `user` group by val",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex where expression is qualified (alias should be ignored)
-"select val as id, 1+count(*) from user group by user.id"
-{
- "QueryType": "SELECT",
- "Original": "select val as id, 1+count(*) from user group by user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val as id, 1 + count(*) from `user` where 1 != 1 group by `user`.id",
- "Query": "select val as id, 1 + count(*) from `user` group by `user`.id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select val as id, 1+count(*) from user group by user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val as id, 1 + count(*) from `user` where 1 != 1 group by `user`.id",
- "Query": "select val as id, 1 + count(*) from `user` group by `user`.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex where it should skip non-aliased expressions.
-"select *, id, 1+count(*) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select *, id, 1+count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select *, id, 1 + count(*) from `user` where 1 != 1 group by id",
- "Query": "select *, id, 1 + count(*) from `user` group by id",
- "Table": "`user`"
- }
-}
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# group by a unique vindex should revert to simple route, and having clause should find the correct symbols.
-"select id, count(*) c from user group by id having id=1 and c=10"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having id=1 and c=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` group by id having id = 1 and c = 10",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having id=1 and c=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` where id = 1 group by id having count(*) = 10",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex should revert to simple route, and having clause should find the correct symbols.
-"select id, count(*) c from user group by id having max(col) \u003e 10"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having max(col) \u003e 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` group by id having max(col) \u003e 10",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having max(col) \u003e 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` group by id having max(col) \u003e 10",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate in a subquery
-"select a from (select count(*) as a from user) t"
-{
- "QueryType": "SELECT",
- "Original": "select a from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with non-aggregate expressions.
-"select id, count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(1) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate using distinctdistinct
-"select distinct col from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select distinct col from `user` order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select distinct col from `user` order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by select col
-"select col from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct group by unique vindex
-"select id, count(distinct col) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(distinct col) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
- "Query": "select id, count(distinct col) from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(distinct col) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
- "Query": "select id, count(distinct col) from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct unique vindex
-"select col, count(distinct id) from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct id) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(distinct id) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct id) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_distinct(1) AS count(distinct id)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(distinct id) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct no unique vindex
-"select col1, count(distinct col2) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS count(distinct col2)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|3) AS count(distinct col2)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct no unique vindex and no group by
-"select count(distinct col2) from user"
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct col2) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count_distinct_count(0) AS count(distinct col2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
- "OrderBy": "(0|1) ASC",
- "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct col2) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count_distinct(0|1) AS count(distinct col2)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
- "OrderBy": "(0|1) ASC",
- "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct no unique vindex, count expression aliased
-"select col1, count(distinct col2) c2 from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) c2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS c2",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) c2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|3) AS c2",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# sum with distinct no unique vindex
-"select col1, sum(distinct col2) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, sum(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_distinct_sum(1) AS sum(distinct col2)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, sum(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_distinct(1|3) AS sum(distinct col2)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# min with distinct no unique vindex. distinct is ignored.
-"select col1, min(distinct col2) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, min(distinct col2), weight_string(col1) from `user` where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC",
- "Query": "select col1, min(distinct col2), weight_string(col1) from `user` group by col1, weight_string(col1) order by col1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1|3) AS min(distinct col2)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by count distinct
-"select col1, count(distinct col2) k from user group by col1 order by k"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS k",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|3) AS k",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by aggregate function
-"select count(*) b from user group by b"
-"Can't group on 'b'"
-Gen4 error: Can't group on 'count(*)'
-
-# scatter aggregate multiple group by (columns)
-"select a, b, count(*) from user group by b, a"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
- "OrderBy": "(1|3) ASC, (0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3), (1|4)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate multiple group by (numbers)
-"select a, b, count(*) from user group by 2, 1"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by 2, 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by 2, 1, weight_string(b), weight_string(a)",
- "OrderBy": "(1|3) ASC, (0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by 2, 1, weight_string(b), weight_string(a) order by b asc, a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by 2, 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3), (1|4)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate multiple group by columns inverse order
-"select a, b, count(*) from user group by b, a"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
- "OrderBy": "(1|3) ASC, (0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3), (1|4)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by column number
-"select col from user group by 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by 1 order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by invalid column number
-"select col from user group by 2"
-"Unknown column '2' in 'group statement'"
-Gen4 plan same as above
-
-# scatter aggregate order by null
-"select count(*) from user order by null"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with numbered order by columns
-"select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "0, 1, 2",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by 1 asc, 2 asc, 3 asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
- "GroupBy": "(0|5), (1|6), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with named order by columns
-"select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "0, 1, 2",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
- "GroupBy": "(0|5), (1|6), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with jumbled order by columns
-"select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "0, 1, 2, 3",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
- "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(4) AS count(*)",
- "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
- "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with jumbled group by and order by columns
-"select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "2, 1, 0, 3",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
- "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(4) AS count(*)",
- "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
- "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with some descending order by cols
-"select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(3) AS count",
- "GroupBy": "2, 1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b)",
- "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC",
- "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b) order by 1 desc, 3 desc, b asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(3) AS count(*)",
- "GroupBy": "(0|4), (2|6), (1|5)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
- "OrderBy": "(0|4) DESC, (2|6) DESC, (1|5) ASC",
- "Query": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a desc, c desc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# invalid order by column numner for scatter
-"select col, count(*) from user group by col order by 5 limit 10"
-"Unknown column '5' in 'order clause'"
-Gen4 plan same as above
-
-# aggregate with limit
-"select col, count(*) from user group by col limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Group by with collate operator
-"select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
- "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
- "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules for aggregates
-"select id, count(*) from route2 group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from route2 group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
- "Query": "select id, count(*) from unsharded as route2 group by id",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from route2 group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
- "Query": "select id, count(*) from unsharded as route2 group by id",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# order by on a reference table
-"select col from ref order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from ref order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref order by col asc",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from ref order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref order by col asc",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# distinct and aggregate functions missing group by
-"select distinct a, count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select a, count(*), weight_string(a) from `user` order by a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS a, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*) from `user` where 1 != 1",
- "Query": "select a, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct and aggregate functions
-"select distinct a, count(*) from user group by a"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc, a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), 1",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|2) ASC",
- "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Group by invalid column number (code is duplicated from symab).
-"select id from user group by 1.1"
-"column number is not an int"
-{
- "QueryType": "SELECT",
- "Original": "select id from user group by 1.1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS id",
- "GroupBy": "1",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, 1.1 from `user` where 1 != 1 group by 1.1",
- "OrderBy": "1 ASC",
- "Query": "select id, 1.1 from `user` group by 1.1 order by 1.1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Group by out of range column number (code is duplicated from symab).
-"select id from user group by 2"
-"Unknown column '2' in 'group statement'"
-Gen4 plan same as above
-
-# here it is safe to remove the order by on the derived table since it will not influence the output of the count(*)
-"select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
- "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
- "Table": "`user`, user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# order by inside derived tables can be ignored
-"select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a"
-{
- "QueryType": "SELECT",
- "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id order by user_extra.extra asc",
- "ResultColumns": 2,
- "Table": "`user`, user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
- "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-
-# here we keep the order since the column is visible on the outside, and used by the orderedAggregate
-"select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a group by col order by col asc",
- "Table": "`user`, user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# optimize group by when using distinct with no aggregation
-"select distinct col1, col2 from user group by col1, col2"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1, col2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1, 0, 1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC, (1|3) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc, col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1, col2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|3)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# do not use distinct when using only aggregates and no group by
-"select distinct count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct count(*) from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Grouping on join
-"select user.a from user join user_extra group by user.a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.a from user join user_extra group by user.a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as a",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
- "OrderBy": "(0|1) ASC",
- "Query": "select `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Cannot have more than one aggr(distinct...
-"select count(distinct a), count(distinct b) from user"
-"unsupported: only one distinct aggregation allowed in a select: count(distinct b)"
-Gen4 plan same as above
-
-# multiple distinct functions with grouping.
-"select col1, count(distinct col2), sum(distinct col2) from user group by col1"
-"unsupported: only one distinct aggregation allowed in a select: sum(distinct col2)"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2), sum(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|4) AS count(distinct col2), sum_distinct(2|4) AS sum(distinct col2)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregate query with order by aggregate column along with NULL
-"select col, count(*) k from user group by col order by null, k"
-"unsupported: in scatter query: complex order by expression: null"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) k from user group by col order by null, k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS k",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as k from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregate query with order by NULL
-"select col, count(*) k from user group by col order by null"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) k from user group by col order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as k from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) k from user group by col order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS k",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as k from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join query on sharding key with group by a unique vindex with having clause.
-"select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) \u003e 10"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) \u003e 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, count(*) as c from `user`, user_extra where 1 != 1 group by `user`.id",
- "Query": "select `user`.id, count(*) as c from `user`, user_extra where `user`.id = user_extra.user_id group by `user`.id having max(`user`.col) \u003e 10",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery on sharding key with group by a unique vindex with having clause.
-"select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) \u003e 10)"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) \u003e 10)",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) \u003e 10 limit 1)",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) \u003e 10)",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) \u003e 10 limit 1)",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# aggregation filtering by having on a route
-"select id from user group by id having count(id) = 10"
-{
- "QueryType": "SELECT",
- "Original": "select id from user group by id having count(id) = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 group by id",
- "Query": "select id from `user` group by id having count(id) = 10",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user group by id having count(id) = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 group by id",
- "Query": "select id from `user` group by id having count(id) = 10",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# weight_string addition to group by
-"select lower(textcol1) as v, count(*) from user group by v"
-{
- "QueryType": "SELECT",
- "Original": "select lower(textcol1) as v, count(*) from user group by v",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select lower(textcol1) as v, count(*) from user group by v",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# weight_string addition to group by when also there in order by
-"select char_length(texcol1) as a, count(*) from user group by a order by a"
-{
- "QueryType": "SELECT",
- "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by inside and outside parenthesis select
-"(select id from user order by 1 desc) order by 1 asc limit 2"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by 1 asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# correlated subquery in exists clause with an ordering
-"select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by id"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by id",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_id": 0
- },
- "ProjectedIndexes": "-2,-1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, col, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select `user`.id, col, weight_string(id) from `user` order by id asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id \u003c :user_id",
- "Table": "user_extra",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Column and Literal equality filter on scatter aggregates
-"select count(*) a from user having a = 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a = 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 = 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Equality filtering with column and string literal on scatter aggregates
-"select count(*) a from user having a = '1'"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a = '1'",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 = '1'",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column and Literal not equal filter on scatter aggregates
-"select count(*) a from user having a != 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a != 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 != 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Not equal filter with column and string literal on scatter aggregates
-"select count(*) a from user having a != '1'"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a != '1'",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 != '1'",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Greater than filter on scatter aggregates
-"select count(*) a from user having a \u003e 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003e 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003e 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Greater Equal filter on scatter aggregates
-"select count(*) a from user having a \u003e= 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003e= 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003e= 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Less than filter on scatter aggregates
-"select count(*) a from user having a \u003c 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003c 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003c 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Less Equal filter on scatter aggregates
-"select count(*) a from user having a \u003c= 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003c= 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003c= 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Less Equal filter on scatter with grouping
-"select col, count(*) a from user group by col having a \u003c= 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) a from user group by col having a \u003c= 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":1 \u003c= 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS a",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as a from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as a from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# We should be able to find grouping keys on ordered aggregates
-"select count(*) as a, val1 from user group by val1 having a = 1.00"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) as a, val1 from user group by val1 having a = 1.00",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":0 = 1.00",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS a",
- "GroupBy": "(1|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct on text column with collation
-"select col, count(distinct textcol1) from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct textcol1) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS count(distinct textcol1)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, textcol1, weight_string(textcol1) from `user` where 1 != 1 group by col, textcol1, weight_string(textcol1)",
- "OrderBy": "0 ASC, (1|2) ASC",
- "Query": "select col, textcol1, weight_string(textcol1) from `user` group by col, textcol1, weight_string(textcol1) order by col asc, textcol1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct textcol1) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1 COLLATE latin1_swedish_ci) AS count(distinct textcol1)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, textcol1 from `user` where 1 != 1 group by col, textcol1",
- "OrderBy": "0 ASC, 1 ASC COLLATE latin1_swedish_ci",
- "Query": "select col, textcol1 from `user` group by col, textcol1 order by col asc, textcol1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation filtering by having on a route with no group by with non-unique vindex filter
-"select 1 from user having count(id) = 10 and name = 'a'"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10 and name = 'a'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` having count(id) = 10 and `name` = 'a'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"a\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10 and name = 'a'",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"a\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
- "Query": "select 1, count(id) from `user` where `name` = 'a'",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Aggregates and joins
-"select count(*) from user join user_extra"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user join user_extra",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] * [COLUMN 1] as count(*)"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# aggregation filtering by having on a route with no group by
-"select 1 from user having count(id) = 10"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` having count(id) = 10",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
- "Query": "select 1, count(id) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Aggregate on join
-"select user.a, count(*) from user join user_extra group by user.a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, count(*) from user join user_extra group by user.a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as a",
- "[COLUMN 2] * [COLUMN 3] as count(*)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Aggregate on other table in join
-"select user.a, count(user_extra.a) from user join user_extra group by user.a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, count(user_extra.a) from user join user_extra group by user.a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count(user_extra.a)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as a",
- "[COLUMN 2] * [COLUMN 3] as count(user_extra.a)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(user_extra.a) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(user_extra.a) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# aggregation spread out across three routes
-"select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count(u.textcol1), sum_count(1) AS count(ue.foo)",
- "GroupBy": "(2|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "([COLUMN 2] * [COLUMN 3]) * [COLUMN 4] as count(u.textcol1)",
- "([COLUMN 5] * [COLUMN 6]) * [COLUMN 7] as count(ue.foo)",
- "[COLUMN 0] as bar",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:1,R:2,R:3,L:2,R:4,R:5",
- "JoinVars": {
- "u_foo": 0
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u where 1 != 1 group by u.foo, weight_string(u.foo)",
- "Query": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u group by u.foo, weight_string(u.foo)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
- "JoinVars": {
- "ue_bar": 0
- },
- "TableName": "user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where 1 != 1 group by ue.bar, weight_string(ue.bar)",
- "Query": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where ue.bar = :u_foo group by ue.bar, weight_string(ue.bar)",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where 1 != 1 group by us.bar, weight_string(us.bar)",
- "Query": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where us.baz = :ue_bar group by us.bar, weight_string(us.bar)",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# using two distinct columns - min with distinct vindex, sum with distinct without vindex
-"select col1, min(distinct id), sum(distinct col3) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1), sum_distinct_sum(2) AS sum(distinct col3)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(col3)",
- "OrderBy": "(0|3) ASC, (2|4) ASC",
- "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, col3, weight_string(col1), weight_string(col3) order by col1 asc, col3 asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1) AS min(distinct id), sum_distinct(2|4) AS sum(distinct col3)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, weight_string(col1), col3, weight_string(col3)",
- "OrderBy": "(0|3) ASC, (2|4) ASC",
- "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, weight_string(col1), col3, weight_string(col3) order by col1 asc, col3 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation on top of semijoin
-"select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 1] as count(*)"
- ],
- "Inputs": [
- {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_apa": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` where 1 != 1 group by `user`.apa, weight_string(`user`.apa)",
- "Query": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` group by `user`.apa, weight_string(`user`.apa)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.bar = :user_apa",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# we have to track the order of distinct aggregation expressions
-"select val2, count(distinct val1), count(*) from user group by val2"
-{
- "QueryType": "SELECT",
- "Original": "select val2, count(distinct val1), count(*) from user group by val2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS count(distinct val1), sum_count(2) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select val2, count(distinct val1), count(*) from user group by val2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|4) AS count(distinct val1), sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, weight_string(val2), val1, weight_string(val1)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, weight_string(val2), val1, weight_string(val1) order by val2 asc, val1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by column alias
-"select ascii(val1) as a, count(*) from user group by a"
-{
- "QueryType": "SELECT",
- "Original": "select ascii(val1) as a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ascii(val1) as a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple distinct aggregations on the same column is allowed
-"select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1"
-"unsupported: only one distinct aggregation allowed in a select: sum(distinct tcol2)"
-{
- "QueryType": "SELECT",
- "Original": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|4) AS count(distinct tcol2), sum_distinct(2|4) AS sum(distinct tcol2)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2) order by tcol1 asc, tcol2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple distinct aggregations on the same column in different positions
-"select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1"
-"unsupported: only one distinct aggregation allowed in a select: sum(distinct tcol2)"
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(0|4) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|4) AS sum(distinct tcol2)",
- "GroupBy": "(1|5)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` where 1 != 1 group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1)",
- "OrderBy": "(1|5) ASC, (0|4) ASC",
- "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1) order by tcol1 asc, tcol2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct aggregation will 3 table join query
-"select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|2) AS count(distinct u.val2)",
- "GroupBy": "0 COLLATE latin1_swedish_ci",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as textcol1",
- "[COLUMN 1] as val2",
- "[COLUMN 2]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:2,L:3,L:5",
- "JoinVars": {
- "u2_val2": 0
- },
- "TableName": "`user`_`user`_music",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,L:2,L:0,R:1,L:1",
- "JoinVars": {
- "u_val2": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u where 1 != 1 group by u.val2, weight_string(u.val2), u.textcol1",
- "OrderBy": "2 ASC COLLATE latin1_swedish_ci, (0|1) ASC",
- "Query": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u group by u.val2, weight_string(u.val2), u.textcol1 order by u.textcol1 asc, u.val2 asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u2.val2, weight_string(u2.val2) from `user` as u2 where 1 != 1 group by u2.val2, weight_string(u2.val2)",
- "Query": "select u2.val2, weight_string(u2.val2) from `user` as u2 where u2.id = :u_val2 group by u2.val2, weight_string(u2.val2)",
- "Table": "`user`",
- "Values": [
- ":u_val2"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.id = :u2_val2",
- "Table": "music",
- "Values": [
- ":u2_val2"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# interleaving grouping, aggregation and join
-"select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1) AS min(user_extra.foo), max(3) AS max(user_extra.bar)",
- "GroupBy": "0, (2|4)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as col",
- "[COLUMN 3] as min(user_extra.foo)",
- "[COLUMN 1] as bar",
- "[COLUMN 4] as max(user_extra.bar)",
- "[COLUMN 2]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,R:1,R:2",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1 group by `user`.col, `user`.bar, weight_string(`user`.bar)",
- "OrderBy": "0 ASC, (1|2) ASC",
- "Query": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` group by `user`.col, `user`.bar, weight_string(`user`.bar) order by `user`.col asc, `user`.bar asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where user_extra.bar = :user_col group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# group_concat on single shards
-"select group_concat(user_id order by name), id from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select group_concat(user_id order by name), id from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
- "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select group_concat(user_id order by name), id from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
- "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select count(distinct user_id, name) from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct user_id, name) from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
- "Query": "select count(distinct user_id, `name`) from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct user_id, name) from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
- "Query": "select count(distinct user_id, `name`) from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-"select count(distinct user_id, name) from user"
-"unsupported: only one expression allowed inside aggregates: count(distinct user_id, `name`)"
-Gen4 error: aggregate functions take a single argument 'count(distinct user_id, `name`)'
-
-"select sum(col) from (select user.col as col, 32 from user join user_extra) t"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select sum(col) from (select user.col as col, 32 from user join user_extra) t",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS sum(col)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] * [COLUMN 3] as sum(col)"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col as col, 32, sum(col) from `user` where 1 != 1",
- "Query": "select `user`.col as col, 32, sum(col) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# find aggregation expression and use column offset in filter
-"select foo, count(*) from user group by foo having count(*) = 3"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo, count(*) from user group by foo having count(*) = 3",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 3",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|2) ASC",
- "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# find aggregation expression and use column offset in filter times two
-"select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1,
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 + :2 = 42",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|3) ASC",
- "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# find aggregation expression and use column offset in filter times three
-"select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1,
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 + :2 = 42",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|3) ASC",
- "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# having should be able to add new aggregation expressions in having
-"select foo from user group by foo having count(*) = 3"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo from user group by foo having count(*) = 3",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 3",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|2) ASC",
- "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 3",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count(u.`name`)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as id",
- "[COLUMN 2] * [COLUMN 3] as count(u.`name`)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
- "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
- "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id where 1 != 1 group by u.id",
- "Query": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id group by u.id having count(u.`name`) = 3",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u, user_extra as ue where 1 != 1 group by u.id",
- "Query": "select u.id from `user` as u, user_extra as ue where ue.user_id = u.id group by u.id having count(u.`name`) = 3",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# only extract the aggregation once, even if used twice
-"select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) \u003c 3 and count(*) \u003e 5"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) \u003c 3 and count(*) \u003e 5",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 \u003c 3 and :1 \u003e 5",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as id",
- "[COLUMN 2] * [COLUMN 3] as count(*)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
- "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
- "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select (select 1 from user u having count(ue.col) \u003e 10) from user_extra ue"
-"symbol ue.col not found in subquery"
-{
- "QueryType": "SELECT",
- "Original": "select (select 1 from user u having count(ue.col) \u003e 10) from user_extra ue",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 \u003e 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS 1, sum_count(1) AS count(ue.col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(ue.col) from `user` as u where 1 != 1",
- "Query": "select 1, count(ue.col) from `user` as u",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from user_extra as ue where 1 != 1",
- "Query": "select :__sq1 from user_extra as ue",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# group by and ',' joins with condition
-"select user.col from user join user_extra on user_extra.col = user.col group by user.id"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user_extra.col = user.col group by user.id",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS col",
- "GroupBy": "(2|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] * [COLUMN 3] as col",
- "[COLUMN 1]",
- "[COLUMN 0] as id"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by `user`.col, `user`.id, weight_string(`user`.id)",
- "OrderBy": "(1|2) ASC",
- "Query": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` group by `user`.col, `user`.id, weight_string(`user`.id) order by `user`.id asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra where user_extra.col = :user_col group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# scatter aggregate symtab lookup error
-"select id, b as id, count(*) from user order by id"
-"ambiguous symbol reference: id"
-{
- "QueryType": "SELECT",
- "Original": "select id, b as id, count(*) from user order by id",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, random(1) AS id, sum_count_star(2) AS count(*)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, b as id, count(*), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select id, b as id, count(*), weight_string(b) from `user` order by id asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggr and non-aggr without group by (with query does not give useful result out)
-"select id, count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(1) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by and ',' joins
-"select user.id from user, user_extra group by id"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user, user_extra group by id",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS id",
- "GroupBy": "(2|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] * [COLUMN 3] as id",
- "[COLUMN 1]",
- "[COLUMN 0] as id"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, weight_string(id) from `user` where 1 != 1 group by id, weight_string(id)",
- "OrderBy": "(0|1) ASC",
- "Query": "select `user`.id, weight_string(id) from `user` group by id, weight_string(id) order by id asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# count on column from LIMIT
-"select count(city) from (select phone, id, city from user where id \u003e 12 limit 10) as x"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(city) from (select phone, id, city from user where id \u003e 12 limit 10) as x",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count(0) AS count(city)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] as count(city)"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select phone, id, city from `user` where 1 != 1",
- "Query": "select phone, id, city from `user` where id \u003e 12 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count(*) on column from LIMIT
-"select count(*) from (select phone, id, city from user where id \u003e 12 limit 10) as x"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from (select phone, id, city from user where id \u003e 12 limit 10) as x",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as count(*)"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select phone, id, city from `user` where 1 != 1",
- "Query": "select phone, id, city from `user` where id \u003e 12 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count non-null columns incoming from outer joins should work well
-"select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x"
-{
- "QueryType": "SELECT",
- "Original": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count(0) AS count(col)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as count(col)"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col as col from user_extra where 1 != 1",
- "Query": "select user_extra.col as col from user_extra where user_extra.id = :user_id",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# grouping on data from derived table
-"select val1, count(*) from (select id, val1 from user where val2 \u003c 4 order by val1 limit 2) as x group by val1"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select val1, count(*) from (select id, val1 from user where val2 \u003c 4 order by val1 limit 2) as x group by val1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 1] as val1",
- "[COLUMN 0] as count(*)",
- "[COLUMN 2]"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, val1, weight_string(val1) from `user` where 1 != 1",
- "OrderBy": "(1|2) ASC, (1|2) ASC",
- "Query": "select id, val1, weight_string(val1) from `user` where val2 \u003c 4 order by val1 asc, val1 asc limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Can't inline derived table when it has HAVING with aggregation function
-"select * from (select id from user having count(*) = 1) s"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select id from user having count(*) = 1) s",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select id from `user` where 1 != 1) as s where 1 != 1",
- "Query": "select * from (select id from `user` having count(*) = 1) as s",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select id from user having count(*) = 1) s",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 1",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.json b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.json
new file mode 100644
index 00000000000..f8c57c5ea07
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.json
@@ -0,0 +1,173 @@
+[
+ {
+ "comment": "Create vindex",
+ "query": "alter vschema create vindex hash_vdx using hash",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema create vindex hash_vdx using hash",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema create vindex hash_vdx using `hash`"
+ },
+ "TablesUsed": [
+ "main.hash_vdx"
+ ]
+ }
+ },
+ {
+ "comment": "Create vindex with qualifier",
+ "query": "alter vschema create vindex user.hash_vdx using hash",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema create vindex user.hash_vdx using hash",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "query": "alter vschema create vindex `user`.hash_vdx using `hash`"
+ },
+ "TablesUsed": [
+ "user.hash_vdx"
+ ]
+ }
+ },
+ {
+ "comment": "Drop vindex",
+ "query": "alter vschema drop vindex hash_vdx",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema drop vindex hash_vdx",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema drop vindex hash_vdx"
+ },
+ "TablesUsed": [
+ "main.hash_vdx"
+ ]
+ }
+ },
+ {
+ "comment": "Add table",
+ "query": "alter vschema add table a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema add table a",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema add table a"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Add sequence",
+ "query": "alter vschema add sequence a_seq",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema add sequence a_seq",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema add sequence a_seq"
+ },
+ "TablesUsed": [
+ "main.a_seq"
+ ]
+ }
+ },
+ {
+ "comment": "Add auto_increment with qualifier",
+ "query": "alter vschema on user.a add auto_increment id using a_seq",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema on user.a add auto_increment id using a_seq",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "query": "alter vschema on `user`.a add auto_increment id using a_seq"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "Drop table",
+ "query": "alter vschema drop table a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema drop table a",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema drop table a"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Add Vindex",
+ "query": "alter vschema on a add vindex hash (id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema on a add vindex hash (id)",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema on a add vindex `hash` (id)"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Drop Vindex",
+ "query": "alter vschema on a drop vindex hash",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema on a drop vindex hash",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema on a drop vindex `hash`"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt
deleted file mode 100644
index c46df7b18e6..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt
+++ /dev/null
@@ -1,170 +0,0 @@
-# Create vindex
-"alter vschema create vindex hash_vdx using hash"
-{
- "QueryType": "DDL",
- "Original": "alter vschema create vindex hash_vdx using hash",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema create vindex hash_vdx using `hash`"
- },
- "TablesUsed": [
- "main.hash_vdx"
- ]
-}
-Gen4 plan same as above
-
-# Create vindex with qualifier
-"alter vschema create vindex user.hash_vdx using hash"
-{
- "QueryType": "DDL",
- "Original": "alter vschema create vindex user.hash_vdx using hash",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "query": "alter vschema create vindex `user`.hash_vdx using `hash`"
- },
- "TablesUsed": [
- "user.hash_vdx"
- ]
-}
-Gen4 plan same as above
-
-# Drop vindex
-"alter vschema drop vindex hash_vdx"
-{
- "QueryType": "DDL",
- "Original": "alter vschema drop vindex hash_vdx",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema drop vindex hash_vdx"
- },
- "TablesUsed": [
- "main.hash_vdx"
- ]
-}
-Gen4 plan same as above
-
-# Add table
-"alter vschema add table a"
-{
- "QueryType": "DDL",
- "Original": "alter vschema add table a",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema add table a"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Add sequence
-"alter vschema add sequence a_seq"
-{
- "QueryType": "DDL",
- "Original": "alter vschema add sequence a_seq",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema add sequence a_seq"
- },
- "TablesUsed": [
- "main.a_seq"
- ]
-}
-Gen4 plan same as above
-
-# Add auto_increment with qualifier
-"alter vschema on user.a add auto_increment id using a_seq"
-{
- "QueryType": "DDL",
- "Original": "alter vschema on user.a add auto_increment id using a_seq",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "query": "alter vschema on `user`.a add auto_increment id using a_seq"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-# Drop table
-"alter vschema drop table a"
-{
- "QueryType": "DDL",
- "Original": "alter vschema drop table a",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema drop table a"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Add Vindex
-"alter vschema on a add vindex hash (id)"
-{
- "QueryType": "DDL",
- "Original": "alter vschema on a add vindex hash (id)",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema on a add vindex `hash` (id)"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Drop Vindex
-"alter vschema on a drop vindex hash"
-{
- "QueryType": "DDL",
- "Original": "alter vschema on a drop vindex hash",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema on a drop vindex `hash`"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.json b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.json
new file mode 100644
index 00000000000..b13bafd77f8
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.json
@@ -0,0 +1,168 @@
+[
+ {
+ "comment": "select bypass",
+ "query": "select count(*), col from unsharded",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "select count(*), col from unsharded"
+ }
+ }
+ },
+ {
+ "comment": "update bypass",
+ "query": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
+ }
+ }
+ },
+ {
+ "comment": "update bypass autocommit",
+ "query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "MultishardAutocommit": true,
+ "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ `user` set val = 1 where id = 18446744073709551616 and id = 1"
+ }
+ }
+ },
+ {
+ "comment": "delete bypass",
+ "query": "DELETE FROM USER WHERE ID = 42",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM USER WHERE ID = 42",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "delete from `USER` where ID = 42"
+ }
+ }
+ },
+ {
+ "comment": "insert bypass: not supported",
+ "query": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
+ "plan": "VT03023: INSERT not supported when targeting a key range: targetString"
+ },
+ {
+ "comment": "bypass query for into outfile s3",
+ "query": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "comment": "Select outfile",
+ "query": "select * from user into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "select * from `user` into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "comment": "load data from s3 'x.txt' into table x",
+ "query": "load data from s3 'x.txt' into table x",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt' into table x",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt' into table x",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "load data from s3 'x.txt'",
+ "query": "load data from s3 'x.txt'",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt'",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "create table",
+ "query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt
deleted file mode 100644
index a9bb3e93249..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt
+++ /dev/null
@@ -1,163 +0,0 @@
-# select bypass
-"select count(*), col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "select count(*), col from unsharded"
- }
-}
-Gen4 plan same as above
-
-# update bypass
-"update user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
- }
-}
-Gen4 plan same as above
-
-# update bypass autocommit
-"update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "MultishardAutocommit": true,
- "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ `user` set val = 1 where id = 18446744073709551616 and id = 1"
- }
-}
-Gen4 plan same as above
-
-# delete bypass
-"DELETE FROM USER WHERE ID = 42"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM USER WHERE ID = 42",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "delete from `USER` where ID = 42"
- }
-}
-Gen4 plan same as above
-
-# insert bypass: not supported
-"INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')"
-"INSERT not supported when targeting a key range: targetString"
-Gen4 plan same as above
-
-# bypass query for into outfile s3
-"select count(*), col from unsharded into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-# Select outfile
-"select * from user into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select * from user into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "select * from `user` into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt' into table x"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt' into table x",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt' into table x",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt'"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt'",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# create table
-"create /* test */ table t1(id bigint, primary key(id)) /* comments */"
-{
- "QueryType": "DDL",
- "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json
new file mode 100644
index 00000000000..6f2be325b6b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json
@@ -0,0 +1,178 @@
+[
+ {
+ "comment": "select bypass",
+ "query": "select count(*), col from unsharded",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "select count(*), col from unsharded"
+ }
+ }
+ },
+ {
+ "comment": "update bypass",
+ "query": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
+ }
+ }
+ },
+ {
+ "comment": "delete bypass",
+ "query": "DELETE FROM USER WHERE ID = 42",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM USER WHERE ID = 42",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "delete from `USER` where ID = 42"
+ }
+ }
+ },
+ {
+ "comment": "insert bypass",
+ "query": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "insert into `USER`(ID, `NAME`) values (42, 'ms X')"
+ }
+ }
+ },
+ {
+ "comment": "insert bypass with sequence: sequences ignored",
+ "query": "insert into user(nonid) values (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid) values (2)",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "insert into `user`(nonid) values (2)"
+ }
+ }
+ },
+ {
+ "comment": "bypass query for into outfile s3",
+ "query": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "comment": "Select outfile",
+ "query": "select * from user into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "select * from `user` into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "query": "load data from s3 'x.txt' into table x",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt' into table x",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt' into table x",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "query": "load data from s3 'x.txt'",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt'",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "create table",
+ "query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt
deleted file mode 100644
index 17f697535e4..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt
+++ /dev/null
@@ -1,175 +0,0 @@
-# select bypass
-"select count(*), col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "select count(*), col from unsharded"
- }
-}
-Gen4 plan same as above
-
-# update bypass
-"update user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
- }
-}
-Gen4 plan same as above
-
-# delete bypass
-"DELETE FROM USER WHERE ID = 42"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM USER WHERE ID = 42",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "delete from `USER` where ID = 42"
- }
-}
-Gen4 plan same as above
-
-# insert bypass
-"INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "insert into `USER`(ID, `NAME`) values (42, 'ms X')"
- }
-}
-Gen4 plan same as above
-
-# insert bypass with sequence: sequences ignored
-"insert into user(nonid) values (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid) values (2)",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "insert into `user`(nonid) values (2)"
- }
-}
-Gen4 plan same as above
-
-# bypass query for into outfile s3
-"select count(*), col from unsharded into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-# Select outfile
-"select * from user into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select * from user into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "select * from `user` into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt' into table x"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt' into table x",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt' into table x",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt'"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt'",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# create table
-"create /* test */ table t1(id bigint, primary key(id)) /* comments */"
-{
- "QueryType": "DDL",
- "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/call_cases.json b/go/vt/vtgate/planbuilder/testdata/call_cases.json
new file mode 100644
index 00000000000..f8786414389
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/call_cases.json
@@ -0,0 +1,58 @@
+[
+ {
+ "comment": "simple call proc on current keyspace",
+ "query": "call proc()",
+ "plan": {
+ "QueryType": "CALL_PROC",
+ "Original": "call proc()",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "call proc()"
+ }
+ }
+ },
+ {
+ "comment": "call qualified keyspace",
+ "query": "call main.proc()",
+ "plan": {
+ "QueryType": "CALL_PROC",
+ "Original": "call main.proc()",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "call proc()"
+ }
+ }
+ },
+ {
+ "comment": "CALL not allowed on sharded keyspaces",
+ "query": "call user.proc()",
+ "plan": "CALL is not supported for sharded keyspace"
+ },
+ {
+ "comment": "CALL with expressions and parameters",
+ "query": "call proc(1, 'foo', @var)",
+ "plan": {
+ "QueryType": "CALL_PROC",
+ "Original": "call proc(1, 'foo', @var)",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "call proc(1, 'foo', :__vtudvvar)"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/call_cases.txt b/go/vt/vtgate/planbuilder/testdata/call_cases.txt
deleted file mode 100644
index eb9e0277c84..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/call_cases.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-# simple call proc on current keyspace
-"call proc()"
-{
- "QueryType": "CALL_PROC",
- "Original": "call proc()",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "call proc()"
- }
-}
-Gen4 plan same as above
-
-# call qualified keyspace
-"call main.proc()"
-{
- "QueryType": "CALL_PROC",
- "Original": "call main.proc()",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "call proc()"
- }
-}
-Gen4 plan same as above
-
-# CALL not allowed on sharded keyspaces
-"call user.proc()"
-"CALL is not supported for sharded database"
-Gen4 plan same as above
-
-# CALL with expressions and parameters
-"call proc(1, 'foo', @var)"
-{
- "QueryType": "CALL_PROC",
- "Original": "call proc(1, 'foo', @var)",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "call proc(1, 'foo', :__vtudvvar)"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json
new file mode 100644
index 00000000000..8326922225c
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json
@@ -0,0 +1,550 @@
+[
+ {
+ "comment": "simple create table",
+ "query": "create table t1(id bigint, primary key(id))",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table t1(id bigint, primary key(id))",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
+ },
+ "TablesUsed": [
+ "main.t1"
+ ]
+ }
+ },
+ {
+ "comment": "simple create table with keyspace",
+ "query": "create table user.t1(id bigint, primary key(id))",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table user.t1(id bigint, primary key(id))",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
+ },
+ "TablesUsed": [
+ "user.t1"
+ ]
+ }
+ },
+ {
+ "comment": "DDL",
+ "query": "create table a(id int)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table a(id int)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create table a (\n\tid int\n)"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "simple create table with table qualifier that does not exists",
+ "query": "create table a.b(id int)",
+ "plan": "VT05003: unknown database 'a' in vschema"
+ },
+ {
+ "comment": "Alter table",
+ "query": "alter table a ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table a ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "alter table a add column id int"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with qualifier",
+ "query": "alter table user.user ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table user.user ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add column id int"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with qualifier and table not in vschema",
+ "query": "alter table user.a ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table user.a ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table a add column id int"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with unknown qualifier",
+ "query": "alter table a.b ADD id int",
+ "plan": "VT05003: unknown database 'a' in vschema"
+ },
+ {
+ "comment": "create db foo",
+ "query": "create database foo",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create database foo",
+ "Instructions": {
+ "OperatorType": "CREATEDB",
+ "Keyspace": {
+ "Name": "foo",
+ "Sharded": false
+ }
+ }
+ }
+ },
+ {
+ "comment": "create db main",
+ "query": "create database main",
+ "plan": "VT06001: cannot create database 'main'; database exists"
+ },
+ {
+ "comment": "create db if not exists main",
+ "query": "create database if not exists main",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create database if not exists main",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "alter db foo",
+ "query": "alter database foo collate utf8",
+ "plan": "VT05002: cannot alter database 'foo'; unknown database"
+ },
+ {
+ "comment": "alter db main",
+ "query": "alter database main collate utf8",
+ "plan": "VT12001: unsupported: ALTER DATABASE"
+ },
+ {
+ "comment": "drop db foo",
+ "query": "drop database foo",
+ "plan": "VT05001: cannot drop database 'foo'; database does not exists"
+ },
+ {
+ "comment": "drop db main",
+ "query": "drop database main",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop database main",
+ "Instructions": {
+ "OperatorType": "DROPDB",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ }
+ }
+ }
+ },
+ {
+ "comment": "drop db if exists main",
+ "query": "drop database if exists main",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop database if exists main",
+ "Instructions": {
+ "OperatorType": "DROPDB",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ }
+ }
+ }
+ },
+ {
+ "comment": "drop db if exists foo",
+ "query": "drop schema if exists foo",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop schema if exists foo",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "DDL with qualifier",
+ "query": "create index a on user.user(id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create index a on user.user(id)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add index a (id)"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "DDL with qualifier for a table not in vschema of an unsharded keyspace",
+ "query": "create index a on main.unknown(id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create index a on main.unknown(id)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "alter table unknown add index a (id)"
+ },
+ "TablesUsed": [
+ "main.unknown"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace",
+ "query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with IN clause",
+ "query": "create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with UNION clause",
+ "query": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id from unsharded union select id from unsharded_auto order by id asc limit 5"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with multiple UNION clauses",
+ "query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with UNION clauses in subqueries",
+ "query": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "Alter View",
+ "query": "alter view user.user_extra as select * from user.user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter view user.user_extra as select * from user.user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter view user_extra as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Create View with authoritative column",
+ "query": "create view user.tmp_view as select * from user.authoritative",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.tmp_view as select * from user.authoritative",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view tmp_view as select * from authoritative"
+ },
+ "TablesUsed": [
+ "user.tmp_view"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.tmp_view as select * from user.authoritative",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view tmp_view as select user_id, col1, col2 from authoritative"
+ },
+ "TablesUsed": [
+ "user.tmp_view"
+ ]
+ }
+ },
+ {
+ "comment": "drop table without qualifier",
+ "query": "drop table unsharded_a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop table unsharded_a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "drop table unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "Drop view",
+ "query": "drop view main.a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop view main.a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "drop view a"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Truncate table with qualifier",
+ "query": "truncate user.user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "truncate user.user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "truncate table user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Rename table",
+ "query": "rename table a to main.b",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "rename table a to main.b",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "rename table a to b"
+ },
+ "TablesUsed": [
+ "main.a",
+ "main.b"
+ ]
+ }
+ },
+ {
+ "comment": "CREATE temp TABLE",
+ "query": "create temporary table a(id int)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create temporary table a(id int)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create temporary table a (\n\tid int\n)",
+ "TempTable": true
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "DROP temp TABLE",
+ "query": "drop temporary table a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop temporary table a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "drop temporary table a",
+ "TempTable": true
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "create table with function as a default value",
+ "query": "create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create table function_default (\n\tx varchar(25) default (trim(' check '))\n)"
+ },
+ "TablesUsed": [
+ "main.function_default"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt b/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt
deleted file mode 100644
index 74b23a47c63..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt
+++ /dev/null
@@ -1,512 +0,0 @@
-# simple create table
-"create table t1(id bigint, primary key(id))"
-{
- "QueryType": "DDL",
- "Original": "create table t1(id bigint, primary key(id))",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
- },
- "TablesUsed": [
- "main.t1"
- ]
-}
-Gen4 plan same as above
-
-# simple create table with keyspace
-"create table user.t1(id bigint, primary key(id))"
-{
- "QueryType": "DDL",
- "Original": "create table user.t1(id bigint, primary key(id))",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
- },
- "TablesUsed": [
- "user.t1"
- ]
-}
-Gen4 plan same as above
-
-# DDL
-"create table a(id int)"
-{
- "QueryType": "DDL",
- "Original": "create table a(id int)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create table a (\n\tid int\n)"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# simple create table with table qualifier that does not exists
-"create table a.b(id int)"
-"Unknown database 'a' in vschema"
-Gen4 plan same as above
-
-#Alter table
-"alter table a ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table a ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "alter table a add column id int"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with qualifier
-"alter table user.user ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table user.user ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add column id int"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with qualifier and table not in vschema
-"alter table user.a ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table user.a ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table a add column id int"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with unknown qualifier
-"alter table a.b ADD id int"
-"Unknown database 'a' in vschema"
-Gen4 plan same as above
-
-# create db foo
-"create database foo"
-{
- "QueryType": "DDL",
- "Original": "create database foo",
- "Instructions": {
- "OperatorType": "CREATEDB",
- "Keyspace": {
- "Name": "foo",
- "Sharded": false
- }
- }
-}
-Gen4 plan same as above
-
-# create db main
-"create database main"
-"Can't create database 'main'; database exists"
-Gen4 plan same as above
-
-# create db if not exists main
-"create database if not exists main"
-{
- "QueryType": "DDL",
- "Original": "create database if not exists main",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# alter db foo
-"alter database foo collate utf8"
-"Can't alter database 'foo'; unknown database"
-Gen4 plan same as above
-
-# alter db main
-"alter database main collate utf8"
-"alter database is not supported"
-Gen4 plan same as above
-
-# drop db foo
-"drop database foo"
-"Can't drop database 'foo'; database doesn't exists"
-Gen4 plan same as above
-
-# drop db main
-"drop database main"
-{
- "QueryType": "DDL",
- "Original": "drop database main",
- "Instructions": {
- "OperatorType": "DROPDB",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- }
- }
-}
-Gen4 plan same as above
-
-# drop db if exists main
-"drop database if exists main"
-{
- "QueryType": "DDL",
- "Original": "drop database if exists main",
- "Instructions": {
- "OperatorType": "DROPDB",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- }
- }
-}
-Gen4 plan same as above
-
-# drop db if exists foo
-"drop schema if exists foo"
-{
- "QueryType": "DDL",
- "Original": "drop schema if exists foo",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# DDL with qualifier
-"create index a on user.user(id)"
-{
- "QueryType": "DDL",
- "Original": "create index a on user.user(id)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add index a (id)"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# DDL with qualifier for a table not in vschema of an unsharded keyspace
-"create index a on main.unknown(id)"
-{
- "QueryType": "DDL",
- "Original": "create index a on main.unknown(id)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "alter table unknown add index a (id)"
- },
- "TablesUsed": [
- "main.unknown"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace
-"create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-
-# create view with subquery in unsharded keyspace with IN clause
-"create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace with UNION clause
-"create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id from unsharded union select id from unsharded_auto order by id asc limit 5"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace with multiple UNION clauses
-"create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace with UNION clauses in subqueries
-"create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# Alter View
-"alter view user.user_extra as select* from user.user"
-{
- "QueryType": "DDL",
- "Original": "alter view user.user_extra as select* from user.user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter view user_extra as select * from `user`"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# drop table without qualifier
-"drop table unsharded_a"
-{
- "QueryType": "DDL",
- "Original": "drop table unsharded_a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "drop table unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded_a"
- ]
-}
-Gen4 plan same as above
-
-# Drop view
-"drop view main.a"
-{
- "QueryType": "DDL",
- "Original": "drop view main.a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "drop view a"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Truncate table with qualifier
-"truncate user.user_extra"
-{
- "QueryType": "DDL",
- "Original": "truncate user.user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "truncate table user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Rename table
-"rename table a to main.b"
-{
- "QueryType": "DDL",
- "Original": "rename table a to main.b",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "rename table a to b"
- },
- "TablesUsed": [
- "main.a",
- "main.b"
- ]
-}
-Gen4 plan same as above
-
-# CREATE temp TABLE
-"create temporary table a(id int)"
-{
- "QueryType": "DDL",
- "Original": "create temporary table a(id int)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create temporary table a (\n\tid int\n)",
- "TempTable": true
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# DROP temp TABLE
-"drop temporary table a"
-{
- "QueryType": "DDL",
- "Original": "drop temporary table a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "drop temporary table a",
- "TempTable": true
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# create table with function as a default value
-"create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))"
-{
- "QueryType": "DDL",
- "Original": "create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create table function_default (\n\tx varchar(25) default (trim(' check '))\n)"
- },
- "TablesUsed": [
- "main.function_default"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json
new file mode 100644
index 00000000000..30547db61c4
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json
@@ -0,0 +1,789 @@
+[
+ {
+ "comment": "Create View with qualifier",
+ "query": "create view user.a as select* from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.a as select* from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view a as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with qualifier in select as well",
+ "query": "create view user.a as select* from user.user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.a as select* from user.user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view a as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with No column referenced",
+ "query": "create view user.view_a as select 1 from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select 1 from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select 1 from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with '*' expression for simple route",
+ "query": "create view user.view_a as select user.* from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.* from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.* from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with unqualified '*' expression for simple route",
+ "query": "create view user.view_a as select * from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with fully qualified '*' expression for simple route",
+ "query": "create view user.view_a as select user.user.* from user.user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.user.* from user.user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.* from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from authoritative table",
+ "query": "create view user.view_a as select * from authoritative",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from authoritative"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select user_id, col1, col2 from authoritative"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from join of authoritative tables",
+ "query": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from qualified authoritative table",
+ "query": "create view user.view_a as select a.* from authoritative a",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select a.* from authoritative as a"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select a.user_id, a.col1, a.col2 from authoritative as a"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from intermixing of authoritative table with non-authoritative results in no expansion",
+ "query": "create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from authoritative join `user` on authoritative.user_id = `user`.id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select authoritative.* with intermixing still expands",
+ "query": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with auto-resolve anonymous columns for simple route",
+ "query": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with join that can be solved in each shard separately",
+ "query": "create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.id from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with last_insert_id for unsharded route",
+ "query": "create view main.view_a as select last_insert_id() as x from main.unsharded",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view main.view_a as select last_insert_id() as x from main.unsharded",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select :__lastInsertId as x from unsharded"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select from pinned table",
+ "query": "create view user.view_a as select * from pin_test",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from pin_test",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from pin_test"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Expression with single-route reference",
+ "query": "create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.col, user_extra.id + user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Comments",
+ "query": "create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select /* comment */ `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with for update",
+ "query": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id for update"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Case preservation",
+ "query": "create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.Col, user_extra.Id from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with syntax error",
+ "query": "create view user.view_a as the quick brown fox",
+ "plan": "syntax error at position 31 near 'the'"
+ },
+ {
+ "comment": "create view with Hex number is not treated as a simple value",
+ "query": "create view user.view_a as select * from user where id = 0x04",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where id = 0x04",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 0x04"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with limit works if it can be dropped",
+ "query": "create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where `name` = 'abc' and id = 4 limit 5"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Multiple parenthesized expressions",
+ "query": "create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Multiple parenthesized expressions",
+ "query": "create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Column Aliasing with Table.Column",
+ "query": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col asc"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Column Aliasing with Column",
+ "query": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Booleans and parenthesis",
+ "query": "create view user.view_a as select * from user where (id = 1) AND name = true",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where (id = 1) AND name = true",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 1 and `name` = true"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with union with the same target shard",
+ "query": "create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from music where user_id = 1 union select * from `user` where id = 1"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with testing SingleRow Projection",
+ "query": "create view user.view_a as select 42 from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select 42 from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select 42 from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with sql_calc_found_rows without limit",
+ "query": "create view user.view_a as select sql_calc_found_rows * from music where user_id = 1",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select sql_calc_found_rows * from music where user_id = 1",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from music where user_id = 1"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "DDL",
+ "query": "create index a on user(id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create index a on user(id)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add index a (id)"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with qualifier",
+ "query": "alter table user ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table user ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add column id int"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Alter View",
+ "query": "alter view user_extra as select* from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter view user_extra as select* from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter view user_extra as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Alter View with unknown view",
+ "query": "alter view unknown as select* from user",
+ "plan": "VT03007: keyspace not specified"
+ },
+ {
+ "comment": "drop table with qualifier in one",
+ "query": "drop table user.user, user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop table user.user, user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "drop table `user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "drop table with incompatible tables",
+ "query": "drop table user, unsharded_a",
+ "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination"
+ },
+ {
+ "comment": "drop table with unknown table",
+ "query": "drop table unknown",
+ "plan": "VT03007: keyspace not specified"
+ },
+ {
+ "comment": "drop view with 1 view without qualifier",
+ "query": "drop view user.user, user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop view user.user, user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "drop view `user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "drop view with incompatible views",
+ "query": "drop view user, unsharded_a",
+ "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination"
+ },
+ {
+ "comment": "drop view with unknown view",
+ "query": "drop view unknown",
+ "plan": "VT03007: keyspace not specified"
+ },
+ {
+ "comment": "Truncate table without qualifier",
+ "query": "truncate user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "truncate user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "truncate table user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Rename table",
+ "query": "rename table user_extra to b",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "rename table user_extra to b",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "rename table user_extra to b"
+ },
+ "TablesUsed": [
+ "user.b",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Rename table with different keyspace tables",
+ "query": "rename table user_extra to b, main.a to b",
+ "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination"
+ },
+ {
+ "comment": "Rename table with change in keyspace name",
+ "query": "rename table user_extra to main.b",
+ "plan": "VT03002: changing schema from 'user' to 'main' is not allowed"
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt
deleted file mode 100644
index 57f9de4003c..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt
+++ /dev/null
@@ -1,782 +0,0 @@
-# Create View with qualifier
-"create view user.a as select* from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.a as select* from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view a as select * from `user`"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-# create view with qualifier in select as well
-"create view user.a as select* from user.user"
-{
- "QueryType": "DDL",
- "Original": "create view user.a as select* from user.user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view a as select * from `user`"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-# create view with No column referenced
-"create view user.view_a as select 1 from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select 1 from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select 1 from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with '*' expression for simple route
-"create view user.view_a as select user.* from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.* from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.* from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with unqualified '*' expression for simple route
-"create view user.view_a as select * from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with fully qualified '*' expression for simple route
-"create view user.view_a as select user.user.* from user.user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.user.* from user.user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.* from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with select * from authoritative table
-"create view user.view_a as select * from authoritative"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from authoritative"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select user_id, col1, col2 from authoritative"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with select * from join of authoritative tables
-"create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with select * from qualified authoritative table
-"create view user.view_a as select a.* from authoritative a"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select a.* from authoritative as a"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select a.user_id, a.col1, a.col2 from authoritative as a"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with select * from intermixing of authoritative table with non-authoritative results in no expansion
-"create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from authoritative join `user` on authoritative.user_id = `user`.id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with select authoritative.* with intermixing still expands
-"create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with auto-resolve anonymous columns for simple route
-"create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with join that can be solved in each shard separately
-"create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.id from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with last_insert_id for unsharded route
-"create view main.view_a as select last_insert_id() as x from main.unsharded"
-{
- "QueryType": "DDL",
- "Original": "create view main.view_a as select last_insert_id() as x from main.unsharded",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select :__lastInsertId as x from unsharded"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with select from pinned table
-"create view user.view_a as select * from pin_test"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from pin_test",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from pin_test"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Expression with single-route reference
-"create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.col, user_extra.id + user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Comments
-"create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select /* comment */ `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with for update
-"create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id for update"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Case preservation
-"create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.Col, user_extra.Id from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with syntax error
-"create view user.view_a as the quick brown fox"
-"syntax error at position 31 near 'the'"
-Gen4 plan same as above
-
-# create view with Hex number is not treated as a simple value
-"create view user.view_a as select * from user where id = 0x04"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where id = 0x04",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 0x04"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with limit works if it can be dropped
-"create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where `name` = 'abc' and id = 4 limit 5"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Multiple parenthesized expressions
-"create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Multiple parenthesized expressions
-"create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Column Aliasing with Table.Column
-"create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col asc"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Column Aliasing with Column
-"create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Booleans and parenthesis
-"create view user.view_a as select * from user where (id = 1) AND name = true"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where (id = 1) AND name = true",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 1 and `name` = true"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with union with the same target shard
-"create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from music where user_id = 1 union select * from `user` where id = 1"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with testing SingleRow Projection
-"create view user.view_a as select 42 from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select 42 from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select 42 from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with sql_calc_found_rows without limit
-"create view user.view_a as select sql_calc_found_rows * from music where user_id = 1"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select sql_calc_found_rows * from music where user_id = 1",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from music where user_id = 1"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# DDL
-"create index a on user(id)"
-{
- "QueryType": "DDL",
- "Original": "create index a on user(id)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add index a (id)"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with qualifier
-"alter table user ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table user ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add column id int"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Alter View
-"alter view user_extra as select* from user"
-{
- "QueryType": "DDL",
- "Original": "alter view user_extra as select* from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter view user_extra as select * from `user`"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Alter View with unknown view
-"alter view unknown as select* from user"
-"keyspace not specified"
-Gen4 plan same as above
-
-# drop table with qualifier in one
-"drop table user.user, user_extra"
-{
- "QueryType": "DDL",
- "Original": "drop table user.user, user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "drop table `user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# drop table with incompatible tables
-"drop table user, unsharded_a"
-"Tables or Views specified in the query do not belong to the same destination"
-Gen4 plan same as above
-
-# drop table with unknown table
-"drop table unknown"
-"keyspace not specified"
-Gen4 plan same as above
-
-# drop view with 1 view without qualifier
-"drop view user.user, user_extra"
-{
- "QueryType": "DDL",
- "Original": "drop view user.user, user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "drop view `user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# drop view with incompatible views
-"drop view user, unsharded_a"
-"Tables or Views specified in the query do not belong to the same destination"
-Gen4 plan same as above
-
-# drop view with unknown view
-"drop view unknown"
-"keyspace not specified"
-Gen4 plan same as above
-
-# Truncate table without qualifier
-"truncate user_extra"
-{
- "QueryType": "DDL",
- "Original": "truncate user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "truncate table user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Rename table
-"rename table user_extra to b"
-{
- "QueryType": "DDL",
- "Original": "rename table user_extra to b",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "rename table user_extra to b"
- },
- "TablesUsed": [
- "user.b",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Rename table with different keyspace tables
-"rename table user_extra to b, main.a to b"
-"Tables or Views specified in the query do not belong to the same destination"
-Gen4 plan same as above
-
-# Rename table with change in keyspace name
-"rename table user_extra to main.b"
-"Changing schema from 'user' to 'main' is not allowed"
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json
new file mode 100644
index 00000000000..a3cb1120dff
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json
@@ -0,0 +1,6271 @@
+[
+ {
+ "comment": "update table not found",
+ "query": "update nouser set val = 1",
+ "plan": "table nouser not found"
+ },
+ {
+ "comment": "delete table not found",
+ "query": "delete from nouser",
+ "plan": "table nouser not found"
+ },
+ {
+ "comment": "explicit keyspace reference",
+ "query": "update main.m1 set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update main.m1 set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update m1 set val = 1",
+ "Table": "m1"
+ },
+ "TablesUsed": [
+ "main.m1"
+ ]
+ }
+ },
+ {
+ "comment": "update unsharded",
+ "query": "update unsharded set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set val = 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "subqueries in unsharded update",
+ "query": "update unsharded set col = (select col from unsharded limit 1)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select col from unsharded limit 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select col from unsharded limit 1)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union in subquery of unsharded update",
+ "query": "update unsharded set col = (select id from unsharded union select id from unsharded)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded union select id from unsharded)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded union select id from unsharded)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded join in subquery of unsharded update",
+ "query": "update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded as a join unsharded as b on a.id = b.id)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "update with join subquery",
+ "query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: updated of a routed table",
+ "query": "update route1 set a=1 where id=1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update route1 set a=1 where id=1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as route1 set a = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update route1 set a=1 where id=1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as route1 set a = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update: routing rules for subquery.",
+ "query": "update unsharded_a set a=(select a from route2)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a set a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a set a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a set a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a set a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "delete unsharded",
+ "query": "delete from unsharded",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "delete from sequence",
+ "query": "DELETE FROM seq",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM seq",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from seq",
+ "Table": "seq"
+ },
+ "TablesUsed": [
+ "main.seq"
+ ]
+ }
+ },
+ {
+ "comment": "delete from reference table in unsharded keyspace",
+ "query": "DELETE FROM unsharded_ref",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM unsharded_ref",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded_ref",
+ "Table": "unsharded_ref"
+ },
+ "TablesUsed": [
+ "main.unsharded_ref"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id",
+ "query": "update user set val = 1 where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id with alias",
+ "query": "update user as user_alias set val = 1 where user_alias.id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id with parenthesized expression",
+ "query": "update user set val = 1 where (id = 1)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id with multi-part where clause with parens",
+ "query": "update user set val = 1 where (name = 'foo' and id = 1)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, changing one vindex column",
+ "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, changing same vindex twice",
+ "query": "update user_metadata set email = 'a', email = 'b' where user_id = 1",
+ "plan": "VT03015: column has duplicate set values: 'email'"
+ },
+ {
+ "comment": "update by primary keyspace id, changing multiple vindex columns",
+ "query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "address_user_map:4",
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "address_user_map:4",
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, changing one vindex column, using order by and limit",
+ "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "update changes non owned vindex column",
+ "query": "update music_extra set music_id = 1 where user_id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music_extra set music_id = 1 where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "music_user_map:1"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
+ "Query": "update music_extra set music_id = 1 where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music_extra set music_id = 1 where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "music_user_map:1"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
+ "Query": "update music_extra set music_id = 1 where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, stray where clause",
+ "query": "update user set val = 1 where id = id2 and id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = id2 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = id2 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = id2 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = id2 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, stray where clause with conversion error",
+ "query": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from by primary keyspace id",
+ "query": "delete from user where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
+ "Query": "delete from `user` where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
+ "Query": "delete from `user` where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multi-table delete with comma join",
+ "query": "delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete a from unsharded_a as a, unsharded_b as b where a.id = b.id and b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "multi-table delete with ansi join",
+ "query": "delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete a from unsharded_a as a join unsharded_b as b on a.id = b.id where b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "delete with join from subquery",
+ "query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: deleted from a routed table",
+ "query": "delete from route1 where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
+ "Query": "delete from `user` as route1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
+ "Query": "delete from `user` as route1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete: routing rules for subquery",
+ "query": "delete from unsharded_a where a=(select a from route2)",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded_a where a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded_a where a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "update by lookup",
+ "query": "update music set val = 1 where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update music set val = 1 where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update music set val = 1 where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "update multi-table ansi join",
+ "query": "update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a as a join unsharded_b as b on a.id = b.id set a.val = 'foo' where b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "update multi-table comma join",
+ "query": "update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a as a, unsharded_b as b set a.val = 'foo' where a.id = b.id and b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "delete from by lookup",
+ "query": "delete from music where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "delete from, no owned vindexes",
+ "query": "delete from music_extra where user_id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music_extra where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from music_extra where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music_extra where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from music_extra where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ }
+ },
+ {
+ "comment": "simple insert, no values",
+ "query": "insert into unsharded values()",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values()",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values ()",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "simple insert unsharded",
+ "query": "insert into unsharded values(1, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values(1, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (1, 2)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "simple upsert unsharded",
+ "query": "insert into unsharded values(1, 2) on duplicate key update x = 3",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values(1, 2) on duplicate key update x = 3",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (1, 2) on duplicate key update x = 3",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert, no col list with auto-inc and authoritative column list",
+ "query": "insert into unsharded_authoritative values(1,1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_authoritative values(1,1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_authoritative(col1, col2) values (:__seq0, 1)",
+ "TableName": "unsharded_authoritative"
+ },
+ "TablesUsed": [
+ "main.unsharded_authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "sharded upsert with sharding key set to vindex column",
+ "query": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0) on duplicate key update user_id = values(user_id)",
+ "TableName": "music",
+ "VindexValues": {
+ "music_user_map": "INT64(2)",
+ "user_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sharded bulk upsert with sharding key set to vindex column",
+ "query": "insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0), (:_user_id_1, :_id_1) on duplicate key update user_id = values(user_id)",
+ "TableName": "music",
+ "VindexValues": {
+ "music_user_map": "INT64(2), INT64(4)",
+ "user_index": "INT64(1), INT64(3)"
+ }
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded with select",
+ "query": "insert into unsharded select id from unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded with select with join",
+ "query": "insert into unsharded select id from unsharded join unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded join unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded join unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, invalid value for auto-inc",
+ "query": "insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, column present",
+ "query": "insert into unsharded_auto(id, val) values(1, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(id, val) values(1, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, column absent",
+ "query": "insert into unsharded_auto(val) values('aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(val) values('aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(val, id) values ('aa', :__seq0)",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, column absent",
+ "query": "insert into unsharded_auto(val) values(false)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(val) values(false)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(val, id) values (false, :__seq0)",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, multi-val",
+ "query": "insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert subquery in insert value",
+ "query": "insert into unsharded values((select 1 from dual), 1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values((select 1 from dual), 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (1, 1)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "sharded insert subquery in insert value",
+ "query": "insert into user(id, val) values((select 1), 1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id, val) values((select 1), 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, val, `Name`, Costly) values (:_Id_0, 1, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert into a routed table",
+ "query": "insert into route1(id) values (1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into route1(id) values (1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with mimatched column list",
+ "query": "insert into user(id) values (1, 2)",
+ "plan": "VT13001: [BUG] column list does not match values"
+ },
+ {
+ "comment": "insert no column list for sharded authoritative table",
+ "query": "insert into authoritative values(1, 2, 3)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into authoritative values(1, 2, 3)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, 2, 3)",
+ "TableName": "authoritative",
+ "VindexValues": {
+ "user_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "insert sharded, no values",
+ "query": "insert into user values()",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user values()",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with one vindex",
+ "query": "insert into user(id) values (1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values (1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert ignore sharded",
+ "query": "insert ignore into user(id) values (1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert ignore into user(id) values (1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert ignore into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert on duplicate key",
+ "query": "insert into user(id) values(1) on duplicate key update col = 2",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values(1) on duplicate key update col = 2",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0) on duplicate key update col = 2",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with one vindex and bind var",
+ "query": "insert into user(id) values (:aa)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values (:aa)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with non vindex",
+ "query": "insert into user(nonid) values (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid) values (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(nonid, id, `Name`, Costly) values (2, :_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with default seq",
+ "query": "insert into user(id, nonid) values (default, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id, nonid) values (default, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, nonid, `Name`, Costly) values (:_Id_0, 2, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with non vindex bool value",
+ "query": "insert into user(nonid) values (true)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid) values (true)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(nonid, id, `Name`, Costly) values (true, :_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with all vindexes supplied",
+ "query": "insert into user(nonid, name, id) values (2, 'foo', 1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid, name, id) values (2, 'foo', 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(nonid, `name`, id, Costly) values (2, :_Name_0, :_Id_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "VARCHAR(\"foo\")",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert for non-vindex autoinc",
+ "query": "insert into user_extra(nonid) values (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(nonid) values (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
+ "TableName": "user_extra",
+ "VindexValues": {
+ "user_index": "NULL"
+ }
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert for non-compliant names",
+ "query": "insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `weird``name`(`a``b*c`, `b*c`) values (:_a_b_c_0, 2)",
+ "TableName": "weird`name",
+ "VindexValues": {
+ "user_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.weird`name"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert from union",
+ "query": "insert into unsharded select 1 from dual union select 1 from dual",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select 1 from dual union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select 1 from dual union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "insert for non-vindex autoinc, invalid value",
+ "query": "insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
+ "TableName": "user_extra",
+ "VindexValues": {
+ "user_index": "NULL"
+ }
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert invalid index value",
+ "query": "insert into music_extra(music_id, user_id) values(1, 18446744073709551616)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music_extra(music_id, user_id) values(1, 18446744073709551616)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into music_extra(music_id, user_id) values (:_music_id_0, :_user_id_0)",
+ "TableName": "music_extra",
+ "VindexValues": {
+ "music_user_map": "INT64(1)",
+ "user_index": "DECIMAL(18446744073709551616)"
+ }
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert invalid index value",
+ "query": "insert into music_extra(music_id, user_id) values(1, id)",
+ "plan": "column access not supported here"
+ },
+ {
+ "comment": "insert invalid table",
+ "query": "insert into noexist(music_id, user_id) values(1, 18446744073709551616)",
+ "plan": "table noexist not found"
+ },
+ {
+ "comment": "insert with multiple rows",
+ "query": "insert into user(id) values (1), (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values (1), (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL, NULL",
+ "name_user_map": "NULL, NULL",
+ "user_index": ":__seq0, :__seq1"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with query timeout",
+ "query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
+ "QueryTimeout": 1,
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL, NULL",
+ "name_user_map": "NULL, NULL",
+ "user_index": ":__seq0, :__seq1"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with multiple rows - multi-shard autocommit",
+ "query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": true,
+ "Query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL, NULL",
+ "name_user_map": "NULL, NULL",
+ "user_index": ":__seq0, :__seq1"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert into a vindex not allowed",
+ "query": "insert into user_index(id) values(1)",
+ "plan": "VT12001: unsupported: multi-shard or vindex write statement"
+ },
+ {
+ "comment": "simple replace unsharded",
+ "query": "replace into unsharded values(1, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded values(1, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded values (1, 2)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded with select",
+ "query": "replace into unsharded select id from unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, invalid value for auto-inc",
+ "query": "replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, column present",
+ "query": "replace into unsharded_auto(id, val) values(1, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(id, val) values(1, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, column absent",
+ "query": "replace into unsharded_auto(val) values('aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(val) values('aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(val, id) values ('aa', :__seq0)",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, multi-val",
+ "query": "replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace invalid table",
+ "query": "replace into noexist(music_id, user_id) values(1, 18446744073709551616)",
+ "plan": "table noexist not found"
+ },
+ {
+ "comment": "insert a row in a multi column vindex table",
+ "query": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0)",
+ "TableName": "multicolvin",
+ "VindexValues": {
+ "cola_map": "INT64(1)",
+ "colb_colc_map": "INT64(2), INT64(3)",
+ "kid_index": "INT64(4)"
+ }
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "insert for overlapped vindex columns",
+ "query": "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into overlap_vindex(kid, column_a, column_b) values (:_kid_0, :_column_a_0, 3)",
+ "TableName": "overlap_vindex",
+ "VindexValues": {
+ "cola_kid_map": "INT64(2), INT64(1)",
+ "kid_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.overlap_vindex"
+ ]
+ }
+ },
+ {
+ "comment": "insert multiple rows in a multi column vindex table",
+ "query": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0), (:_column_a_1, :_column_b_1, :_column_c_1, :_kid_1)",
+ "TableName": "multicolvin",
+ "VindexValues": {
+ "cola_map": "INT64(1), INT64(5)",
+ "colb_colc_map": "INT64(2), INT64(6), INT64(3), INT64(7)",
+ "kid_index": "INT64(4), INT64(8)"
+ }
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "delete row in a multi column vindex table",
+ "query": "delete from multicolvin where kid=1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicolvin where kid=1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
+ "Query": "delete from multicolvin where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicolvin where kid=1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
+ "Query": "delete from multicolvin where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update columns of multi column vindex",
+ "query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update multiple vindexes, with multi column vindex",
+ "query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "cola_map:4",
+ "colb_colc_map:5"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "cola_map:4",
+ "colb_colc_map:5"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update with no primary vindex on where clause (scatter update)",
+ "query": "update user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with target destination",
+ "query": "update `user[-]`.user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update `user[-]`.user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with no primary vindex on where clause (scatter update) - multi shard autocommit",
+ "query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": true,
+ "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with no primary vindex on where clause (scatter update) - query timeout",
+ "query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
+ "QueryTimeout": 1,
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with non-comparison expr",
+ "query": "update user_extra set val = 1 where id between 1 and 2",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where id between 1 and 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where id between 1 and 2",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with primary id through IN clause",
+ "query": "update user_extra set val = 1 where user_id in (1, 2)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where user_id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where user_id in (1, 2)",
+ "Table": "user_extra",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with non-unique key",
+ "query": "update user_extra set val = 1 where name = 'foo'",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where `name` = 'foo'",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update by lookup with IN clause",
+ "query": "update user_extra set val = 1 where id in (1, 2)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where id in (1, 2)",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with where clause with parens",
+ "query": "update user_extra set val = 1 where (name = 'foo' or id = 1)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where (name = 'foo' or id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where `name` = 'foo' or id = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no where clause",
+ "query": "delete from user_extra",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete with target destination",
+ "query": "delete from `user[-]`.user_extra",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from `user[-]`.user_extra",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete with non-comparison expr",
+ "query": "delete from user_extra where user_id between 1 and 2",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra where user_id between 1 and 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra where user_id between 1 and 2",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no index match",
+ "query": "delete from user_extra where name = 'jose'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra where name = 'jose'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra where `name` = 'jose'",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no index match - multi shard autocommit",
+ "query": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": true,
+ "Query": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where `name` = 'jose'",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no index match - query timeout",
+ "query": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where `name` = 'jose'",
+ "QueryTimeout": 1,
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with primary id in through IN clause",
+ "query": "delete from user_extra where user_id in (1, 2)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra where user_id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra where user_id in (1, 2)",
+ "Table": "user_extra",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded update where inner query references outer query",
+ "query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Table": "unsharded, unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Table": "unsharded, unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded delete where inner query references outer query",
+ "query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null",
+ "query": "update user set name = null where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = null where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = null where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using last_insert_id",
+ "query": "insert into unsharded values(last_insert_id(), 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values(last_insert_id(), 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (:__lastInsertId, 2)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null with multiple primary keyspace id",
+ "query": "update user set name = null where id in (1, 2, 3)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id in (1, 2, 3)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id in (1, 2, 3) for update",
+ "Query": "update `user` set `name` = null where id in (1, 2, 3)",
+ "Table": "user",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null without a where clause",
+ "query": "update user set name = null",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` for update",
+ "Query": "update `user` set `name` = null",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null with complex where clause",
+ "query": "update user set name = null where id + 1 = 2",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id + 1 = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id + 1 = 2 for update",
+ "Query": "update `user` set `name` = null where id + 1 = 2",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from user by primary keyspace id with in clause",
+ "query": "delete from user where id in (1, 2, 3)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id in (1, 2, 3)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id in (1, 2, 3) for update",
+ "Query": "delete from `user` where id in (1, 2, 3)",
+ "Table": "user",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from user by complex expression",
+ "query": "delete from user where id + 1 = 2",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id + 1 = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id + 1 = 2 for update",
+ "Query": "delete from `user` where id + 1 = 2",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from user without a where clause",
+ "query": "delete from user",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with single table targets",
+ "query": "delete music from music where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete music from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete music from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "scatter update table with owned vindexes without changing lookup vindex",
+ "query": "update user set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter delete with owned lookup vindex",
+ "query": "delete from user",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update multi column vindex, without values for all the vindex columns",
+ "query": "update multicolvin set column_c = 2 where kid = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update with binary value",
+ "query": "update user set name = _binary 'abc' where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = _binary 'abc' where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = _binary 'abc' where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = _binary 'abc' where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = _binary 'abc' where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with binary value",
+ "query": "delete from user where name = _binary 'abc'",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where name = _binary 'abc'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
+ "Query": "delete from `user` where `name` = _binary 'abc'",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where name = _binary 'abc'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
+ "Query": "delete from `user` where `name` = _binary 'abc'",
+ "Table": "user",
+ "Values": [
+ "VARBINARY(\"abc\")"
+ ],
+ "Vindex": "name_user_map"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with shard targeting",
+ "query": "delete from `user[-]`.user",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from `user[-]`.user",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update with shard targeting",
+ "query": "update `user[-]`.user set name = 'myname'",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update `user[-]`.user set name = 'myname'",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'myname' from `user` for update",
+ "Query": "update `user` set `name` = 'myname'",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update with shard targeting without vindex",
+ "query": "update `user[-]`.user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update `user[-]`.user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "multi-table delete with single table",
+ "query": "delete u.* from user u where u.id * u.col = u.foo",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete u.* from user u where u.id * u.col = u.foo",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id * `user`.col = `user`.foo for update",
+ "Query": "delete from `user` where `user`.id * `user`.col = `user`.foo",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with unknown reference",
+ "query": "delete music from user where id = 1",
+ "plan": "VT03003: unknown table 'music' in MULTI DELETE"
+ },
+ {
+ "comment": "delete with derived tables",
+ "query": "delete music from (select * from user) music where id = 1",
+ "plan": "VT03004: the target table music of the DELETE is not updatable"
+ },
+ {
+ "comment": "delete with derived tables with unknown table",
+ "query": "delete user from (select * from user) music where id = 1",
+ "plan": "VT03003: unknown table 'user' in MULTI DELETE"
+ },
+ {
+ "comment": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
+ "query": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update",
+ "TableName": "user_privacy_consents"
+ },
+ "TablesUsed": [
+ "main.user_privacy_consents"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update",
+ "TableName": "user_privacy_consents"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "main.user_privacy_consents"
+ ]
+ }
+ },
+ {
+ "comment": "Delete on backfilling unique lookup vindex should be a scatter",
+ "query": "delete from zlookup_unique.t1 where c2 = 20",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 20 for update",
+ "Query": "delete from t1 where c2 = 20",
+ "Table": "t1"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Update on backfilling unique lookup vindex should be a scatter",
+ "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 20",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 20 for update",
+ "Query": "update t1 set c2 = 1 where c2 = 20",
+ "Table": "t1"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Delete on backfilling and non-backfilling unique lookup vindexes should be a delete equal",
+ "query": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "delete from t1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "delete from t1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Update on backfilling and non-backfilling unique lookup vindexes should be an equal",
+ "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Delete EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be a delete IN",
+ "query": "delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 in (20, 21) for update",
+ "Query": "delete from t1 where c2 = 10 and c3 in (20, 21)",
+ "Table": "t1",
+ "Values": [
+ "(INT64(20), INT64(21))"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Update EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be an update IN",
+ "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 in (20, 21) for update",
+ "Query": "update t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
+ "Table": "t1",
+ "Values": [
+ "(INT64(20), INT64(21))"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "update with alias table",
+ "query": "update user u set u.name = 'john' where u.col > 20",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user u set u.name = 'john' where u.col > 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, u.`name` = 'john' from `user` as u where u.col > 20 for update",
+ "Query": "update `user` as u set u.`name` = 'john' where u.col > 20",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with alias table",
+ "query": "delete from user u where u.col > 20",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user u where u.col > 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.col > 20 for update",
+ "Query": "delete from `user` as u where u.col > 20",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex",
+ "query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex - reverse order",
+ "query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex using an IN clause",
+ "query": "update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex using an IN clause",
+ "query": "update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola in (3, 4)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(3), INT64(4))",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex",
+ "query": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex - reverse order",
+ "query": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
+ "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
+ "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex using an IN clause",
+ "query": "delete from multicol_tbl where colb IN (1,2) and cola = 1",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb IN (1,2) and cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola = 1 for update",
+ "Query": "delete from multicol_tbl where colb in (1, 2) and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex using an IN clause",
+ "query": "delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola in (3, 4) for update",
+ "Query": "delete from multicol_tbl where colb in (1, 2) and cola in (3, 4)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(3), INT64(4))",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with multicol and an owned vindex which changes",
+ "query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colc_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colc_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using non-unique lookup vindex",
+ "query": "update multicol_tbl set x = 42 where name = 'foo'",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 42 where name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 42 where `name` = 'foo'",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using subsharding column",
+ "query": "update multicol_tbl set x = 42 where cola = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 42 where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 42 where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 42 where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 42 where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using subsharding column on lookup vindex",
+ "query": "update multicol_tbl set name = 'bar' where cola = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set name = 'bar' where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_muticoltbl_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
+ "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set name = 'bar' where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_muticoltbl_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
+ "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using subsharding column with in query",
+ "query": "update multicol_tbl set name = 'bar' where cola in (1,2)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set name = 'bar' where cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_muticoltbl_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola in (1, 2) for update",
+ "Query": "update multicol_tbl set `name` = 'bar' where cola in (1, 2)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using subsharding column with in query as lower cost over lookup vindex",
+ "query": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using non-unique lookup vindex",
+ "query": "delete from multicol_tbl where name = 'foo'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' for update",
+ "Query": "delete from multicol_tbl where `name` = 'foo'",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using subsharding column",
+ "query": "delete from multicol_tbl where cola = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
+ "Query": "delete from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
+ "Query": "delete from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using subsharding column with in query",
+ "query": "delete from multicol_tbl where cola in (1,2)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola in (1, 2) for update",
+ "Query": "delete from multicol_tbl where cola in (1, 2)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using subsharding column with in query as lower cost over lookup vindex",
+ "query": "delete from multicol_tbl where name = 'foo' and cola = 2",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
+ "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
+ "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with simple table.",
+ "query": "insert into music(id, user_id) select * from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(id, user_id) select * from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "music",
+ "VindexOffsetFromSelect": {
+ "music_user_map": "[0]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(id, user_id) select * from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "music",
+ "VindexOffsetFromSelect": {
+ "music_user_map": "[0]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with more columns in insert",
+ "query": "insert into music(id, user_id) select 1",
+ "plan": "VT03006: column count does not match value count at row 1"
+ },
+ {
+ "comment": "insert using select with more columns in select",
+ "query": "insert into music(id, user_id) select id, count(user_id), sum(user_id) from user group by id",
+ "plan": "VT03006: column count does not match value count at row 1"
+ },
+ {
+ "comment": "insert using select with more columns in select after accounting for star column",
+ "query": "insert into music(id, user_id) select id, *, 2 from user",
+ "plan": "VT03006: column count does not match value count at row 1"
+ },
+ {
+ "comment": "insert using select with auto-inc column using vitess sequence, sequence column not present",
+ "query": "insert into user_extra(user_id) select id from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id) select id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id) select id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with auto-inc column using vitess sequence, sequence column present",
+ "query": "insert into user_extra(id, user_id) select null, id from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(id, user_id) select null, id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select null, id from `user` where 1 != 1",
+ "Query": "select null, id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(id, user_id) select null, id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select null, id from `user` where 1 != 1",
+ "Query": "select null, id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded insert from select",
+ "query": "insert into user(id) select 1 from dual",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:0",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:0",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with sharding column is autoinc and not present in the insert column query",
+ "query": "insert into user(pattern) SELECT 1",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(pattern) SELECT 1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(pattern) SELECT 1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with sharding column is not an autoinc and not present in the insert column query",
+ "query": "insert into user_extra(pattern) SELECT 1",
+ "plan": "VT09003: INSERT query does not have primary vindex column 'user_id' in the column list"
+ },
+ {
+ "comment": "sharded same keyspace",
+ "query": "insert into user_extra(user_id, col) select col1, col2 from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1",
+ "Query": "select col1, col2 from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1",
+ "Query": "select col1, col2 from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded same keyspace",
+ "query": "insert into unsharded(col) select col from unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded(col) select col from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded(col) select col from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "sharded different keyspace",
+ "query": "insert into user_extra(user_id, col) select col1, col2 from t1",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from t1 where 1 != 1",
+ "Query": "select col1, col2 from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from t1 where 1 != 1",
+ "Query": "select col1, col2 from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra",
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "sharded insert table, unsharded select table",
+ "query": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
+ "Query": "select col1, col2 from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
+ "Query": "select col1, col2 from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main_2.unsharded_tab",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded different keyspace",
+ "query": "insert into unsharded(col) select col from unsharded_tab",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded_tab where 1 != 1",
+ "Query": "select col from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded_tab where 1 != 1",
+ "Query": "select col from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main_2.unsharded_tab"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert table, sharded select table",
+ "query": "insert into unsharded(col) select col from t1",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from t1 where 1 != 1",
+ "Query": "select col from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from t1 where 1 != 1",
+ "Query": "select col from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded subquery in sharded update, not the same keyspace between outer and inner",
+ "query": "update user set col = (select id from unsharded)",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select id from unsharded)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded lock in share mode",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = :__sq1",
+ "Table": "user"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded subquery in unsharded update, not the same keyspace",
+ "query": "update unsharded set col = (select id from user)",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` lock in share mode",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = :__sq1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join unsharded subqueries in unsharded update",
+ "query": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "unsharded_id": 0
+ },
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded lock in share mode",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = :unsharded_id lock in share mode",
+ "Table": "`user`",
+ "Values": [
+ ":unsharded_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = :__sq1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded update with sub query where the sources can be merged into a single query",
+ "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
+ "Table": "user",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "merge through correlated subquery",
+ "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id = 5",
+ "Table": "user",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "merge through correlated subquery #2",
+ "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id > 5",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id > 5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id > 5",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "all defaults empty column, empty values",
+ "query": "insert into authoritative () values ()",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into authoritative () values ()",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into authoritative(user_id) values (:_user_id_0)",
+ "TableName": "authoritative",
+ "VindexValues": {
+ "user_index": "NULL"
+ }
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "explain dml without any directive should fail",
+ "query": "explain format=vtexplain delete from user",
+ "plan": "VT09008: vexplain queries/all will actually run queries"
+ },
+ {
+ "comment": "vexplain dml with actually_run_query directive",
+ "query": "vexplain /*vt+ execute_dml_queries */ queries delete from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "vexplain /*vt+ execute_dml_queries */ queries delete from user",
+ "Instructions": {
+ "OperatorType": "VEXPLAIN",
+ "Type": "queries",
+ "Inputs": [
+ {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "explain dml with actually_run_query directive - 2",
+ "query": "vexplain /*vt+ eXECUTE_DML_QUERIES */ queries delete from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "vexplain /*vt+ eXECUTE_DML_QUERIES */ queries delete from user",
+ "Instructions": {
+ "OperatorType": "VEXPLAIN",
+ "Type": "queries",
+ "Inputs": [
+ {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Here V3 populates the TablesUsed incorrectly\n# delete with join from multi table join subquery.",
+ "query": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded, unsharded, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded, unsharded_b"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using multi column vindex",
+ "query": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using multi column vindex",
+ "query": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
+ "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
+ "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert into ref; TODO(maxeng) is this a bug?",
+ "query": "insert into ref(col) values(1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into ref(col) values(1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into ref(col) values (1)",
+ "TableName": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "update using last_insert_id with an argument",
+ "query": "update main.m1 set foo = last_insert_id(foo+1) where id = 12345",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update main.m1 set foo = last_insert_id(foo+1) where id = 12345",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update m1 set foo = last_insert_id(foo + 1) where id = 12345",
+ "Table": "m1"
+ },
+ "TablesUsed": [
+ "main.m1"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt b/go/vt/vtgate/planbuilder/testdata/dml_cases.txt
deleted file mode 100644
index d141cd661ad..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt
+++ /dev/null
@@ -1,6090 +0,0 @@
-# update table not found
-"update nouser set val = 1"
-"table nouser not found"
-Gen4 plan same as above
-
-# delete table not found
-"delete from nouser"
-"table nouser not found"
-Gen4 plan same as above
-
-# explicit keyspace reference
-"update main.m1 set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update main.m1 set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update m1 set val = 1",
- "Table": "m1"
- },
- "TablesUsed": [
- "main.m1"
- ]
-}
-Gen4 plan same as above
-
-# update unsharded
-"update unsharded set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set val = 1",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# subqueries in unsharded update
-"update unsharded set col = (select col from unsharded limit 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select col from unsharded limit 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select col from unsharded limit 1)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# unsharded union in subquery of unsharded update
-"update unsharded set col = (select id from unsharded union select id from unsharded)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded union select id from unsharded)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded union select id from unsharded)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# unsharded join in subquery of unsharded update
-"update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded as a join unsharded as b on a.id = b.id)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# update with join subquery
-"update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col \u003c 1000"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# routing rules: updated of a routed table
-"update route1 set a=1 where id=1"
-{
- "QueryType": "UPDATE",
- "Original": "update route1 set a=1 where id=1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as route1 set a = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update route1 set a=1 where id=1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as route1 set a = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update: routing rules for subquery.
-"update unsharded_a set a=(select a from route2)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a set a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a set a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded_a"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a set a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a set a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# delete unsharded
-"delete from unsharded"
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# update by primary keyspace id
-"update user set val = 1 where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id with alias
-"update user as user_alias set val = 1 where user_alias.id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id with parenthesized expression
-"update user set val = 1 where (id = 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id with multi-part where clause with parens
-"update user set val = 1 where (name = 'foo' and id = 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id, changing one vindex column
-"update user_metadata set email = 'juan@vitess.io' where user_id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-
-# update by primary keyspace id, changing same vindex twice
-"update user_metadata set email = 'a', email = 'b' where user_id = 1"
-"column has duplicate set values: 'email'"
-Gen4 plan same as above
-
-# update by primary keyspace id, changing multiple vindex columns
-"update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "address_user_map:4",
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "address_user_map:4",
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-
-# update by primary keyspace id, changing one vindex column, using order by and limit
-"update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10"
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-
-# update changes non owned vindex column
-"update music_extra set music_id = 1 where user_id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update music_extra set music_id = 1 where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "music_user_map:1"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
- "Query": "update music_extra set music_id = 1 where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update music_extra set music_id = 1 where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "music_user_map:1"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
- "Query": "update music_extra set music_id = 1 where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-
-# update by primary keyspace id, stray where clause
-"update user set val = 1 where id = id2 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = id2 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = id2 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = id2 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = id2 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id, stray where clause with conversion error
-"update user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete from by primary keyspace id
-"delete from user where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
- "Query": "delete from `user` where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
- "Query": "delete from `user` where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multi-table delete with comma join
-"delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete a from unsharded_a as a, unsharded_b as b where a.id = b.id and b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# multi-table delete with ansi join
-"delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete a from unsharded_a as a join unsharded_b as b on a.id = b.id where b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-#delete with join from subquery
-"delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000"
-{
- "QueryType": "DELETE",
- "Original": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# routing rules: deleted from a routed table
-"delete from route1 where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
- "Query": "delete from `user` as route1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
- "Query": "delete from `user` as route1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete: routing rules for subquery
-"delete from unsharded_a where a=(select a from route2)"
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded_a where a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded_a"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded_a where a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# update by lookup
-"update music set val = 1 where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update music set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update music set val = 1 where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update music set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update music set val = 1 where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# update multi-table ansi join
-"update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a as a join unsharded_b as b on a.id = b.id set a.val = 'foo' where b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# update multi-table comma join
-"update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a as a, unsharded_b as b set a.val = 'foo' where a.id = b.id and b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# delete from by lookup
-"delete from music where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# delete from, no owned vindexes
-"delete from music_extra where user_id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from music_extra where user_id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from music_extra where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from music_extra where user_id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from music_extra where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-
-# simple insert, no values
-"insert into unsharded values()"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values()",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values ()",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# simple insert unsharded
-"insert into unsharded values(1, 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values(1, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (1, 2)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# simple upsert unsharded
-"insert into unsharded values(1, 2) on duplicate key update x = 3"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values(1, 2) on duplicate key update x = 3",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (1, 2) on duplicate key update x = 3",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# unsharded insert, no col list with auto-inc and authoritative column list
-"insert into unsharded_authoritative values(1,1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_authoritative values(1,1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_authoritative(col1, col2) values (:__seq0, 1)",
- "TableName": "unsharded_authoritative"
- },
- "TablesUsed": [
- "main.unsharded_authoritative"
- ]
-}
-Gen4 plan same as above
-
-# sharded upsert with sharding key set to vindex column
-"insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)"
-{
- "QueryType": "INSERT",
- "Original": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0) on duplicate key update user_id = values(user_id)",
- "TableName": "music",
- "VindexValues": {
- "music_user_map": "INT64(2)",
- "user_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-Gen4 plan same as above
-
-# sharded bulk upsert with sharding key set to vindex column
-"insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)"
-{
- "QueryType": "INSERT",
- "Original": "insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0), (:_user_id_1, :_id_1) on duplicate key update user_id = values(user_id)",
- "TableName": "music",
- "VindexValues": {
- "music_user_map": "INT64(2), INT64(4)",
- "user_index": "INT64(1), INT64(3)"
- }
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded with select
-"insert into unsharded select id from unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# insert unsharded with select with join
-"insert into unsharded select id from unsharded join unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded join unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded join unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# insert unsharded, invalid value for auto-inc
-"insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, column present
-"insert into unsharded_auto(id, val) values(1, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(id, val) values(1, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, column absent
-"insert into unsharded_auto(val) values('aa')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(val) values('aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(val, id) values ('aa', :__seq0)",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, column absent
-"insert into unsharded_auto(val) values(false)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(val) values(false)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(val, id) values (false, :__seq0)",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, multi-val
-"insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# unsharded insert subquery in insert value
-"insert into unsharded values((select 1 from dual), 1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values((select 1 from dual), 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (1, 1)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# sharded insert subquery in insert value
-"insert into user(id, val) values((select 1), 1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id, val) values((select 1), 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, val, `Name`, Costly) values (:_Id_0, 1, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert into a routed table
-"insert into route1(id) values (1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into route1(id) values (1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with mimatched column list
-"insert into user(id) values (1, 2)"
-"column list doesn't match values"
-Gen4 plan same as above
-
-# insert no column list for sharded authoritative table
-"insert into authoritative values(1, 2, 3)"
-{
- "QueryType": "INSERT",
- "Original": "insert into authoritative values(1, 2, 3)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, 2, 3)",
- "TableName": "authoritative",
- "VindexValues": {
- "user_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-Gen4 plan same as above
-
-# insert sharded, no values
-"insert into user values()"
-{
- "QueryType": "INSERT",
- "Original": "insert into user values()",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with one vindex
-"insert into user(id) values (1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values (1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert ignore sharded
-"insert ignore into user(id) values (1)"
-{
- "QueryType": "INSERT",
- "Original": "insert ignore into user(id) values (1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert ignore into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert on duplicate key
-"insert into user(id) values(1) on duplicate key update col = 2"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values(1) on duplicate key update col = 2",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0) on duplicate key update col = 2",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with one vindex and bind var
-"insert into user(id) values (:aa)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values (:aa)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with non vindex
-"insert into user(nonid) values (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid) values (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(nonid, id, `Name`, Costly) values (2, :_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with default seq
-"insert into user(id, nonid) values (default, 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id, nonid) values (default, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, nonid, `Name`, Costly) values (:_Id_0, 2, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with non vindex bool value
-"insert into user(nonid) values (true)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid) values (true)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(nonid, id, `Name`, Costly) values (true, :_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with all vindexes supplied
-"insert into user(nonid, name, id) values (2, 'foo', 1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid, name, id) values (2, 'foo', 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(nonid, `name`, id, Costly) values (2, :_Name_0, :_Id_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "VARCHAR(\"foo\")",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert for non-vindex autoinc
-"insert into user_extra(nonid) values (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(nonid) values (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
- "TableName": "user_extra",
- "VindexValues": {
- "user_index": "NULL"
- }
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# insert for non-compliant names
-"insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `weird``name`(`a``b*c`, `b*c`) values (:_a_b_c_0, 2)",
- "TableName": "weird`name",
- "VindexValues": {
- "user_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.weird`name"
- ]
-}
-Gen4 plan same as above
-
-# unsharded insert from union
-"insert into unsharded select 1 from dual union select 1 from dual"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select 1 from dual union select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select 1 from dual union select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.dual",
- "main.unsharded"
- ]
-}
-
-# insert for non-vindex autoinc, invalid value
-"insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
- "TableName": "user_extra",
- "VindexValues": {
- "user_index": "NULL"
- }
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# insert invalid index value
-"insert into music_extra(music_id, user_id) values(1, 18446744073709551616)"
-{
- "QueryType": "INSERT",
- "Original": "insert into music_extra(music_id, user_id) values(1, 18446744073709551616)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into music_extra(music_id, user_id) values (:_music_id_0, :_user_id_0)",
- "TableName": "music_extra",
- "VindexValues": {
- "music_user_map": "INT64(1)",
- "user_index": "DECIMAL(18446744073709551616)"
- }
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-Gen4 plan same as above
-
-# insert invalid index value
-"insert into music_extra(music_id, user_id) values(1, id)"
-"column access not supported here"
-Gen4 plan same as above
-
-# insert invalid table
-"insert into noexist(music_id, user_id) values(1, 18446744073709551616)"
-"table noexist not found"
-Gen4 plan same as above
-
-# insert with multiple rows
-"insert into user(id) values (1), (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values (1), (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL, NULL",
- "name_user_map": "NULL, NULL",
- "user_index": ":__seq0, :__seq1"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with query timeout
-"insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
- "QueryTimeout": 1,
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL, NULL",
- "name_user_map": "NULL, NULL",
- "user_index": ":__seq0, :__seq1"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with multiple rows - multi-shard autocommit
-"insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": true,
- "Query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL, NULL",
- "name_user_map": "NULL, NULL",
- "user_index": ":__seq0, :__seq1"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert into a vindex not allowed
-"insert into user_index(id) values(1)"
-"unsupported: multi-shard or vindex write statement"
-Gen4 plan same as above
-
-# simple replace unsharded
-"replace into unsharded values(1, 2)"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded values(1, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded values (1, 2)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded with select
-"replace into unsharded select id from unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# replace unsharded, invalid value for auto-inc
-"replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded, column present
-"replace into unsharded_auto(id, val) values(1, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(id, val) values(1, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded, column absent
-"replace into unsharded_auto(val) values('aa')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(val) values('aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(val, id) values ('aa', :__seq0)",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded, multi-val
-"replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace invalid table
-"replace into noexist(music_id, user_id) values(1, 18446744073709551616)"
-"table noexist not found"
-Gen4 plan same as above
-
-# insert a row in a multi column vindex table
-"insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)"
-{
- "QueryType": "INSERT",
- "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0)",
- "TableName": "multicolvin",
- "VindexValues": {
- "cola_map": "INT64(1)",
- "colb_colc_map": "INT64(2), INT64(3)",
- "kid_index": "INT64(4)"
- }
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-Gen4 plan same as above
-
-# insert for overlapped vindex columns
-"insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)"
-{
- "QueryType": "INSERT",
- "Original": "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into overlap_vindex(kid, column_a, column_b) values (:_kid_0, :_column_a_0, 3)",
- "TableName": "overlap_vindex",
- "VindexValues": {
- "cola_kid_map": "INT64(2), INT64(1)",
- "kid_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.overlap_vindex"
- ]
-}
-Gen4 plan same as above
-
-# insert multiple rows in a multi column vindex table
-"insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)"
-{
- "QueryType": "INSERT",
- "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0), (:_column_a_1, :_column_b_1, :_column_c_1, :_kid_1)",
- "TableName": "multicolvin",
- "VindexValues": {
- "cola_map": "INT64(1), INT64(5)",
- "colb_colc_map": "INT64(2), INT64(6), INT64(3), INT64(7)",
- "kid_index": "INT64(4), INT64(8)"
- }
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-Gen4 plan same as above
-
-# delete row in a multi column vindex table
-"delete from multicolvin where kid=1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicolvin where kid=1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
- "Query": "delete from multicolvin where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicolvin where kid=1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
- "Query": "delete from multicolvin where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update columns of multi column vindex
-"update multicolvin set column_b = 1, column_c = 2 where kid = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update multiple vindexes, with multi column vindex
-"update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "cola_map:4",
- "colb_colc_map:5"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "cola_map:4",
- "colb_colc_map:5"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update with no primary vindex on where clause (scatter update)
-"update user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with target destination
-"update `user[-]`.user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update `user[-]`.user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with no primary vindex on where clause (scatter update) - multi shard autocommit
-"update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": true,
- "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with no primary vindex on where clause (scatter update) - query timeout
-"update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
- "QueryTimeout": 1,
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with non-comparison expr
-"update user_extra set val = 1 where id between 1 and 2"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where id between 1 and 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where id between 1 and 2",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with primary id through IN clause
-"update user_extra set val = 1 where user_id in (1, 2)"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where user_id in (1, 2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where user_id in (1, 2)",
- "Table": "user_extra",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with non-unique key
-"update user_extra set val = 1 where name = 'foo'"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where name = 'foo'",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where `name` = 'foo'",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update by lookup with IN clause
-"update user_extra set val = 1 where id in (1, 2)"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where id in (1, 2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where id in (1, 2)",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with where clause with parens
-"update user_extra set val = 1 where (name = 'foo' or id = 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where (name = 'foo' or id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where `name` = 'foo' or id = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no where clause
-"delete from user_extra"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete with target destination
-"delete from `user[-]`.user_extra"
-{
- "QueryType": "DELETE",
- "Original": "delete from `user[-]`.user_extra",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete with non-comparison expr
-"delete from user_extra where user_id between 1 and 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra where user_id between 1 and 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra where user_id between 1 and 2",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no index match
-"delete from user_extra where name = 'jose'"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra where name = 'jose'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra where `name` = 'jose'",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no index match - multi shard autocommit
-"delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'"
-{
- "QueryType": "DELETE",
- "Original": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": true,
- "Query": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where `name` = 'jose'",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no index match - query timeout
-"delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'"
-{
- "QueryType": "DELETE",
- "Original": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where `name` = 'jose'",
- "QueryTimeout": 1,
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with primary id in through IN clause
-"delete from user_extra where user_id in (1, 2)"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra where user_id in (1, 2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra where user_id in (1, 2)",
- "Table": "user_extra",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# unsharded update where inner query references outer query
-"update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Table": "unsharded, unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Table": "unsharded, unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-
-# unsharded delete where inner query references outer query
-"delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)"
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# update vindex value to null
-"update user set name = null where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
- "Query": "update `user` set `name` = null where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
- "Query": "update `user` set `name` = null where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# insert using last_insert_id
-"insert into unsharded values(last_insert_id(), 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values(last_insert_id(), 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (:__lastInsertId, 2)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# update vindex value to null with multiple primary keyspace id
-"update user set name = null where id in (1, 2, 3)"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id in (1, 2, 3)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id in (1, 2, 3) for update",
- "Query": "update `user` set `name` = null where id in (1, 2, 3)",
- "Table": "user",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update vindex value to null without a where clause
-"update user set name = null"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` for update",
- "Query": "update `user` set `name` = null",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update vindex value to null with complex where clause
-"update user set name = null where id + 1 = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id + 1 = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id + 1 = 2 for update",
- "Query": "update `user` set `name` = null where id + 1 = 2",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete from user by primary keyspace id with in clause
-"delete from user where id in (1, 2, 3)"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id in (1, 2, 3)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id in (1, 2, 3) for update",
- "Query": "delete from `user` where id in (1, 2, 3)",
- "Table": "user",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete from user by complex expression
-"delete from user where id + 1 = 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id + 1 = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id + 1 = 2 for update",
- "Query": "delete from `user` where id + 1 = 2",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete from user without a where clause
-"delete from user"
-{
- "QueryType": "DELETE",
- "Original": "delete from user",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete with single table targets
-"delete music from music where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete music from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete music from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# scatter update table with owned vindexes without changing lookup vindex
-"update user set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# scatter delete with owned lookup vindex
-"delete from user"
-{
- "QueryType": "DELETE",
- "Original": "delete from user",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update multi column vindex, without values for all the vindex columns
-"update multicolvin set column_c = 2 where kid = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update with binary value
-"update user set name = _binary 'abc' where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = _binary 'abc' where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
- "Query": "update `user` set `name` = _binary 'abc' where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = _binary 'abc' where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
- "Query": "update `user` set `name` = _binary 'abc' where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete with binary value
-"delete from user where name = _binary 'abc'"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where name = _binary 'abc'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
- "Query": "delete from `user` where `name` = _binary 'abc'",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from user where name = _binary 'abc'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
- "Query": "delete from `user` where `name` = _binary 'abc'",
- "Table": "user",
- "Values": [
- "VARBINARY(\"abc\")"
- ],
- "Vindex": "name_user_map"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete with shard targeting
-"delete from `user[-]`.user"
-{
- "QueryType": "DELETE",
- "Original": "delete from `user[-]`.user",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update with shard targeting
-"update `user[-]`.user set name = 'myname'"
-{
- "QueryType": "UPDATE",
- "Original": "update `user[-]`.user set name = 'myname'",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'myname' from `user` for update",
- "Query": "update `user` set `name` = 'myname'",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update with shard targeting without vindex
-"update `user[-]`.user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update `user[-]`.user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# multi-table delete with single table
-"delete u.* from user u where u.id * u.col = u.foo"
-{
- "QueryType": "DELETE",
- "Original": "delete u.* from user u where u.id * u.col = u.foo",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id * `user`.col = `user`.foo for update",
- "Query": "delete from `user` where `user`.id * `user`.col = `user`.foo",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete with unknown reference
-"delete music from user where id = 1"
-"Unknown table 'music' in MULTI DELETE"
-Gen4 plan same as above
-
-# delete with derived tables
-"delete music from (select * from user) music where id = 1"
-"The target table music of the DELETE is not updatable"
-Gen4 plan same as above
-
-# delete with derived tables with unknown table
-"delete user from (select * from user) music where id = 1"
-"Unknown table 'user' in MULTI DELETE"
-Gen4 plan same as above
-
-"INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update",
- "TableName": "user_privacy_consents"
- },
- "TablesUsed": [
- "main.user_privacy_consents"
- ]
-}
-Gen4 error: unsupported: unable to split predicates to derived table: not :__sq_has_values1
-
-# Delete on backfilling unique lookup vindex should be a scatter
-"delete from zlookup_unique.t1 where c2 = 20"
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 20 for update",
- "Query": "delete from t1 where c2 = 20",
- "Table": "t1"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-# Update on backfilling unique lookup vindex should be a scatter
-"update zlookup_unique.t1 set c2 = 1 where c2 = 20"
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 20 for update",
- "Query": "update t1 set c2 = 1 where c2 = 20",
- "Table": "t1"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-# Delete on backfilling and non-backfilling unique lookup vindexes should be a delete equal
-"delete from zlookup_unique.t1 where c2 = 10 and c3 = 20"
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "delete from t1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "delete from t1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-
-# Update on backfilling and non-backfilling unique lookup vindexes should be an equal
-"update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20"
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-
-# Delete EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be a delete IN
-"delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)"
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 in (20, 21) for update",
- "Query": "delete from t1 where c2 = 10 and c3 in (20, 21)",
- "Table": "t1",
- "Values": [
- "(INT64(20), INT64(21))"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-# Update EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be an update IN
-"update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)"
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 in (20, 21) for update",
- "Query": "update t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
- "Table": "t1",
- "Values": [
- "(INT64(20), INT64(21))"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-#update with alias table
-"update user u set u.name = 'john' where u.col \u003e 20"
-{
- "QueryType": "UPDATE",
- "Original": "update user u set u.name = 'john' where u.col \u003e 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, u.`name` = 'john' from `user` as u where u.col \u003e 20 for update",
- "Query": "update `user` as u set u.`name` = 'john' where u.col \u003e 20",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-#delete with alias table
-"delete from user u where u.col \u003e 20"
-{
- "QueryType": "DELETE",
- "Original": "delete from user u where u.col \u003e 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.col \u003e 20 for update",
- "Query": "delete from `user` as u where u.col \u003e 20",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update with a multicol vindex
-"update multicol_tbl set x = 1 where cola = 1 and colb = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with a multicol vindex - reverse order
-"update multicol_tbl set x = 1 where colb = 2 and cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with a multicol vindex using an IN clause
-"update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with a multicol vindex using an IN clause
-"update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola in (3, 4)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(3), INT64(4))",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with a multicol vindex
-"delete from multicol_tbl where cola = 1 and colb = 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with a multicol vindex - reverse order
-"delete from multicol_tbl where colb = 2 and cola = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
- "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
- "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with a multicol vindex using an IN clause
-"delete from multicol_tbl where colb IN (1,2) and cola = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb IN (1,2) and cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola = 1 for update",
- "Query": "delete from multicol_tbl where colb in (1, 2) and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with a multicol vindex using an IN clause
-"delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola in (3, 4) for update",
- "Query": "delete from multicol_tbl where colb in (1, 2) and cola in (3, 4)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(3), INT64(4))",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with multicol and an owned vindex which changes
-"update multicol_tbl set colc = 1 where cola = 1 and colb = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colc_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colc_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with routing using non-unique lookup vindex
-"update multicol_tbl set x = 42 where name = 'foo'"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 42 where name = 'foo'",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 42 where `name` = 'foo'",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with routing using subsharding column
-"update multicol_tbl set x = 42 where cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 42 where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 42 where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 42 where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 42 where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with routing using subsharding column on lookup vindex
-"update multicol_tbl set name = 'bar' where cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set name = 'bar' where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_muticoltbl_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
- "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set name = 'bar' where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_muticoltbl_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
- "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with routing using subsharding column with in query
-"update multicol_tbl set name = 'bar' where cola in (1,2)"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set name = 'bar' where cola in (1,2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_muticoltbl_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola in (1, 2) for update",
- "Query": "update multicol_tbl set `name` = 'bar' where cola in (1, 2)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with routing using subsharding column with in query as lower cost over lookup vindex
-"update multicol_tbl set x = 1 where name = 'foo' and cola = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with routing using non-unique lookup vindex
-"delete from multicol_tbl where name = 'foo'"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where name = 'foo'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' for update",
- "Query": "delete from multicol_tbl where `name` = 'foo'",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with routing using subsharding column
-"delete from multicol_tbl where cola = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
- "Query": "delete from multicol_tbl where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
- "Query": "delete from multicol_tbl where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with routing using subsharding column with in query
-"delete from multicol_tbl where cola in (1,2)"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola in (1,2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola in (1, 2) for update",
- "Query": "delete from multicol_tbl where cola in (1, 2)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with routing using subsharding column with in query as lower cost over lookup vindex
-"delete from multicol_tbl where name = 'foo' and cola = 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
- "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
- "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# insert using select with simple table.
-"insert into music(id, user_id) select * from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into music(id, user_id) select * from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "music",
- "VindexOffsetFromSelect": {
- "music_user_map": "[0]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into music(id, user_id) select * from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "music",
- "VindexOffsetFromSelect": {
- "music_user_map": "[0]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# insert using select with more columns in insert
-"insert into music(id, user_id) select 1"
-"Column count doesn't match value count at row 1"
-Gen4 plan same as above
-
-# insert using select with more columns in select
-"insert into music(id, user_id) select id, count(user_id), sum(user_id) from user group by id"
-"Column count doesn't match value count at row 1"
-Gen4 plan same as above
-
-# insert using select with more columns in select after accounting for star column
-"insert into music(id, user_id) select id, *, 2 from user"
-"Column count doesn't match value count at row 1"
-Gen4 plan same as above
-
-# insert using select with auto-inc column using vitess sequence, sequence column not present
-"insert into user_extra(user_id) select id from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id) select id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id) select id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# insert using select with auto-inc column using vitess sequence, sequence column present
-"insert into user_extra(id, user_id) select null, id from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(id, user_id) select null, id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select null, id from `user` where 1 != 1",
- "Query": "select null, id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(id, user_id) select null, id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select null, id from `user` where 1 != 1",
- "Query": "select null, id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded insert from select
-"insert into user(id) select 1 from dual"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:0",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:0",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# insert using select with sharding column is autoinc and not present in the insert column query
-"insert into user(pattern) SELECT 1"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(pattern) SELECT 1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user(pattern) SELECT 1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# insert using select with sharding column is not an autoinc and not present in the insert column query
-"insert into user_extra(pattern) SELECT 1"
-"insert query does not have sharding column 'user_id' in the column list"
-Gen4 plan same as above
-
-# sharded same keyspace
-"insert into user_extra(user_id, col) select col1, col2 from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1",
- "Query": "select col1, col2 from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1",
- "Query": "select col1, col2 from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# unsharded same keyspace
-"insert into unsharded(col) select col from unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded(col) select col from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded(col) select col from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# sharded different keyspace
-"insert into user_extra(user_id, col) select col1, col2 from t1"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from t1 where 1 != 1",
- "Query": "select col1, col2 from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from t1 where 1 != 1",
- "Query": "select col1, col2 from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra",
- "zlookup_unique.t1"
- ]
-}
-
-# sharded insert table, unsharded select table
-"insert into user_extra(user_id, col) select col1, col2 from unsharded_tab"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
- "Query": "select col1, col2 from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
- "Query": "select col1, col2 from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main_2.unsharded_tab",
- "user.user_extra"
- ]
-}
-
-# unsharded different keyspace
-"insert into unsharded(col) select col from unsharded_tab"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded_tab where 1 != 1",
- "Query": "select col from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded_tab where 1 != 1",
- "Query": "select col from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main_2.unsharded_tab"
- ]
-}
-
-# unsharded insert table, sharded select table
-"insert into unsharded(col) select col from t1"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col from t1 where 1 != 1",
- "Query": "select col from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col from t1 where 1 != 1",
- "Query": "select col from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "zlookup_unique.t1"
- ]
-}
-
-# unsharded subquery in sharded update, not the same keyspace between outer and inner
-"update user set col = (select id from unsharded)"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select id from unsharded)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded lock in share mode",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = :__sq1",
- "Table": "user"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sharded subquery in unsharded update, not the same keyspace
-"update unsharded set col = (select id from user)"
-"unsupported: sharded subqueries in DML"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` lock in share mode",
- "Table": "`user`"
- },
- {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = :__sq1",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sharded join unsharded subqueries in unsharded update
-"update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)"
-"unsupported: sharded subqueries in DML"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "unsharded_id": 0
- },
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded lock in share mode",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = :unsharded_id lock in share mode",
- "Table": "`user`",
- "Values": [
- ":unsharded_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = :__sq1",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sharded update with sub query where the sources can be merged into a single query
-"update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
- "Table": "user",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# merge through correlated subquery
-"update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id = 5",
- "Table": "user",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# merge through correlated subquery #2
-"update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id \u003e 5"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id \u003e 5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id \u003e 5",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# all defaults empty column, empty values
-"insert into authoritative () values ()"
-{
- "QueryType": "INSERT",
- "Original": "insert into authoritative () values ()",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into authoritative(user_id) values (:_user_id_0)",
- "TableName": "authoritative",
- "VindexValues": {
- "user_index": "NULL"
- }
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-Gen4 plan same as above
-
-# explain dml without any directive should fail
-"explain format=vtexplain delete from user"
-"explain format = vtexplain will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `explain /*vt+ EXECUTE_DML_QUERIES */ format = vtexplain delete from t1`"
-Gen4 plan same as above
-
-# explain dml with actually_run_query directive
-"explain /*vt+ execute_dml_queries */ format=vtexplain delete from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain /*vt+ execute_dml_queries */ format=vtexplain delete from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# explain dml with actually_run_query directive - 2
-"explain /*vt+ eXECUTE_DML_QUERIES */ format=vtexplain delete from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain /*vt+ eXECUTE_DML_QUERIES */ format=vtexplain delete from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Here V3 populates the TablesUsed incorrectly
-# delete with join from multi table join subquery.
-"delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000"
-{
- "QueryType": "DELETE",
- "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded, unsharded, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded, unsharded_b"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_b"
- ]
-}
-
-# update with routing using multi column vindex
-"update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete with routing using multi column vindex
-"delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
- "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
- "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
new file mode 100644
index 00000000000..17ffcb7734f
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
@@ -0,0 +1,6700 @@
+[
+ {
+ "comment": "No where clause",
+ "query": "select id from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Query that always return empty",
+ "query": "select id from user where someColumn = null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where someColumn = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn = null",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where someColumn = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn = null",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Null Safe Equality Operator is handled correctly",
+ "query": "SELECT id from user where someColumn <=> null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id from user where someColumn <=> null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn <=> null",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id from user where someColumn <=> null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn <=> null",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table unique vindex route",
+ "query": "select id from user where user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table unique vindex route, but complex expr",
+ "query": "select id from user where user.id = 5+5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5+5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5 + 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5+5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5 + 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(10)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table multiple unique vindex match",
+ "query": "select id from music where id = 5 and user_id = 4",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = 5 and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = 5 and user_id = 4",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = 5 and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = 5 and user_id = 4",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table multiple non-unique vindex match",
+ "query": "select id from user where costly = 'aa' and name = 'bb'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly = 'aa' and name = 'bb'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"bb\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly = 'aa' and name = 'bb'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"bb\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table multiple non-unique vindex match for IN clause",
+ "query": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause",
+ "query": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, swapped columns",
+ "query": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, choose cost within tuple",
+ "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, choose cost within tuple, swapped",
+ "query": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, choose cost",
+ "query": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause vs equality",
+ "query": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: multiple vindex matches",
+ "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: tuple inside tuple",
+ "query": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: tuple inside tuple, but no match in tuple",
+ "query": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: tuple inside tuple, mismiatched values",
+ "query": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: RHS not tuple",
+ "query": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: RHS has no simple values",
+ "query": "select id from user where (col1, name) in (('aa', 1+1))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (('aa', 1+1))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (('aa', 1+1))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(INT64(2))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "IN clause: LHS is neither column nor composite tuple",
+ "query": "select Id from user where 1 in ('aa', 'bb')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select Id from user where 1 in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select Id from `user` where 1 != 1",
+ "Query": "select Id from `user` where 1 in ('aa', 'bb')",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select Id from user where 1 in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select Id from `user` where 1 != 1",
+ "Query": "select Id from `user` where 1 in ('aa', 'bb')",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table complex in clause",
+ "query": "select id from user where name in (col, 'bb')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in (col, 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in (col, 'bb')",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in (col, 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in (col, 'bb')",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table equality route with val arg",
+ "query": "select id from user where name = :a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = :a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = :a",
+ "Table": "`user`",
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = :a",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = :a",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table equality route with unsigned value",
+ "query": "select id from user where name = 18446744073709551615",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = 18446744073709551615",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = 18446744073709551615",
+ "Table": "`user`",
+ "Values": [
+ "UINT64(18446744073709551615)"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = 18446744073709551615",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "UINT64(18446744073709551615)"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = 18446744073709551615",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table in clause list arg",
+ "query": "select id from user where name in ::list",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in ::list",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":list"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in ::list",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ ":list"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in ::__vals",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint",
+ "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint on right table",
+ "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint on left table of left join",
+ "query": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint on left-joined right table",
+ "query": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route unique vindex constraint",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route unique vindex route on both routes",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = 5",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and user_extra.user_id = 5 and `user`.col = user_extra.col",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route with cross-route constraint",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
+ "Table": "user_extra",
+ "Values": [
+ ":user_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
+ "Table": "user_extra",
+ "Values": [
+ ":user_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route with non-route constraint, should use first route.",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where 1 = 1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where 1 = 1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where 1 = 1 and user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectIN is the best constraint.",
+ "query": "select id from user where user.col = 5 and user.id in (1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints and boolean, SelectIN is the best constraint.",
+ "query": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints and boolean, SelectEqual is the best constraint.",
+ "query": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id or col as val from `user` where 1 != 1",
+ "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id or col as val from `user` where 1 != 1",
+ "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectEqual is the best constraint.",
+ "query": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint.",
+ "query": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint, order reversed.",
+ "query": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with OR and AND clause, must parenthesize correctly.",
+ "query": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`user`.id = 1 or `user`.`name` = 'aa') and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Unsharded route",
+ "query": "select unsharded.id from user join unsharded where unsharded.id = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "unsharded_id": 0
+ },
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :unsharded_id",
+ "Table": "`user`",
+ "Values": [
+ ":unsharded_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: choose the redirected table",
+ "query": "select col from route1 where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col)",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col) and u.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery merge-able into a route of a join tree",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id)",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id) and u.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "ensure subquery reordering gets us a better plan",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id = 5 and u.id in (select m2 from `user` where `user`.id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = 5) and u.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "nested subquery",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id))",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id)) and u.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Correlated subquery in where clause",
+ "query": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "outer and inner subquery route by same int val",
+ "query": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "outer and inner subquery route by same str val",
+ "query": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "outer and inner subquery route by same val arg",
+ "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Table": "`user`",
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Table": "`user`",
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unresolved symbol in inner subquery.",
+ "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a and foo.id = 1)",
+ "v3-plan": "VT03019: symbol foo.id not found",
+ "gen4-plan": "symbol foo.id not found"
+ },
+ {
+ "comment": "outer and inner subquery route by same outermost column value",
+ "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` as uu where 1 != 1",
+ "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` as uu where 1 != 1",
+ "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery in IN clause.\n# Note the improved Underlying plan as SelectIN.",
+ "query": "select id from user where id in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery in NOT IN clause.",
+ "query": "select id from user where id not in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id not in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id not in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery in EXISTS clause.",
+ "query": "select id from user where exists (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where exists (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where exists (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery as expression",
+ "query": "select id from user where id = (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multi-level pullout",
+ "query": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id3 from `user` where 1 != 1",
+ "Query": "select id3 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` where 1 != 1",
+ "Query": "select id2 from `user` where :__sq_has_values1 = 1 and id2 in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id1 from `user` where 1 != 1",
+ "Query": "select id1 from `user` where id = :__sq2",
+ "Table": "`user`",
+ "Values": [
+ ":__sq2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id3 from `user` where 1 != 1",
+ "Query": "select id3 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` where 1 != 1",
+ "Query": "select id2 from `user` where :__sq_has_values2 = 1 and id2 in ::__sq2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id1 from `user` where 1 != 1",
+ "Query": "select id1 from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules subquery merge",
+ "query": "select col from user where id = (select id from route1 where route1.id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules subquery pullout",
+ "query": "select col from user where id = (select id from route2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route2)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded as route2 where 1 != 1",
+ "Query": "select id from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route2)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded as route2 where 1 != 1",
+ "Query": "select id from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Case preservation test",
+ "query": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where 1 != 1",
+ "Query": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where `user`.Id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.Id from `user`, user_extra where `user`.Id = 5 and `user`.iD = user_extra.User_Id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "database() call in where clause.",
+ "query": "select id from user where database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where database()",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where database()",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Select with equals null",
+ "query": "select id from music where id = null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "SELECT with IS NULL",
+ "query": "select id from music where id is null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "SELECT with IS NOT NULL",
+ "query": "select id from music where id is not null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is not null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is not null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is not null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is not null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and null match",
+ "query": "select id from music where user_id = 4 and id = null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id = null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id = null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and IN (null)",
+ "query": "select id from music where user_id = 4 and id IN (null)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null)",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null)",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and IN (null, 1, 2)",
+ "query": "select id from music where user_id = 4 and id IN (null, 1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and NOT IN (null, 1, 2)",
+ "query": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and NOT IN (null, 1, 2) predicates inverted",
+ "query": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "pullout sq after pullout sq",
+ "query": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(42)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(411)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and (:__sq_has_values2 = 0 or id not in ::__sq2)",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(411)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(42)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (:__sq_has_values1 = 0 or id not in ::__sq1) and (:__sq_has_values2 = 1 and id in ::__vals)",
+ "Table": "`user`",
+ "Values": [
+ ":__sq2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "solving LIKE query with a CFC prefix vindex",
+ "query": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
+ "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Table": "cfc_vindex_col",
+ "Values": [
+ "VARCHAR(\"A%\")"
+ ],
+ "Vindex": "cfc"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
+ "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Table": "cfc_vindex_col",
+ "Values": [
+ "VARCHAR(\"A%\")"
+ ],
+ "Vindex": "cfc"
+ },
+ "TablesUsed": [
+ "user.cfc_vindex_col"
+ ]
+ }
+ },
+ {
+ "comment": "select * from samecolvin where col = :col",
+ "query": "select * from samecolvin where col = :col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from samecolvin where col = :col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from samecolvin where 1 != 1",
+ "Query": "select col from samecolvin where col = :col",
+ "Table": "samecolvin",
+ "Values": [
+ ":col"
+ ],
+ "Vindex": "vindex1"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from samecolvin where col = :col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from samecolvin where 1 != 1",
+ "Query": "select col from samecolvin where col = :col",
+ "Table": "samecolvin",
+ "Values": [
+ ":col"
+ ],
+ "Vindex": "vindex1"
+ },
+ "TablesUsed": [
+ "user.samecolvin"
+ ]
+ }
+ },
+ {
+ "comment": "non unique predicate on vindex",
+ "query": "select id from user where user.id > 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id > 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id > 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id > 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id > 5",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select from unsharded keyspace with uncorrelated subquery which should be merged to a single route",
+ "query": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "in subquery the id will be scoped to local table as there is no qualifier associated with it.",
+ "query": "select id from user where id in (select col from unsharded where col = id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from unsharded where col = id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded where col = id",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from unsharded where col = id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded where col = id",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery with different keyspace tables involved",
+ "query": "select id from user where id in (select col from unsharded where col = user.id)",
+ "plan": "VT12001: unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "correlated subquery with same keyspace",
+ "query": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "SelectDBA with uncorrelated subqueries",
+ "query": "select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select t.table_schema from information_schema.`tables` as t where 1 != 1",
+ "Query": "select t.table_schema from information_schema.`tables` as t where t.table_schema in (select c.column_name from information_schema.`columns` as c)",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "SelectReference with uncorrelated subqueries",
+ "query": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref where 1 != 1",
+ "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref where 1 != 1",
+ "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "SelectEqualUnique with uncorrelated subqueries",
+ "query": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SelectEqualUnique with EXISTS uncorrelated subquery",
+ "query": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SelectEqualUnique with NOT EXISTS uncorrelated subquery",
+ "query": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SelectScatter with NOT EXISTS uncorrelated subquery",
+ "query": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "The outer and first inner are SelectEqualUnique with same Vindex value, the second inner has different Vindex value",
+ "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and :__sq_has_values1 = 1 and id in ::__sq1 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values2 = 1 and id in ::__sq2)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "The outer and second inner are SelectEqualUnique with same Vindex value, the first inner has different Vindex value",
+ "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values1 = 0 or id not in ::__sq1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and (:__sq_has_values1 = 0 or id not in ::__sq1) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "two correlated subqueries that can be merge in a single route",
+ "query": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "transitive closures for the win",
+ "query": "select id from user where user.id = user.col and user.col = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = user.col and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = user.col and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join with transitive closures",
+ "query": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_id and user_extra.col = user_extra.user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user`, user_extra where 1 != 1",
+ "Query": "select id from `user`, user_extra where user_extra.col = user_extra.user_id and `user`.id = user_extra.col",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "not supported transitive closures with equality inside of an OR",
+ "query": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_id and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`)",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = :user_extra_col",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules subquery merge with alias",
+ "query": "select col from user where id = (select id from route1 as a where a.id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "left join where clauses where we can optimize into an inner join",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.foobar = 5 and user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "this query lead to a nil pointer error",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where foo(user_extra.foobar)",
+ "plan": "expr cannot be translated, not supported: foo(user_extra.foobar)"
+ },
+ {
+ "comment": "filter after outer join",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "user_extra.id is null",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "subquery on other table",
+ "query": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where col2 = 'a'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), 1",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where col2 = 'a'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, 1 ASC",
+ "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1 order by `user`.id asc, `user`.col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "should use colb_colc_map as first column of the vindex is present in predicate",
+ "query": "select * from multicolvin where column_b = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "should only use first column of the vindex colb_colc_map",
+ "query": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "uses vindex colb_colc_map",
+ "query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "v3 takes cola_map, gen4 takes colb_colc_map, may be based on map key ordering",
+ "query": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "cola_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex produces Equal plan in gen4 and Scatter in v3",
+ "query": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with different order places the vindex keys in correct order",
+ "query": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex produces IN plan in gen4 and Scatter in v3",
+ "query": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in (1, 2) and colb in (3, 4)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in ::__vals0 and colb in ::__vals1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))",
+ "(INT64(3), INT64(4))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with different order places the vindex keys in correct order in IN plan in gen4",
+ "query": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (3, 4) and cola in (1, 2)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in ::__vals1 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))",
+ "(INT64(3), INT64(4))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with different order with one IN predicate and one equality",
+ "query": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 1 and cola in (3, 4)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 1 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(3), INT64(4))",
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "deconstruct tuple equality comparisons",
+ "query": "select id from user where (id, name) = (34, 'apa')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (id, name) = (34, 'apa')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (id, `name`) = (34, 'apa')",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (id, name) = (34, 'apa')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 34 and `name` = 'apa'",
+ "Table": "`user`",
+ "Values": [
+ "INT64(34)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with both IN predicate and equality predicate",
+ "query": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(4)",
+ "INT64(7)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with one column with equal followed by IN predicate, ordering matters for now",
+ "query": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 4 and colb in (1, 10) and cola in (5, 6)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 4 and colb in ::__vals1 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(5), INT64(6))",
+ "(INT64(1), INT64(10))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with one column with IN followed by equal predicate, ordering matters for now",
+ "query": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in (5, 6)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(5), INT64(6))",
+ "INT64(4)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with better plan selection",
+ "query": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(5)",
+ "INT64(6)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex as tuple",
+ "query": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(3))",
+ "(INT64(2), INT64(4))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex, partial vindex with SelectEqual",
+ "query": "select * from multicol_tbl where cola = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex, partial vindex with SelectEqual over full vindex with SelectIN",
+ "query": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb in (2, 3)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb in ::__vals1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "(INT64(2), INT64(3))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "left join with where clause - should be handled by gen4 but still isn't",
+ "query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where 1 != 1",
+ "Query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "filter on outer join should not be used for routing",
+ "query": "select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where 1 != 1",
+ "Query": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where `user`.id is null",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "conditions following a null safe comparison operator can be used for routing",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id <=> NULL AND music.user_id = 10",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id <=> NULL AND music.user_id = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 10 and `user`.id <=> null",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(10)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, where conditions using both sides of the join are not pulled into the join conditions",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, where conditions using both sides of the join are not pulled into the join conditions (swapped order)",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, null intolerant where conditions using both sides of the join are transformed to inner joins",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id = 5 and music.user_id = `user`.id and music.componist = `user`.`name`",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, null intolerant where conditions using `IS NOT NULL` allow outer join simplification",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id = 5 and `user`.id is not null and `user`.id = music.user_id",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "optimize ORs to IN route op codes #1",
+ "query": "select col from user where id = 1 or id = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "optimize ORs to IN route op codes #2",
+ "query": "select col from user where id = 1 or id = 2 or id = 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2 or id = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2 or id = 3",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2 or id = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "optimize ORs to IN route op codes #3",
+ "query": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3), INT64(4))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Don't pick a vindex for an IS NULL predicate if it's a lookup vindex",
+ "query": "select id from music where id is null and user_id in (1,2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null and user_id in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null and user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null and user_id in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null and user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Self referencing columns in HAVING should work",
+ "query": "select a+2 as a from user having a = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a+2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a + 2 as a from `user` where 1 != 1",
+ "Query": "select a + 2 as a from `user` having a = 42",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a+2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a + 2 as a from `user` where 1 != 1",
+ "Query": "select a + 2 as a from `user` where a + 2 = 42",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING predicates that use table columns are safe to rewrite if we can move them to the WHERE clause",
+ "query": "select user.col + 2 as a from user having a = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col + 2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col + 2 as a from `user` where 1 != 1",
+ "Query": "select `user`.col + 2 as a from `user` having a = 42",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col + 2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col + 2 as a from `user` where 1 != 1",
+ "Query": "select `user`.col + 2 as a from `user` where `user`.col + 2 = 42",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING predicates that use table columns should not get rewritten on unsharded keyspaces",
+ "query": "select col + 2 as a from unsharded having a = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col + 2 as a from unsharded having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col + 2 as a from unsharded where 1 != 1",
+ "Query": "select col + 2 as a from unsharded having a = 42",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col + 2 as a from unsharded having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col + 2 as a from unsharded where 1 != 1",
+ "Query": "select col + 2 as a from unsharded having a = 42",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Single table unique vindex route hiding behind a silly OR",
+ "query": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and `name` = 'apa' or id = 5 and foo = 'bar'",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and (`name` = 'apa' or foo = 'bar')",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table IN vindex route hiding behind OR",
+ "query": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and `name` = 'foo' or id = 12 and `name` = 'bar'",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and `name` = 'foo' or id = 12 and `name` = 'bar'",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(5), INT64(12))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Like clause evaluated on the vtgate",
+ "query": "select a.textcol1 from user a join user b where a.textcol1 = b.textcol2 group by a.textcol1 having repeat(a.textcol1,sum(a.id)) like \"And%res\"",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.textcol1 from user a join user b where a.textcol1 = b.textcol2 group by a.textcol1 having repeat(a.textcol1,sum(a.id)) like \"And%res\"",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "repeat(a.textcol1, :1) like 'And%res'",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS sum(a.id)",
+ "GroupBy": "0 COLLATE latin1_swedish_ci",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as textcol1",
+ "[COLUMN 1] * COALESCE([COLUMN 2], INT64(1)) as sum(a.id)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:1",
+ "JoinVars": {
+ "a_textcol1": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.textcol1, sum(a.id) from `user` as a where 1 != 1 group by a.textcol1",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
+ "Query": "select a.textcol1, sum(a.id) from `user` as a group by a.textcol1 order by a.textcol1 asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from `user` as b where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from `user` as b where b.textcol2 = :a_textcol1 group by 1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "two predicates that mean the same thing",
+ "query": "select textcol1 from user where foo = 42 and user.foo = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select textcol1 from user where foo = 42 and user.foo = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select textcol1 from `user` where 1 != 1",
+ "Query": "select textcol1 from `user` where foo = 42 and `user`.foo = 42",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select textcol1 from user where foo = 42 and user.foo = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select textcol1 from `user` where 1 != 1",
+ "Query": "select textcol1 from `user` where foo = 42",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt
deleted file mode 100644
index 75ef9178495..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt
+++ /dev/null
@@ -1,6238 +0,0 @@
-# No where clause
-"select id from user"
-{
- "QueryType": "SELECT",
- "Original": "select id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Query that always return empty
-"select id from user where someColumn = null"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where someColumn = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn = null",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where someColumn = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn = null",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Null Safe Equality Operator is handled correctly
-"SELECT id from user where someColumn \u003c=\u003e null"
-{
- "QueryType": "SELECT",
- "Original": "SELECT id from user where someColumn \u003c=\u003e null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn \u003c=\u003e null",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT id from user where someColumn \u003c=\u003e null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn \u003c=\u003e null",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table unique vindex route
-"select id from user where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table unique vindex route, but complex expr
-"select id from user where user.id = 5+5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5+5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5 + 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5+5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5 + 5",
- "Table": "`user`",
- "Values": [
- "INT64(10)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table multiple unique vindex match
-"select id from music where id = 5 and user_id = 4"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = 5 and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = 5 and user_id = 4",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = 5 and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = 5 and user_id = 4",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table multiple non-unique vindex match
-"select id from user where costly = 'aa' and name = 'bb'"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly = 'aa' and name = 'bb'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"bb\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly = 'aa' and name = 'bb'",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"bb\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table multiple non-unique vindex match for IN clause
-"select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause
-"select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, swapped columns
-"select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, choose cost within tuple
-"select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, choose cost within tuple, swapped
-"select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, choose cost
-"select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause vs equality
-"select id from user where (col, name) in (('aa', 'bb')) and id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: multiple vindex matches
-"select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: tuple inside tuple
-"select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: tuple inside tuple, but no match in tuple
-"select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: tuple inside tuple, mismiatched values
-"select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: RHS not tuple
-"select id from user where (col1, name) in (select * from music where music.user_id=user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Composite IN: RHS has no simple values
-"select id from user where (col1, name) in (('aa', 1+1))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (('aa', 1+1))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (('aa', 1+1))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(INT64(2))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# IN clause: LHS is neither column nor composite tuple
-"select Id from user where 1 in ('aa', 'bb')"
-{
- "QueryType": "SELECT",
- "Original": "select Id from user where 1 in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select Id from `user` where 1 != 1",
- "Query": "select Id from `user` where 1 in ('aa', 'bb')",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select Id from user where 1 in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select Id from `user` where 1 != 1",
- "Query": "select Id from `user` where 1 in ('aa', 'bb')",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table complex in clause
-"select id from user where name in (col, 'bb')"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in (col, 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in (col, 'bb')",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in (col, 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in (col, 'bb')",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table equality route with val arg
-"select id from user where name = :a"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = :a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = :a",
- "Table": "`user`",
- "Values": [
- ":a"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = :a",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- ":a"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = :a",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table equality route with unsigned value
-"select id from user where name = 18446744073709551615"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = 18446744073709551615",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = 18446744073709551615",
- "Table": "`user`",
- "Values": [
- "UINT64(18446744073709551615)"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = 18446744073709551615",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "UINT64(18446744073709551615)"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = 18446744073709551615",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table in clause list arg
-"select id from user where name in ::list"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in ::list",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in ::__vals",
- "Table": "`user`",
- "Values": [
- ":list"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in ::list",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- ":list"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in ::__vals",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Multi-table unique vindex constraint
-"select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-table unique vindex constraint on right table
-"select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-table unique vindex constraint on left table of left join
-"select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Multi-table unique vindex constraint on left-joined right table
-"select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Multi-route unique vindex constraint
-"select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-route unique vindex route on both routes
-"select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = 5",
- "Table": "user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and user_extra.user_id = 5 and `user`.col = user_extra.col",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-route with cross-route constraint
-"select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
- "Table": "user_extra",
- "Values": [
- ":user_col"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
- "Table": "user_extra",
- "Values": [
- ":user_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-route with non-route constraint, should use first route.
-"select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where 1 = 1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where 1 = 1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where 1 = 1 and user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Route with multiple route constraints, SelectIN is the best constraint.
-"select id from user where user.col = 5 and user.id in (1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints and boolean, SelectIN is the best constraint.
-"select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints and boolean, SelectEqual is the best constraint.
-"select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'"
-{
- "QueryType": "SELECT",
- "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id or col as val from `user` where 1 != 1",
- "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id or col as val from `user` where 1 != 1",
- "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints, SelectEqual is the best constraint.
-"select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints, SelectEqualUnique is the best constraint.
-"select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints, SelectEqualUnique is the best constraint, order reversed.
-"select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with OR and AND clause, must parenthesize correctly.
-"select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Unsharded route
-"select unsharded.id from user join unsharded where unsharded.id = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "unsharded_id": 0
- },
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :unsharded_id",
- "Table": "`user`",
- "Values": [
- ":unsharded_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# routing rules: choose the redirected table
-"select col from route1 where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# subquery
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col)",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col) and u.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery merge-able into a route of a join tree
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id)",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id) and u.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# ensure subquery reordering gets us a better plan
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id = 5 and u.id in (select m2 from `user` where `user`.id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = 5) and u.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# nested subquery
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id))",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id)) and u.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Correlated subquery in where clause
-"select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# outer and inner subquery route by same int val
-"select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# outer and inner subquery route by same str val
-"select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# outer and inner subquery route by same val arg
-"select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Table": "`user`",
- "Values": [
- ":a"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Table": "`user`",
- "Values": [
- ":a"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# unresolved symbol in inner subquery.
-"select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a and foo.id = 1)"
-"symbol foo.id not found"
-Gen4 plan same as above
-
-# outer and inner subquery route by same outermost column value
-"select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))"
-{
- "QueryType": "SELECT",
- "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` as uu where 1 != 1",
- "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` as uu where 1 != 1",
- "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# cross-shard subquery in IN clause.
-# Note the improved Underlying plan as SelectIN.
-"select id from user where id in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard subquery in NOT IN clause.
-"select id from user where id not in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id not in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id not in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard subquery in EXISTS clause.
-"select id from user where exists (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where exists (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where exists (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard subquery as expression
-"select id from user where id = (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multi-level pullout
-"select id1 from user where id = (select id2 from user where id2 in (select id3 from user))"
-{
- "QueryType": "SELECT",
- "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id3 from `user` where 1 != 1",
- "Query": "select id3 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` where 1 != 1",
- "Query": "select id2 from `user` where :__sq_has_values1 = 1 and id2 in ::__sq1",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id1 from `user` where 1 != 1",
- "Query": "select id1 from `user` where id = :__sq2",
- "Table": "`user`",
- "Values": [
- ":__sq2"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id3 from `user` where 1 != 1",
- "Query": "select id3 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` where 1 != 1",
- "Query": "select id2 from `user` where :__sq_has_values2 = 1 and id2 in ::__sq2",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id1 from `user` where 1 != 1",
- "Query": "select id1 from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules subquery merge
-"select col from user where id = (select id from route1 where route1.id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules subquery pullout
-"select col from user where id = (select id from route2)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route2)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded as route2 where 1 != 1",
- "Query": "select id from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route2)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded as route2 where 1 != 1",
- "Query": "select id from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Case preservation test
-"select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where 1 != 1",
- "Query": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where `user`.Id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.Id from `user`, user_extra where `user`.Id = 5 and `user`.iD = user_extra.User_Id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# database() call in where clause.
-"select id from user where database()"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where database()",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where database()",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Select with equals null
-"select id from music where id = null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# SELECT with IS NULL
-"select id from music where id is null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# SELECT with IS NOT NULL
-"select id from music where id is not null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is not null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is not null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is not null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is not null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and null match
-"select id from music where user_id = 4 and id = null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id = null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id = null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and IN (null)
-"select id from music where user_id = 4 and id IN (null)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null)",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null)",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and IN (null, 1, 2)
-"select id from music where user_id = 4 and id IN (null, 1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and NOT IN (null, 1, 2)
-"select id from music where user_id = 4 and id NOT IN (null, 1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and NOT IN (null, 1, 2) predicates inverted
-"select id from music where id NOT IN (null, 1, 2) and user_id = 4"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# pullout sq after pullout sq
-"select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
- "Table": "user_extra",
- "Values": [
- "INT64(42)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
- "Table": "user_extra",
- "Values": [
- "INT64(411)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and (:__sq_has_values2 = 0 or id not in ::__sq2)",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
- "Table": "user_extra",
- "Values": [
- "INT64(411)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
- "Table": "user_extra",
- "Values": [
- "INT64(42)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (:__sq_has_values1 = 0 or id not in ::__sq1) and (:__sq_has_values2 = 1 and id in ::__vals)",
- "Table": "`user`",
- "Values": [
- ":__sq2"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# solving LIKE query with a CFC prefix vindex
-"select c2 from cfc_vindex_col where c1 like 'A%'"
-{
- "QueryType": "SELECT",
- "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
- "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Table": "cfc_vindex_col",
- "Values": [
- "VARCHAR(\"A%\")"
- ],
- "Vindex": "cfc"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
- "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Table": "cfc_vindex_col",
- "Values": [
- "VARCHAR(\"A%\")"
- ],
- "Vindex": "cfc"
- },
- "TablesUsed": [
- "user.cfc_vindex_col"
- ]
-}
-
-"select * from samecolvin where col = :col"
-{
- "QueryType": "SELECT",
- "Original": "select * from samecolvin where col = :col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from samecolvin where 1 != 1",
- "Query": "select col from samecolvin where col = :col",
- "Table": "samecolvin",
- "Values": [
- ":col"
- ],
- "Vindex": "vindex1"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from samecolvin where col = :col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from samecolvin where 1 != 1",
- "Query": "select col from samecolvin where col = :col",
- "Table": "samecolvin",
- "Values": [
- ":col"
- ],
- "Vindex": "vindex1"
- },
- "TablesUsed": [
- "user.samecolvin"
- ]
-}
-
-# non unique predicate on vindex
-"select id from user where user.id \u003e 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id \u003e 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id \u003e 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id \u003e 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id \u003e 5",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select from unsharded keyspace with uncorrelated subquery which should be merged to a single route
-"select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# in subquery the id will be scoped to local table as there is no qualifier associated with it.
-"select id from user where id in (select col from unsharded where col = id)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from unsharded where col = id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded where col = id",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from unsharded where col = id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded where col = id",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# correlated subquery with different keyspace tables involved
-"select id from user where id in (select col from unsharded where col = user.id)"
-"unsupported: cross-shard correlated subquery"
-Gen4 plan same as above
-
-# correlated subquery with same keyspace
-"select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# SelectDBA with uncorrelated subqueries
-"select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)"
-{
- "QueryType": "SELECT",
- "Original": "select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.table_schema from information_schema.`tables` as t where 1 != 1",
- "Query": "select t.table_schema from information_schema.`tables` as t where t.table_schema in (select c.column_name from information_schema.`columns` as c)",
- "Table": "information_schema.`tables`"
- }
-}
-Gen4 plan same as above
-
-# SelectReference with uncorrelated subqueries
-"select ref.col from ref where ref.col in (select ref.col from ref)"
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref where 1 != 1",
- "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref where 1 != 1",
- "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# SelectEqualUnique with uncorrelated subqueries
-"select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# SelectEqualUnique with EXISTS uncorrelated subquery
-"select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# SelectEqualUnique with NOT EXISTS uncorrelated subquery
-"select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# SelectScatter with NOT EXISTS uncorrelated subquery
-"select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# The outer and first inner are SelectEqualUnique with same Vindex value, the second inner has different Vindex value
-"select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and :__sq_has_values1 = 1 and id in ::__sq1 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values2 = 1 and id in ::__sq2)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# The outer and second inner are SelectEqualUnique with same Vindex value, the first inner has different Vindex value
-"select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values1 = 0 or id not in ::__sq1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and (:__sq_has_values1 = 0 or id not in ::__sq1) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# two correlated subqueries that can be merge in a single route
-"select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# transitive closures for the win
-"select id from user where user.id = user.col and user.col = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = user.col and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = user.col and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join with transitive closures
-"select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_id and user_extra.col = user_extra.user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user`, user_extra where 1 != 1",
- "Query": "select id from `user`, user_extra where user_extra.col = user_extra.user_id and `user`.id = user_extra.col",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# not supported transitive closures with equality inside of an OR
-"select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_id and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`)",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = :user_extra_col",
- "Table": "`user`",
- "Values": [
- ":user_extra_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# routing rules subquery merge with alias
-"select col from user where id = (select id from route1 as a where a.id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# left join where clauses where we can optimize into an inner join
-"select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.foobar = 5 and user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# this query lead to a nil pointer error
-"select user.id from user left join user_extra on user.col = user_extra.col where foo(user_extra.foobar)"
-"expr cannot be translated, not supported: foo(user_extra.foobar)"
-Gen4 plan same as above
-
-# filter after outer join
-"select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": "user_extra.id is null",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-#subquery on other table
-"select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')"
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where col2 = 'a'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), 1",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where col2 = 'a'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, 1 ASC",
- "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1 order by `user`.id asc, `user`.col asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# should use colb_colc_map as first column of the vindex is present in predicate
-"select * from multicolvin where column_b = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# should only use first column of the vindex colb_colc_map
-"select * from multicolvin where column_b = 1 and column_c = 2"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# uses vindex colb_colc_map
-"select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# v3 takes cola_map, gen4 takes colb_colc_map, may be based on map key ordering
-"select * from multicolvin where column_a = 3 and column_b = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "cola_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# multi column vindex produces Equal plan in gen4 and Scatter in v3
-"select * from multicol_tbl where cola = 1 and colb = 2"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with different order places the vindex keys in correct order
-"select * from multicol_tbl where colb = 2 and cola = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex produces IN plan in gen4 and Scatter in v3
-"select * from multicol_tbl where cola in (1,2) and colb in (3,4)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in (1, 2) and colb in (3, 4)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in ::__vals0 and colb in ::__vals1",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))",
- "(INT64(3), INT64(4))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with different order places the vindex keys in correct order in IN plan in gen4
-"select * from multicol_tbl where colb in (3,4) and cola in (1,2)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (3, 4) and cola in (1, 2)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in ::__vals1 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))",
- "(INT64(3), INT64(4))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with different order with one IN predicate and one equality
-"select * from multicol_tbl where colb = 1 and cola in (3,4)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 1 and cola in (3, 4)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 1 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(3), INT64(4))",
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with both IN predicate and equality predicate
-"select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(4)",
- "INT64(7)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with one column with equal followed by IN predicate, ordering matters for now
-"select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 4 and colb in (1, 10) and cola in (5, 6)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 4 and colb in ::__vals1 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(5), INT64(6))",
- "(INT64(1), INT64(10))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with one column with IN followed by equal predicate, ordering matters for now
-"select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in (5, 6)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(5), INT64(6))",
- "INT64(4)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with better plan selection
-"select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(5)",
- "INT64(6)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex as tuple
-"select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(3))",
- "(INT64(2), INT64(4))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex, partial vindex with SelectEqual
-"select * from multicol_tbl where cola = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex, partial vindex with SelectEqual over full vindex with SelectIN
-"select * from multicol_tbl where cola = 1 and colb in (2,3)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb in (2, 3)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb in ::__vals1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "(INT64(2), INT64(3))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# left join with where clause - should be handled by gen4 but still isn't
-"select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5"
-{
- "QueryType": "SELECT",
- "Original": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where 1 != 1",
- "Query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# filter on outer join should not be used for routing
-"select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where 1 != 1",
- "Query": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where `user`.id is null",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# conditions following a null safe comparison operator can be used for routing
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id <=> NULL AND music.user_id = 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id \u003c=\u003e NULL AND music.user_id = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 10 and `user`.id \u003c=\u003e null",
- "Table": "`user`, music",
- "Values": [
- "INT64(10)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, where conditions using both sides of the join are not pulled into the join conditions
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, where conditions using both sides of the join are not pulled into the join conditions (swapped order)
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, null intolerant where conditions using both sides of the join are transformed to inner joins
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id = 5 and music.user_id = `user`.id and music.componist = `user`.`name`",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, null intolerant where conditions using `IS NOT NULL` allow outer join simplification
-"SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id = 5 and `user`.id is not null and `user`.id = music.user_id",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# optimize ORs to IN route op codes #1
-"select col from user where id = 1 or id = 2"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# optimize ORs to IN route op codes #2
-"select col from user where id = 1 or id = 2 or id = 3"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2 or id = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or id = 3",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2 or id = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or id = 3",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# optimize ORs to IN route op codes #3
-"select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2), INT64(3), INT64(4))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Don't pick a vindex for an IS NULL predicate if it's a lookup vindex
-"select id from music where id is null and user_id in (1,2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null and user_id in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null and user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null and user_id in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null and user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases.json b/go/vt/vtgate/planbuilder/testdata/flush_cases.json
new file mode 100644
index 00000000000..8298c6de649
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/flush_cases.json
@@ -0,0 +1,57 @@
+[
+ {
+ "comment": "Flush statement",
+ "query": "flush tables unsharded, music",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush tables unsharded, music",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush tables unsharded, music"
+ },
+ "TablesUsed": [
+ "main.music",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Flush statement with no tables",
+ "query": "flush local tables with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables with read lock",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables with read lock"
+ }
+ }
+ },
+ {
+ "comment": "Flush statement with flush options",
+ "query": "flush no_write_to_binlog hosts, logs",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush no_write_to_binlog hosts, logs",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local hosts, logs"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases.txt b/go/vt/vtgate/planbuilder/testdata/flush_cases.txt
deleted file mode 100644
index c94e4316c2e..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/flush_cases.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-# Flush statement
-"flush tables unsharded, music"
-{
- "QueryType": "FLUSH",
- "Original": "flush tables unsharded, music",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush tables unsharded, music"
- },
- "TablesUsed": [
- "main.music",
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Flush statement with no tables
-"flush local tables with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables with read lock",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables with read lock"
- }
-}
-Gen4 plan same as above
-
-# Flush statement with flush options
-"flush no_write_to_binlog hosts, logs"
-{
- "QueryType": "FLUSH",
- "Original": "flush no_write_to_binlog hosts, logs",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local hosts, logs"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json
new file mode 100644
index 00000000000..871c30c2ea6
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json
@@ -0,0 +1,149 @@
+[
+ {
+ "comment": "Flush statement",
+ "query": "flush local tables user, unsharded_a, user_extra with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables user, unsharded_a, user_extra with read lock",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded_a with read lock"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables `user`, user_extra with read lock"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Flush statement with flush options",
+ "query": "flush no_write_to_binlog hosts, logs",
+ "plan": "VT03007: keyspace not specified"
+ },
+ {
+ "comment": "Flush statement with routing rules",
+ "query": "flush local tables route1, route2",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables route1, route2",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables `user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Incorrect tables in flush",
+ "query": "flush tables user.a with read lock",
+ "plan": "table a not found"
+ },
+ {
+ "comment": "Unknown tables in unsharded keyspaces are allowed",
+ "query": "flush tables main.a with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush tables main.a with read lock",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush tables a with read lock"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Flush statement with 3 keyspaces",
+ "query": "flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded_a with read lock"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded_tab with read lock"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables `user`, user_extra with read lock"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main_2.unsharded_tab",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt
deleted file mode 100644
index 3bc63561179..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt
+++ /dev/null
@@ -1,146 +0,0 @@
-# Flush statement
-"flush local tables user, unsharded_a, user_extra with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables user, unsharded_a, user_extra with read lock",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded_a with read lock"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables `user`, user_extra with read lock"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Flush statement with flush options
-"flush no_write_to_binlog hosts, logs"
-"keyspace not specified"
-Gen4 plan same as above
-
-# Flush statement with routing rules
-"flush local tables route1, route2"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables route1, route2",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables `user`"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Incorrect tables in flush
-"flush tables user.a with read lock"
-"table a not found"
-Gen4 plan same as above
-
-# Unknown tables in unsharded keyspaces are allowed
-"flush tables main.a with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush tables main.a with read lock",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush tables a with read lock"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Flush statement with 3 keyspaces
-"flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded_a with read lock"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded_tab with read lock"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables `user`, user_extra with read lock"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main_2.unsharded_tab",
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json
new file mode 100644
index 00000000000..5687c950bdc
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json
@@ -0,0 +1,6503 @@
+[
+ {
+ "comment": "Single table sharded scatter",
+ "query": "select col from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table unsharded",
+ "query": "select col from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Select from sequence",
+ "query": "select next 2 values from seq",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select next 2 values from seq",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 2 values from seq where 1 != 1",
+ "Query": "select next 2 values from seq",
+ "Table": "seq"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select next 2 values from seq",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 2 values from seq where 1 != 1",
+ "Query": "select next 2 values from seq",
+ "Table": "seq"
+ },
+ "TablesUsed": [
+ "main.seq"
+ ]
+ }
+ },
+ {
+ "comment": "select next from non-sequence table",
+ "query": "select next value from user",
+ "v3-plan": "VT03018: NEXT used on a non-sequence table",
+ "gen4-plan": "NEXT used on a non-sequence table"
+ },
+ {
+ "comment": "select next in derived table",
+ "query": "select 1 from (select next value from seq) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select next value from seq) t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
+ "Query": "select 1 from (select next 1 values from seq) as t",
+ "Table": "seq"
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "select next in derived table",
+ "query": "select * from (select next value from seq) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select next value from seq) t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
+ "Query": "select * from (select next 1 values from seq) as t",
+ "Table": "seq"
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "select next in subquery",
+ "query": "select 1 from user where id in (select next value from seq)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user where id in (select next value from seq)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 1 values from seq where 1 != 1",
+ "Query": "select next 1 values from seq",
+ "Table": "seq"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "select next in projection",
+ "query": "select (select next value from seq) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select next value from seq) from user",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 1 values from seq where 1 != 1",
+ "Query": "select next 1 values from seq",
+ "Table": "seq"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from `user` where 1 != 1",
+ "Query": "select :__sq1 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "Select from reference",
+ "query": "select * from ref",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from ref where 1 != 1",
+ "Query": "select * from ref",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from ref where 1 != 1",
+ "Query": "select * from ref",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unsharded",
+ "query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table, multi-chunk",
+ "query": "select music.col from user join music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user join music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user join music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name matches, and there's no alias.",
+ "query": "select * from second_user.user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name matches, and there's an alias.",
+ "query": "select * from second_user.user as a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name does not match, and there's no alias.",
+ "query": "select * from route1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as route1 where 1 != 1",
+ "Query": "select * from `user` as route1",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as route1 where 1 != 1",
+ "Query": "select * from `user` as route1",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name does not match, and there's an alias.",
+ "query": "select * from route1 as a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1 as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1 as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules with primary targeting",
+ "query": "select * from primary_redirect",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from primary_redirect",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
+ "Query": "select * from `user` as primary_redirect",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from primary_redirect",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
+ "Query": "select * from `user` as primary_redirect",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules bad table",
+ "query": "select * from bad_table",
+ "plan": "VT05003: unknown database 'noks' in vschema"
+ },
+ {
+ "comment": "routing rules disabled table",
+ "query": "select * from disabled",
+ "plan": "table disabled has been disabled"
+ },
+ {
+ "comment": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "query": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where 1 != 1",
+ "Query": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where foo.col = 42",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo.col from `user` as foo, `user` where 1 != 1",
+ "Query": "select foo.col from `user` as foo, `user` where foo.col = 42 and foo.id = `user`.id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "query": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "music_id": 1
+ },
+ "TableName": "music_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.foo, music.id from music where 1 != 1",
+ "Query": "select music.foo, music.id from music where music.col = 42",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :music_id",
+ "Table": "`user`",
+ "Values": [
+ ":music_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "music_id": 0
+ },
+ "TableName": "music_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id, music.foo from music where 1 != 1",
+ "Query": "select music.id, music.foo from music where music.col = 42",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :music_id",
+ "Table": "`user`",
+ "Values": [
+ ":music_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "',' join",
+ "query": "select music.col from user, music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user, music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user, music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "',' join unsharded",
+ "query": "select u1.a, u2.a from unsharded u1, unsharded u2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "',' 3-way join unsharded",
+ "query": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Left join, single chunk",
+ "query": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Left join, multi-chunk",
+ "query": "select u.col from user u left join unsharded m on u.a = m.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col from user u left join unsharded m on u.a = m.b",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u_a": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.col from `user` as u where 1 != 1",
+ "Query": "select u.a, u.col from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m where 1 != 1",
+ "Query": "select 1 from unsharded as m where m.b = :u_a",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Three-way left join",
+ "query": "select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "m1_col": 0
+ },
+ "TableName": "`user`_unsharded_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0,L:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 where m1.col = :user_col",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m2.foo from unsharded as m2 where 1 != 1",
+ "Query": "select m2.foo from unsharded as m2 where m2.col = :m1_col",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Three-way left join, right-associated",
+ "query": "select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinVars": {
+ "e_col": 0
+ },
+ "TableName": "user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.col from user_extra as e where 1 != 1",
+ "Query": "select e.col from user_extra as e where e.col = :user_col",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
+ "Query": "select 1 from unsharded as m1 where m1.col = :e_col",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Right join",
+ "query": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Right join with a join LHS",
+ "query": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Straight-join (Gen4 ignores the straight_join hint)",
+ "query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 straight_join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Three-way join",
+ "query": "select user.col from user join unsharded as m1 join unsharded as m2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
+ "Query": "select 1 from unsharded as m1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m2 where 1 != 1",
+ "Query": "select 1 from unsharded as m2",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
+ "Query": "select 1 from unsharded as m1, unsharded as m2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Parenthesized, single chunk",
+ "query": "select user.col from user join (unsharded as m1 join unsharded as m2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (unsharded as m1 join unsharded as m2) where 1 != 1",
+ "Query": "select 1 from (unsharded as m1 join unsharded as m2)",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
+ "Query": "select 1 from unsharded as m1, unsharded as m2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Parenthesized, multi-chunk",
+ "query": "select user.col from user join (user as u1 join unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (user as u1 join unsharded)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
+ "Query": "select 1 from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (user as u1 join unsharded)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
+ "Query": "select 1 from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "index hints, make sure they are not stripped.",
+ "query": "select user.col from user use index(a)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple index hints, make sure they are not stripped.",
+ "query": "select user.col from user use index(a) use index for group by (b)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a) use index for group by (b)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a) use index for group by (b)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex",
+ "query": "select user.col from user join user_extra on user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex (parenthesized ON clause)",
+ "query": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex, with a stray condition",
+ "query": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex, swapped operands",
+ "query": "select user.col from user join user_extra on user_extra.user_id = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where user_extra.user_id = `user`.id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex, and condition",
+ "query": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join on unique vindex, inequality",
+ "query": "select user.col from user join user_extra on user.id < user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id < user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where :user_id < user_extra.user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id < user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where :user_id < user_extra.user_id",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-col reference RHS",
+ "query": "select user.col from user join user_extra on user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-col reference LHS",
+ "query": "select user.col from user join user_extra on 5 = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on 5 = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on 5 = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-vindex col",
+ "query": "select user.col from user join user_extra on user.id = user_extra.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = :user_extra_col",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-unique vindex",
+ "query": "select user.col from user_extra join user on user_extra.user_id = user.name",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_user_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
+ "Query": "select user_extra.user_id from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.`name` = :user_extra_user_id",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_user_id"
+ ],
+ "Vindex": "name_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "user_name": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.`name`, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.`name`, `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.user_id = :user_name",
+ "Table": "user_extra",
+ "Values": [
+ ":user_name"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join with reference table",
+ "query": "select user.col from user join ref",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join ref where 1 != 1",
+ "Query": "select `user`.col from `user` join ref",
+ "Table": "`user`, ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, ref where 1 != 1",
+ "Query": "select `user`.col from `user`, ref",
+ "Table": "`user`, ref"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "reference table self-join",
+ "query": "select r1.col from ref r1 join ref",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select r1.col from ref r1 join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select r1.col from ref as r1 join ref where 1 != 1",
+ "Query": "select r1.col from ref as r1 join ref",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select r1.col from ref r1 join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select r1.col from ref as r1, ref where 1 != 1",
+ "Query": "select r1.col from ref as r1, ref",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "reference table can merge with other opcodes left to right.",
+ "query": "select ref.col from ref join user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref join `user` where 1 != 1",
+ "Query": "select ref.col from ref join `user`",
+ "Table": "`user`, ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref, `user` where 1 != 1",
+ "Query": "select ref.col from ref, `user`",
+ "Table": "`user`, ref"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "reference table can merge with other opcodes left to right and vindex value is in the plan.\n# This tests that route.Merge also copies the condition to the LHS.",
+ "query": "select ref.col from ref join (select aa from user where user.id=1) user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref join (select aa from `user` where 1 != 1) as `user` where 1 != 1",
+ "Query": "select ref.col from ref join (select aa from `user` where `user`.id = 1) as `user`",
+ "Table": "`user`, ref",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref, (select aa from `user` where 1 != 1) as `user` where 1 != 1",
+ "Query": "select ref.col from ref, (select aa from `user` where `user`.id = 1) as `user`",
+ "Table": "`user`, ref",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for join, unsharded route wins if we can't find a merged route",
+ "query": "select route2.col from route2 join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select route2.col from route2 join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
+ "Query": "select route2.col from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select route2.col from route2 join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
+ "Query": "select route2.col from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table",
+ "query": "select id from (select id, col from user where id = 5) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from user where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from user where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join",
+ "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t, user_extra where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t, user_extra where t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join, and aliased references",
+ "query": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t, user_extra where 1 != 1",
+ "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t, user_extra where t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join, duplicate columns",
+ "query": "select t.id from (select user.id, id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "v3-plan": "VT12001: unsupported: duplicate column aliases: id",
+ "gen4-plan": "Duplicate column name 'id'"
+ },
+ {
+ "comment": "derived table in RHS of join",
+ "query": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from user_extra join (select id from `user` where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from user_extra join (select id from `user` where id = 5) as t on t.id = user_extra.user_id",
+ "Table": "user_extra, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from user_extra, (select id from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select t.id from user_extra, (select id from `user` where id = 5) as t where t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table in FROM with cross-shard join",
+ "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :t_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :t_id",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for derived table",
+ "query": "select id from (select id, col from route1 where id = 5) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1 where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1 where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table missing columns",
+ "query": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from (select id from `user`) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": "symbol t.col not found"
+ },
+ {
+ "comment": "routing rules for derived table where the constraint is in the outer query",
+ "query": "select id from (select id, col from route1) as t where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1) as t where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for derived table where the constraint is in the outer query",
+ "query": "select id from (select id+col as foo from route1) as t where foo = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id+col as foo from route1) as t where foo = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id + col as foo from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id + col as foo from `user` as route1) as t where foo = 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": "symbol id not found"
+ },
+ {
+ "comment": "push predicate on joined derived tables",
+ "query": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t join (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s on t.id = s.id where 1 != 1",
+ "Query": "select t.id from (select id, textcol1 as baz from `user` as route1) as t join (select id, textcol1 + textcol1 as baz from `user`) as s on t.id = s.id where t.baz = '3' and s.baz = '3'",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t, (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s where 1 != 1",
+ "Query": "select t.id from (select id, textcol1 as baz from `user` as route1 where textcol1 = '3') as t, (select id, textcol1 + textcol1 as baz from `user` where textcol1 + textcol1 = '3') as s where t.id = s.id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "recursive derived table predicate push down",
+ "query": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user`) as u) as t where bar = 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where colA + colB + 4 = 5) as u) as t",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "recursive derived table lookups",
+ "query": "select id from (select id from (select id from user) as u) as t where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id from (select id from `user`) as u) as t where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id from (select id from `user` where id = 5) as u) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "merge derived tables with single-shard routes",
+ "query": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1",
+ "Query": "select u.col, e.col from (select col from `user` where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u, (select col from user_extra where 1 != 1) as e where 1 != 1",
+ "Query": "select u.col, e.col from (select col from `user` where id = 5) as u, (select col from user_extra where user_id = 5) as e",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join of information_schema with normal table",
+ "query": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "information_schema.CHARACTER_SETS_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select 1 from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "information_schema.CHARACTER_SETS_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select 1 from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "join of normal table with information_schema",
+ "query": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_information_schema.CHARACTER_SETS",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select 1 from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_information_schema.CHARACTER_SETS",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select 1 from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up on join with cross-shard derived table",
+ "query": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_col1": 0,
+ "t_id": 1
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_col1": 0,
+ "t_id": 1
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up on within cross-shard derived table",
+ "query": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "JoinVars": {
+ "user_col": 2
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1, `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Join with cross-shard derived table on rhs",
+ "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "unsharded_a_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
+ "Query": "select 1 from unsharded_a as ua",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "unsharded_a_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
+ "Query": "select 1 from unsharded_a as ua",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Join with cross-shard derived table on rhs - push down join predicate to derived table",
+ "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "ua_id": 0
+ },
+ "TableName": "unsharded_a_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select ua.id from unsharded_a as ua where 1 != 1",
+ "Query": "select ua.id from unsharded_a as ua",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id",
+ "Table": "`user`",
+ "Values": [
+ ":ua_id"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, single route",
+ "query": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1 where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause as sub-expression",
+ "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1 where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "IN subquery in ON clause, single route",
+ "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1 where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, with join primitives",
+ "query": "select unsharded.col from unsharded join user on user.col in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, with left join primitives\n# The subquery is not pulled all the way out.",
+ "query": "select unsharded.col from unsharded left join user on user.col in (select col from user)",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded left join user on user.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, with join primitives, and join on top\n# The subquery is not pulled all the way out.",
+ "query": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`_unsharded_a",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a where 1 != 1",
+ "Query": "select 1 from unsharded_a",
+ "Table": "unsharded_a"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_unsharded, unsharded_a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded, unsharded_a where 1 != 1",
+ "Query": "select unsharded.col from unsharded, unsharded_a",
+ "Table": "unsharded, unsharded_a"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "keyspace-qualified queries",
+ "query": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_col2": 1
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col1, `user`.col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
+ "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "user_col2": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col2, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col2, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
+ "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "implicit table reference for unsharded keyspace",
+ "query": "select main.foo.col from main.foo",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select main.foo.col from main.foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select foo.col from foo where 1 != 1",
+ "Query": "select foo.col from foo",
+ "Table": "foo"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select main.foo.col from main.foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select foo.col from foo where 1 != 1",
+ "Query": "select foo.col from foo",
+ "Table": "foo"
+ },
+ "TablesUsed": [
+ "main.foo"
+ ]
+ }
+ },
+ {
+ "comment": "col refs should be case-insensitive",
+ "query": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.ID = user_extra.User_Id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join primitive (FROM)",
+ "query": "select id, t.id from (select user.id from user join user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, t.id from (select user.id from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, t.id from (select user.id from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "database call in ON clause.\n# The on clause is weird because the substitution must even for root expressions.",
+ "query": "select u1.a from unsharded u1 join unsharded u2 on database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
+ "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
+ "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for dual",
+ "query": "select last_insert_id()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id()",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__lastInsertId as last_insert_id()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id()",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__lastInsertId as last_insert_id()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for sharded keyspace",
+ "query": "select last_insert_id() from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for unsharded route",
+ "query": "select last_insert_id() from main.unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "join with bindvariables",
+ "query": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.assembly_id = :user_id and user_extra.user_id = 2",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_assembly_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.assembly_id from user_extra where 1 != 1",
+ "Query": "select user_extra.assembly_id from user_extra where user_extra.user_id = 2",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user` where `user`.id = :user_extra_assembly_id",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_assembly_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "verify ',' vs JOIN precedence",
+ "query": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a",
+ "v3-plan": "VT03019: symbol u1.a not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a from unsharded as u1, unsharded as u2 join unsharded as u3 on u1.a = u2.a where 1 != 1",
+ "Query": "select u1.a from unsharded as u1, unsharded as u2 join unsharded as u3 on u1.a = u2.a",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "first expression fails for ',' join (code coverage: ensure error is returned)",
+ "query": "select user.foo.col from user.foo, user",
+ "plan": "table foo not found"
+ },
+ {
+ "comment": "table names should be case-sensitive",
+ "query": "select unsharded.id from unsharded where Unsharded.val = 1",
+ "v3-plan": "VT03019: symbol Unsharded.val not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from unsharded where Unsharded.val = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where Unsharded.val = 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "implicit table reference for sharded keyspace",
+ "query": "select user.foo.col from user.foo",
+ "plan": "table foo not found"
+ },
+ {
+ "comment": "duplicate symbols",
+ "query": "select user.id from user join user",
+ "plan": "VT03013: not unique table/alias: 'user'"
+ },
+ {
+ "comment": "duplicate symbols for merging routes",
+ "query": "select user.id from user join user_extra user on user.id = user.user_id",
+ "plan": "VT03013: not unique table/alias: 'user'"
+ },
+ {
+ "comment": "non-existent table",
+ "query": "select c from t",
+ "plan": "table t not found"
+ },
+ {
+ "comment": "non-existent table on left of join",
+ "query": "select c from t join user",
+ "plan": "table t not found"
+ },
+ {
+ "comment": "non-existent table on right of join",
+ "query": "select c from user join t",
+ "plan": "table t not found"
+ },
+ {
+ "comment": "query with parens is planned correctly",
+ "query": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
+ "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
+ "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "gen4 - optimise plan by merging user_extra and music first, and then querying for user info",
+ "query": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "ue_user_id": 1
+ },
+ "TableName": "`user`_user_extra_music",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, u.id from `user` as u where 1 != 1",
+ "Query": "select 1, u.id from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.user_id from user_extra as ue where 1 != 1",
+ "Query": "select ue.user_id from user_extra as ue where ue.id = :u_id",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.user_id = :ue_user_id",
+ "Table": "music",
+ "Values": [
+ ":ue_user_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "music, user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, 1 from user_extra as ue, music as m where 1 != 1",
+ "Query": "select ue.id, 1 from user_extra as ue, music as m where m.user_id = ue.user_id",
+ "Table": "music, user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u where 1 != 1",
+ "Query": "select 1 from `user` as u where u.id = :ue_id",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join column selected as alias",
+ "query": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id as ueid from user_extra as ue where 1 != 1",
+ "Query": "select ue.id as ueid from user_extra as ue where ue.id = :u_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, ue.id as ueid from user_extra as ue where 1 != 1",
+ "Query": "select ue.id, ue.id as ueid from user_extra as ue",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u where u.id = :ue_id",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "alias on column from derived table. TODO: to support alias in SimpleProjection engine primitive.",
+ "query": "select a as k from (select count(*) as a from user) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a as k from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a as k from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select star from derived table on expandable and unsharded table",
+ "query": "select u.* from (select * from unsharded) u",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.* from (select * from unsharded) u",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
+ "Query": "select u.* from (select * from unsharded) as u",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.* from (select * from unsharded) u",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
+ "Query": "select u.* from (select * from unsharded) as u",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "filtering on a cross-shard derived table",
+ "query": "select id from (select user.id, user.col from user join user_extra) as t where id=5",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select user.id, user.col from user join user_extra) as t where id=5",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "expression on a cross-shard derived table",
+ "query": "select id+1 from (select user.id, user.col from user join user_extra) as t",
+ "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id+1 from (select user.id, user.col from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, `user`.id + 1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col, `user`.id + 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with aliased columns and outer predicate pushed in derived table",
+ "query": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1",
+ "v3-plan": "VT12001: unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
+ "Query": "select u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with aliased columns predicate in both the outer and inner",
+ "query": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1",
+ "v3-plan": "VT12001: unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
+ "Query": "select u.a from (select id as b, `name` from `user` where b = 1 and `name` = 1) as u(a, n)",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with aliased columns and a join that requires pushProjection",
+ "query": "select i+1 from (select user.id from user join user_extra) t(i)",
+ "v3-plan": "VT12001: unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select i+1 from (select user.id from user join user_extra) t(i)",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.id + 1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.id + 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "two subqueries with different Select and OpCode",
+ "query": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from user_extra where 1 != 1",
+ "Query": "select id from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where col = :__sq1 and :__sq_has_values2 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from user_extra where 1 != 1",
+ "Query": "select id from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and col = :__sq2",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join on int columns",
+ "query": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u_intcol": 1
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.intcol from `user` as u where 1 != 1",
+ "Query": "select u.id, u.intcol from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as uu where 1 != 1",
+ "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u_intcol": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.intcol, u.id from `user` as u where 1 != 1",
+ "Query": "select u.intcol, u.id from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as uu where 1 != 1",
+ "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Duplicate output column from derived table having a join",
+ "query": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1",
+ "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "t_col1": 0
+ },
+ "TableName": "`user`_unsharded_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1, 0 from `user` where 1 != 1",
+ "Query": "select `user`.col1, 0 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.a = :t_col1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "left join where clauses #2",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "coalesce(user_extra.col, 4) = 5",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "dont merge unsharded tables from different keyspaces",
+ "query": "select 1 from main.unsharded join main_2.unsharded_tab",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_unsharded_tab",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
+ "Query": "select 1 from unsharded_tab",
+ "Table": "unsharded_tab"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_unsharded_tab",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
+ "Query": "select 1 from unsharded_tab",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main_2.unsharded_tab"
+ ]
+ }
+ },
+ {
+ "comment": "Unsharded join with using",
+ "query": "select * from unsharded_a join unsharded_b using (propertyId);",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
+ "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
+ "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "Column aliases in Derived Table",
+ "query": "select id2 from (select id from user) as x (id2)",
+ "v3-plan": "VT12001: unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id2 from (select id from user) as x (id2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from (select id from `user` where 1 != 1) as x(id2) where 1 != 1",
+ "Query": "select id2 from (select id from `user`) as x(id2)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "single unsharded keyspace with derived table",
+ "query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
+ "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
+ "Table": "unsharded, unsharded_b, unsharded_a"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
+ "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
+ "Table": "unsharded, unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "query builder with derived table having join inside it",
+ "query": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "left join with expressions",
+ "query": "select user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "left join with expressions, with three-way join (different code path)",
+ "query": "select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as e where 1 != 1",
+ "Query": "select 1 from user_extra as e",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "left join with expressions coming from both sides",
+ "query": "select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0,
+ "user_foo": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.foo from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.foo from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_foo + user_extra.col + 1 from user_extra where 1 != 1",
+ "Query": "select :user_foo + user_extra.col + 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Do not rewrite derived expressions when the derived table is merged with the outer",
+ "query": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join with USING construct",
+ "query": "select * from authoritative join unsharded_authoritative using(col1)",
+ "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative join unsharded_authoritative using(col1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:3,R:0",
+ "JoinVars": {
+ "authoritative_col1": 0
+ },
+ "TableName": "authoritative_unsharded_authoritative",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative where 1 != 1",
+ "Query": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative",
+ "Table": "authoritative"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where 1 != 1",
+ "Query": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where unsharded_authoritative.col1 = :authoritative_col1",
+ "Table": "unsharded_authoritative"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_authoritative",
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "derived table inside derived table with a where clause depending on columns from the derived table",
+ "query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
+ "Query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user`) as t1) as t2) as t3 where push_it = 12",
+ "Table": "`user`",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
+ "Query": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where id = 12) as t1) as t2) as t3",
+ "Table": "`user`",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "use a view",
+ "query": "select * from user.user_details_view",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user.user_details_view",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1) as user_details_view where 1 != 1",
+ "Query": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user.user_details_view",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where 1 != 1) as user_details_view where 1 != 1",
+ "Query": "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id = user_extra.user_id) as user_details_view",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "use a view without qualifying the keyspace",
+ "query": "select * from user_details_view",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user_details_view",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1) as user_details_view where 1 != 1",
+ "Query": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user_details_view",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where 1 != 1) as user_details_view where 1 != 1",
+ "Query": "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id = user_extra.user_id) as user_details_view",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "left join where clauses #3 - assert that we can evaluate BETWEEN with the evalengine",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.col between 10 and 20",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.col between 10 and 20",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "user_extra.col between 10 and 20",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "missing and ambiguous column info is OK as long as we can send the query to a single unsharded keyspace",
+ "query": "select missing_column from unsharded, unsharded_a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select missing_column from unsharded, unsharded_a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select missing_column from unsharded, unsharded_a where 1 != 1",
+ "Query": "select missing_column from unsharded, unsharded_a",
+ "Table": "unsharded, unsharded_a"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select missing_column from unsharded, unsharded_a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select missing_column from unsharded, unsharded_a where 1 != 1",
+ "Query": "select missing_column from unsharded, unsharded_a",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "missing and ambiguous column info is not valid when we have two different unsharded keyspaces in the query",
+ "query": "select missing_column from unsharded, unsharded_tab",
+ "v3-plan": "VT03019: symbol missing_column not found",
+ "gen4-plan": "Column 'missing_column' in field list is ambiguous"
+ },
+ {
+ "comment": "join predicate only depending on the RHS should not turn outer join into inner join",
+ "query": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "t1_t1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.id1 from t1 where 1 != 1",
+ "Query": "select t1.id1 from t1",
+ "Table": "t1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t2.id1 from t1 as t2 where 1 != 1",
+ "Query": "select t2.id1 from t1 as t2 where t2.id1 = t2.id2",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "t1_t1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.id1 from t1 where 1 != 1",
+ "Query": "select t1.id1 from t1",
+ "Table": "t1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t2.id1 from t1 as t2 where 1 != 1",
+ "Query": "select t2.id1 from t1 as t2 where t2.id1 = t2.id2",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.txt b/go/vt/vtgate/planbuilder/testdata/from_cases.txt
deleted file mode 100644
index 59c42783f31..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/from_cases.txt
+++ /dev/null
@@ -1,6112 +0,0 @@
-# Single table sharded scatter
-"select col from user"
-{
- "QueryType": "SELECT",
- "Original": "select col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table unsharded
-"select col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Select from sequence
-"select next 2 values from seq"
-{
- "QueryType": "SELECT",
- "Original": "select next 2 values from seq",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 2 values from seq where 1 != 1",
- "Query": "select next 2 values from seq",
- "Table": "seq"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select next 2 values from seq",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 2 values from seq where 1 != 1",
- "Query": "select next 2 values from seq",
- "Table": "seq"
- },
- "TablesUsed": [
- "main.seq"
- ]
-}
-
-# select next from non-sequence table
-"select next value from user"
-"NEXT used on a non-sequence table"
-Gen4 plan same as above
-
-# select next in derived table
-"select 1 from (select next value from seq) t"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select next value from seq) t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
- "Query": "select 1 from (select next 1 values from seq) as t",
- "Table": "seq"
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# select next in derived table
-"select * from (select next value from seq) t"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select next value from seq) t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
- "Query": "select * from (select next 1 values from seq) as t",
- "Table": "seq"
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# select next in subquery
-"select 1 from user where id in (select next value from seq)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user where id in (select next value from seq)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 1 values from seq where 1 != 1",
- "Query": "select next 1 values from seq",
- "Table": "seq"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# select next in projection
-"select (select next value from seq) from user"
-{
- "QueryType": "SELECT",
- "Original": "select (select next value from seq) from user",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 1 values from seq where 1 != 1",
- "Query": "select next 1 values from seq",
- "Table": "seq"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from `user` where 1 != 1",
- "Query": "select :__sq1 from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# Select from reference
-"select * from ref"
-{
- "QueryType": "SELECT",
- "Original": "select * from ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from ref where 1 != 1",
- "Query": "select * from ref",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from ref where 1 != 1",
- "Query": "select * from ref",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# Multi-table unsharded
-"select m1.col from unsharded as m1 join unsharded as m2"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Multi-table, multi-chunk
-"select music.col from user join music"
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user join music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user join music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# routing rules where table name matches, and there's no alias.
-"select * from second_user.user"
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules where table name matches, and there's an alias.
-"select * from second_user.user as a"
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules where table name does not match, and there's no alias.
-"select * from route1"
-{
- "QueryType": "SELECT",
- "Original": "select * from route1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as route1 where 1 != 1",
- "Query": "select * from `user` as route1",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from route1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as route1 where 1 != 1",
- "Query": "select * from `user` as route1",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules where table name does not match, and there's an alias.
-"select * from route1 as a"
-{
- "QueryType": "SELECT",
- "Original": "select * from route1 as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from route1 as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules with primary targeting
-"select * from primary_redirect"
-{
- "QueryType": "SELECT",
- "Original": "select * from primary_redirect",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
- "Query": "select * from `user` as primary_redirect",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from primary_redirect",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
- "Query": "select * from `user` as primary_redirect",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules bad table
-"select * from bad_table"
-"Unknown database 'noks' in vschema"
-Gen4 plan same as above
-
-# routing rules disabled table
-"select * from disabled"
-"table disabled has been disabled"
-Gen4 plan same as above
-
-"select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42"
-{
- "QueryType": "SELECT",
- "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where 1 != 1",
- "Query": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where foo.col = 42",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo.col from `user` as foo, `user` where 1 != 1",
- "Query": "select foo.col from `user` as foo, `user` where foo.col = 42 and foo.id = `user`.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42"
-{
- "QueryType": "SELECT",
- "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "music_id": 1
- },
- "TableName": "music_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.foo, music.id from music where 1 != 1",
- "Query": "select music.foo, music.id from music where music.col = 42",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :music_id",
- "Table": "`user`",
- "Values": [
- ":music_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "music_id": 0
- },
- "TableName": "music_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id, music.foo from music where 1 != 1",
- "Query": "select music.id, music.foo from music where music.col = 42",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :music_id",
- "Table": "`user`",
- "Values": [
- ":music_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ',' join
-"select music.col from user, music"
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user, music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user, music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ',' join unsharded
-"select u1.a, u2.a from unsharded u1, unsharded u2"
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# ',' 3-way join unsharded
-"select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3"
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Left join, single chunk
-"select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b where 1 != 1",
- "Query": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Left join, multi-chunk
-"select u.col from user u left join unsharded m on u.a = m.b"
-{
- "QueryType": "SELECT",
- "Original": "select u.col from user u left join unsharded m on u.a = m.b",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u_a": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.col from `user` as u where 1 != 1",
- "Query": "select u.a, u.col from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m where 1 != 1",
- "Query": "select 1 from unsharded as m where m.b = :u_a",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Three-way left join
-"select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "m1_col": 0
- },
- "TableName": "`user`_unsharded_unsharded",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0,L:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 where m1.col = :user_col",
- "Table": "unsharded"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m2.foo from unsharded as m2 where 1 != 1",
- "Query": "select m2.foo from unsharded as m2 where m2.col = :m1_col",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Three-way left join, right-associated
-"select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinVars": {
- "e_col": 0
- },
- "TableName": "user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.col from user_extra as e where 1 != 1",
- "Query": "select e.col from user_extra as e where e.col = :user_col",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
- "Query": "select 1 from unsharded as m1 where m1.col = :e_col",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Right join
-"select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b where 1 != 1",
- "Query": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Right join with a join LHS
-"select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Straight-join (Gen4 ignores the straight_join hint)
-"select m1.col from unsharded as m1 straight_join unsharded as m2"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 straight_join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Three-way join
-"select user.col from user join unsharded as m1 join unsharded as m2"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded_unsharded",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
- "Query": "select 1 from unsharded as m1",
- "Table": "unsharded"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m2 where 1 != 1",
- "Query": "select 1 from unsharded as m2",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
- "Query": "select 1 from unsharded as m1, unsharded as m2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Parenthesized, single chunk
-"select user.col from user join (unsharded as m1 join unsharded as m2)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (unsharded as m1 join unsharded as m2) where 1 != 1",
- "Query": "select 1 from (unsharded as m1 join unsharded as m2)",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
- "Query": "select 1 from unsharded as m1, unsharded as m2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Parenthesized, multi-chunk
-"select user.col from user join (user as u1 join unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (user as u1 join unsharded)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
- "Query": "select 1 from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (user as u1 join unsharded)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
- "Query": "select 1 from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# index hints, make sure they are not stripped.
-"select user.col from user use index(a)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple index hints, make sure they are not stripped.
-"select user.col from user use index(a) use index for group by (b)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a) use index for group by (b)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a) use index for group by (b)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# mergeable sharded join on unique vindex
-"select user.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex (parenthesized ON clause)
-"select user.col from user join user_extra on (user.id = user_extra.user_id)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex, with a stray condition
-"select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex, swapped operands
-"select user.col from user join user_extra on user_extra.user_id = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where user_extra.user_id = `user`.id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex, and condition
-"select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join on unique vindex, inequality
-"select user.col from user join user_extra on user.id \u003c user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id \u003c user_extra.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where :user_id \u003c user_extra.user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id \u003c user_extra.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where :user_id \u003c user_extra.user_id",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-col reference RHS
-"select user.col from user join user_extra on user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-col reference LHS
-"select user.col from user join user_extra on 5 = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on 5 = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on 5 = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-vindex col
-"select user.col from user join user_extra on user.id = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = :user_extra_col",
- "Table": "`user`",
- "Values": [
- ":user_extra_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-unique vindex
-"select user.col from user_extra join user on user_extra.user_id = user.name"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_user_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
- "Query": "select user_extra.user_id from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.`name` = :user_extra_user_id",
- "Table": "`user`",
- "Values": [
- ":user_extra_user_id"
- ],
- "Vindex": "name_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "user_name": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.`name`, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.`name`, `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.user_id = :user_name",
- "Table": "user_extra",
- "Values": [
- ":user_name"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join with reference table
-"select user.col from user join ref"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join ref where 1 != 1",
- "Query": "select `user`.col from `user` join ref",
- "Table": "`user`, ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, ref where 1 != 1",
- "Query": "select `user`.col from `user`, ref",
- "Table": "`user`, ref"
- },
- "TablesUsed": [
- "user.ref",
- "user.user"
- ]
-}
-
-# reference table self-join
-"select r1.col from ref r1 join ref"
-{
- "QueryType": "SELECT",
- "Original": "select r1.col from ref r1 join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select r1.col from ref as r1 join ref where 1 != 1",
- "Query": "select r1.col from ref as r1 join ref",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select r1.col from ref r1 join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select r1.col from ref as r1, ref where 1 != 1",
- "Query": "select r1.col from ref as r1, ref",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# reference table can merge with other opcodes left to right.
-"select ref.col from ref join user"
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref join `user` where 1 != 1",
- "Query": "select ref.col from ref join `user`",
- "Table": "`user`, ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref, `user` where 1 != 1",
- "Query": "select ref.col from ref, `user`",
- "Table": "`user`, ref"
- },
- "TablesUsed": [
- "user.ref",
- "user.user"
- ]
-}
-
-# reference table can merge with other opcodes left to right and vindex value is in the plan.
-# This tests that route.Merge also copies the condition to the LHS.
-"select ref.col from ref join (select aa from user where user.id=1) user"
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref join (select aa from `user` where 1 != 1) as `user` where 1 != 1",
- "Query": "select ref.col from ref join (select aa from `user` where `user`.id = 1) as `user`",
- "Table": "`user`, ref",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref, (select aa from `user` where 1 != 1) as `user` where 1 != 1",
- "Query": "select ref.col from ref, (select aa from `user` where `user`.id = 1) as `user`",
- "Table": "`user`, ref",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.ref",
- "user.user"
- ]
-}
-
-# routing rules for join, unsharded route wins if we can't find a merged route
-"select route2.col from route2 join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select route2.col from route2 join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
- "Query": "select route2.col from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select route2.col from route2 join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
- "Query": "select route2.col from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user_extra"
- ]
-}
-
-# derived table
-"select id from (select id, col from user where id = 5) as t"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from user where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from user where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table with join
-"select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t, user_extra where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t, user_extra where t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with join, and aliased references
-"select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t, user_extra where 1 != 1",
- "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t, user_extra where t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with join, duplicate columns
-"select t.id from (select user.id, id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id"
-"duplicate column aliases: id"
-Gen4 error: Duplicate column name 'id'
-
-# derived table in RHS of join
-"select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from user_extra join (select id from `user` where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from user_extra join (select id from `user` where id = 5) as t on t.id = user_extra.user_id",
- "Table": "user_extra, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from user_extra, (select id from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select t.id from user_extra, (select id from `user` where id = 5) as t where t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table in FROM with cross-shard join
-"select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :t_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :t_id",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# routing rules for derived table
-"select id from (select id, col from route1 where id = 5) as t"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1 where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1 where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table missing columns
-"select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from (select id from `user`) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
- "Table": "`user`, user_extra"
- }
-}
-Gen4 error: symbol t.col not found
-
-# routing rules for derived table where the constraint is in the outer query
-"select id from (select id, col from route1) as t where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1) as t where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules for derived table where the constraint is in the outer query
-"select id from (select id+col as foo from route1) as t where foo = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id+col as foo from route1) as t where foo = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id + col as foo from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id + col as foo from `user` as route1) as t where foo = 5",
- "Table": "`user`"
- }
-}
-Gen4 error: symbol id not found
-
-# push predicate on joined derived tables
-"select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t join (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s on t.id = s.id where 1 != 1",
- "Query": "select t.id from (select id, textcol1 as baz from `user` as route1) as t join (select id, textcol1 + textcol1 as baz from `user`) as s on t.id = s.id where t.baz = '3' and s.baz = '3'",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t, (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s where 1 != 1",
- "Query": "select t.id from (select id, textcol1 as baz from `user` as route1 where textcol1 = '3') as t, (select id, textcol1 + textcol1 as baz from `user` where textcol1 + textcol1 = '3') as s where t.id = s.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# recursive derived table predicate push down
-"select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5"
-{
- "QueryType": "SELECT",
- "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user`) as u) as t where bar = 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where colA + colB + 4 = 5) as u) as t",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# recursive derived table lookups
-"select id from (select id from (select id from user) as u) as t where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id from (select id from `user`) as u) as t where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id from (select id from `user` where id = 5) as u) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# merge derived tables with single-shard routes
-"select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e"
-{
- "QueryType": "SELECT",
- "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1",
- "Query": "select u.col, e.col from (select col from `user` where id = 5) as u join (select col from user_extra where user_id = 5) as e",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u, (select col from user_extra where 1 != 1) as e where 1 != 1",
- "Query": "select u.col, e.col from (select col from `user` where id = 5) as u, (select col from user_extra where user_id = 5) as e",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join of information_schema with normal table
-"select unsharded.foo from information_schema.a join unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from information_schema.a join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "information_schema.a_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from information_schema.a join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "information_schema.a_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# join of normal table with information_schema
-"select unsharded.foo from unsharded join information_schema.a"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from unsharded join information_schema.a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_information_schema.a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from unsharded join information_schema.a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_information_schema.a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# wire-up on join with cross-shard derived table
-"select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id"
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_col1": 0,
- "t_id": 1
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_col1": 0,
- "t_id": 1
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# wire-up on within cross-shard derived table
-"select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "JoinVars": {
- "user_col": 2
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1, `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Join with cross-shard derived table on rhs
-"select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "unsharded_a_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
- "Query": "select 1 from unsharded_a as ua",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "unsharded_a_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
- "Query": "select 1 from unsharded_a as ua",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Join with cross-shard derived table on rhs - push down join predicate to derived table
-"select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "ua_id": 0
- },
- "TableName": "unsharded_a_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select ua.id from unsharded_a as ua where 1 != 1",
- "Query": "select ua.id from unsharded_a as ua",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id",
- "Table": "`user`",
- "Values": [
- ":ua_id"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# subquery in ON clause, single route
-"select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1 where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b",
- "user.user"
- ]
-}
-
-# subquery in ON clause as sub-expression
-"select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1 where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b",
- "user.user"
- ]
-}
-
-# IN subquery in ON clause, single route
-"select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1 where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b",
- "user.user"
- ]
-}
-
-# subquery in ON clause, with join primitives
-"select unsharded.col from unsharded join user on user.col in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# subquery in ON clause, with left join primitives
-# The subquery is not pulled all the way out.
-"select unsharded.col from unsharded left join user on user.col in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded left join user on user.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# subquery in ON clause, with join primitives, and join on top
-# The subquery is not pulled all the way out.
-"select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`_unsharded_a",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a where 1 != 1",
- "Query": "select 1 from unsharded_a",
- "Table": "unsharded_a"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_unsharded, unsharded_a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded, unsharded_a where 1 != 1",
- "Query": "select unsharded.col from unsharded, unsharded_a",
- "Table": "unsharded, unsharded_a"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "user.user"
- ]
-}
-
-# keyspace-qualified queries
-"select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2"
-{
- "QueryType": "SELECT",
- "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_col2": 1
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col1, `user`.col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
- "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "user_col2": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col2, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col2, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
- "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# implicit table reference for unsharded keyspace
-"select main.foo.col from main.foo"
-{
- "QueryType": "SELECT",
- "Original": "select main.foo.col from main.foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select foo.col from foo where 1 != 1",
- "Query": "select foo.col from foo",
- "Table": "foo"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select main.foo.col from main.foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select foo.col from foo where 1 != 1",
- "Query": "select foo.col from foo",
- "Table": "foo"
- },
- "TablesUsed": [
- "main.foo"
- ]
-}
-
-# col refs should be case-insensitive
-"select user.col from user join user_extra on user.ID = user_extra.User_Id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.ID = user_extra.User_Id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with join primitive (FROM)
-"select id, t.id from (select user.id from user join user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select id, t.id from (select user.id from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, t.id from (select user.id from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# database call in ON clause.
-# The on clause is weird because the substitution must even for root expressions.
-"select u1.a from unsharded u1 join unsharded u2 on database()"
-{
- "QueryType": "SELECT",
- "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
- "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
- "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# last_insert_id for dual
-"select last_insert_id()"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id()",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__lastInsertId as last_insert_id()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id()",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__lastInsertId as last_insert_id()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# last_insert_id for sharded keyspace
-"select last_insert_id() from user"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# last_insert_id for unsharded route
-"select last_insert_id() from main.unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# join with bindvariables
-"SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.assembly_id = :user_id and user_extra.user_id = 2",
- "Table": "user_extra",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_assembly_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.assembly_id from user_extra where 1 != 1",
- "Query": "select user_extra.assembly_id from user_extra where user_extra.user_id = 2",
- "Table": "user_extra",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user` where `user`.id = :user_extra_assembly_id",
- "Table": "`user`",
- "Values": [
- ":user_extra_assembly_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# verify ',' vs JOIN precedence
-"select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a"
-"symbol u1.a not found"
-Gen4 plan same as above
-
-# first expression fails for ',' join (code coverage: ensure error is returned)
-"select user.foo.col from user.foo, user"
-"table foo not found"
-Gen4 plan same as above
-
-# table names should be case-sensitive
-"select unsharded.id from unsharded where Unsharded.val = 1"
-"symbol Unsharded.val not found"
-Gen4 plan same as above
-
-# implicit table reference for sharded keyspace
-"select user.foo.col from user.foo"
-"table foo not found"
-Gen4 plan same as above
-
-# duplicate symbols
-"select user.id from user join user"
-"Not unique table/alias: 'user'"
-Gen4 plan same as above
-
-# duplicate symbols for merging routes
-"select user.id from user join user_extra user on user.id = user.user_id"
-"Not unique table/alias: 'user'"
-Gen4 plan same as above
-
-# non-existent table
-"select c from t"
-"table t not found"
-Gen4 plan same as above
-
-# non-existent table on left of join
-"select c from t join user"
-"table t not found"
-Gen4 plan same as above
-
-# non-existent table on right of join
-"select c from user join t"
-"table t not found"
-Gen4 plan same as above
-
-# query with parens is planned correctly
-"select m1.col from (unsharded as m1, unsharded as m2)"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
- "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
- "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# gen4 - optimise plan by merging user_extra and music first, and then querying for user info
-"select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "ue_user_id": 1
- },
- "TableName": "`user`_user_extra_music",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, u.id from `user` as u where 1 != 1",
- "Query": "select 1, u.id from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.user_id from user_extra as ue where 1 != 1",
- "Query": "select ue.user_id from user_extra as ue where ue.id = :u_id",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.user_id = :ue_user_id",
- "Table": "music",
- "Values": [
- ":ue_user_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "music, user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, 1 from user_extra as ue, music as m where 1 != 1",
- "Query": "select ue.id, 1 from user_extra as ue, music as m where m.user_id = ue.user_id",
- "Table": "music, user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u where 1 != 1",
- "Query": "select 1 from `user` as u where u.id = :ue_id",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join column selected as alias
-"SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id as ueid from user_extra as ue where 1 != 1",
- "Query": "select ue.id as ueid from user_extra as ue where ue.id = :u_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, ue.id as ueid from user_extra as ue where 1 != 1",
- "Query": "select ue.id, ue.id as ueid from user_extra as ue",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u where u.id = :ue_id",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# alias on column from derived table. TODO: to support alias in SimpleProjection engine primitive.
-"select a as k from (select count(*) as a from user) t"
-{
- "QueryType": "SELECT",
- "Original": "select a as k from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a as k from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select star from derived table on expandable and unsharded table
-"select u.* from (select * from unsharded) u"
-{
- "QueryType": "SELECT",
- "Original": "select u.* from (select * from unsharded) u",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
- "Query": "select u.* from (select * from unsharded) as u",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.* from (select * from unsharded) u",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
- "Query": "select u.* from (select * from unsharded) as u",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# filtering on a cross-shard derived table
-"select id from (select user.id, user.col from user join user_extra) as t where id=5"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select user.id, user.col from user join user_extra) as t where id=5",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# expression on a cross-shard derived table
-"select id+1 from (select user.id, user.col from user join user_extra) as t"
-"unsupported: expression on results of a cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select id+1 from (select user.id, user.col from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, `user`.id + 1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col, `user`.id + 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with aliased columns and outer predicate pushed in derived table
-"select u.a from (select id as b, name from user) u(a, n) where u.n = 1"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
- "Query": "select u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table with aliased columns predicate in both the outer and inner
-"select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
- "Query": "select u.a from (select id as b, `name` from `user` where b = 1 and `name` = 1) as u(a, n)",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table with aliased columns and a join that requires pushProjection
-"select i+1 from (select user.id from user join user_extra) t(i)"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select i+1 from (select user.id from user join user_extra) t(i)",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.id + 1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.id + 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# two subqueries with different Select and OpCode
-"select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from user_extra where 1 != 1",
- "Query": "select id from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra limit :__upper_limit",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where col = :__sq1 and :__sq_has_values2 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq2"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra limit :__upper_limit",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from user_extra where 1 != 1",
- "Query": "select id from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and col = :__sq2",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join on int columns
-"select u.id from user as u join user as uu on u.intcol = uu.intcol"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u_intcol": 1
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.intcol from `user` as u where 1 != 1",
- "Query": "select u.id, u.intcol from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as uu where 1 != 1",
- "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u_intcol": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.intcol, u.id from `user` as u where 1 != 1",
- "Query": "select u.intcol, u.id from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as uu where 1 != 1",
- "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Duplicate output column from derived table having a join
-"select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1"
-"unsupported: expression on results of a cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "t_col1": 0
- },
- "TableName": "`user`_unsharded_unsharded",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1, 0 from `user` where 1 != 1",
- "Query": "select `user`.col1, 0 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.a = :t_col1",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# left join where clauses #2
-"select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": "coalesce(user_extra.col, 4) = 5",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# dont merge unsharded tables from different keyspaces
-"select 1 from main.unsharded join main_2.unsharded_tab"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_unsharded_tab",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
- "Query": "select 1 from unsharded_tab",
- "Table": "unsharded_tab"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_unsharded_tab",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
- "Query": "select 1 from unsharded_tab",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main_2.unsharded_tab"
- ]
-}
-
-# Unsharded join with using
-"select * from unsharded_a join unsharded_b using (propertyId);"
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
- "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
- "Table": "unsharded_a, unsharded_b"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
- "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-
-# Column aliases in Derived Table
-"select id2 from (select id from user) as x (id2)"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select id2 from (select id from user) as x (id2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from (select id from `user` where 1 != 1) as x(id2) where 1 != 1",
- "Query": "select id2 from (select id from `user`) as x(id2)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# single unsharded keyspace with derived table
-"select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
- "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
- "Table": "unsharded, unsharded_b, unsharded_a"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
- "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
- "Table": "unsharded, unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-
-# query builder with derived table having join inside it
-"select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra_user_extra",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra_user_extra",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# left join with expressions
-"select user_extra.col+1 from user left join user_extra on user.col = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
- "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# left join with expressions, with three-way join (different code path)
-"select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra_user_extra",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
- "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as e where 1 != 1",
- "Query": "select 1 from user_extra as e",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# left join with expressions coming from both sides
-"select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0,
- "user_foo": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.foo from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.foo from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_foo + user_extra.col + 1 from user_extra where 1 != 1",
- "Query": "select :user_foo + user_extra.col + 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Do not rewrite derived expressions when the derived table is merged with the outer
-"select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC",
- "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC",
- "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join with USING construct
-"select * from authoritative join unsharded_authoritative using(col1)"
-"unsupported: join with USING(column_list) clause for complex queries"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative join unsharded_authoritative using(col1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:3,R:0",
- "JoinVars": {
- "authoritative_col1": 0
- },
- "TableName": "authoritative_unsharded_authoritative",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative where 1 != 1",
- "Query": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative",
- "Table": "authoritative"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where 1 != 1",
- "Query": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where unsharded_authoritative.col1 = :authoritative_col1",
- "Table": "unsharded_authoritative"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_authoritative",
- "user.authoritative"
- ]
-}
-
-
-"select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
- "Query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user`) as t1) as t2) as t3 where push_it = 12",
- "Table": "`user`",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
- "Query": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where id = 12) as t1) as t2) as t3",
- "Table": "`user`",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json
new file mode 100644
index 00000000000..04cd660afb5
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json
@@ -0,0 +1,1582 @@
+[
+ {
+ "comment": "Single information_schema query",
+ "query": "select TABLE_NAME from information_schema.TABLES",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.TABLES",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`TABLES`",
+ "Table": "information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "',' join information_schema",
+ "query": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1",
+ "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b",
+ "Table": "information_schema.`TABLES`, information_schema.`COLUMNS`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1",
+ "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b",
+ "Table": "information_schema.`COLUMNS`, information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information schema query that uses table_schema",
+ "query": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
+ "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
+ "Table": "information_schema.`columns`"
+ }
+ }
+ },
+ {
+ "comment": "information schema join",
+ "query": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files",
+ "v3-plan": "VT03019: symbol `tables`.TABLE_SCHEMA not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `tables`.TABLE_SCHEMA, files.`STATUS` from information_schema.`tables`, information_schema.files where 1 != 1",
+ "Query": "select `tables`.TABLE_SCHEMA, files.`STATUS` from information_schema.`tables`, information_schema.files",
+ "Table": "information_schema.`tables`, information_schema.files"
+ }
+ }
+ },
+ {
+ "comment": "access to qualified column names in information_schema",
+ "query": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`COLUMNS` where 1 != 1",
+ "Query": "select * from information_schema.`COLUMNS` where information_schema.`COLUMNS`.COLUMN_NAME = 'toto'",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, `PRIVILEGES`, COLUMN_COMMENT, GENERATION_EXPRESSION from information_schema.`COLUMNS` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, `PRIVILEGES`, COLUMN_COMMENT, GENERATION_EXPRESSION from information_schema.`COLUMNS` where `COLUMNS`.COLUMN_NAME = 'toto'",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ }
+ },
+ {
+ "comment": "union of information_schema",
+ "query": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`",
+ "Table": "information_schema.`columns`"
+ }
+ }
+ },
+ {
+ "comment": "union between information_schema tables that should not be merged",
+ "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:21)",
+ "(1:22)",
+ "(2:23)",
+ "(3:24)",
+ "(4:25)",
+ "5: binary",
+ "(6:26)",
+ "7: binary",
+ "8: binary",
+ "9: binary",
+ "10: binary",
+ "11: binary",
+ "12: binary",
+ "13: binary",
+ "(14:27)",
+ "(15:28)",
+ "(16:29)",
+ "(17:30)",
+ "18: binary",
+ "(19:31)",
+ "(20:32)"
+ ],
+ "ResultColumns": 21,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "Select from information schema query with two tables that route should be merged",
+ "query": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1",
+ "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ }
+ }
+ },
+ {
+ "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.",
+ "query": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.`TABLES`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where 1 != 1",
+ "Query": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME",
+ "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where 1 != 1",
+ "Query": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME and KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\"), S_TABLE_NAME:VARCHAR(\"sc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS, INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema.routines",
+ "query": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
+ "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.routines"
+ }
+ }
+ },
+ {
+ "comment": "information_schema table sizes",
+ "query": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
+ "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema referential contraints",
+ "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null and rc.constraint_schema = :__vtschemaname and kcu.constraint_name = rc.constraint_name order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "rails query",
+ "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 2",
+ "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.schemata where 1 != 1",
+ "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH from information_schema.schemata where 1 != 1",
+ "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 3",
+ "query": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
+ "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 4",
+ "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 6",
+ "query": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
+ "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.statistics"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 7",
+ "query": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
+ "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`columns`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 8",
+ "query": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
+ "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
+ "Table": "information_schema.`processlist`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 9",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 10",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name",
+ "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "system schema in where clause of information_schema query",
+ "query": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
+ "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "subquery of information_schema with itself",
+ "query": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)",
+ "v3-plan": "VT03019: symbol `TABLES`.`CHECKSUM` not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `TABLES`.`CHECKSUM` from information_schema.`TABLES` where 1 != 1",
+ "Query": "select `TABLES`.`CHECKSUM` from information_schema.`TABLES` where TABLE_NAME in (select TABLE_NAME from information_schema.`COLUMNS`)",
+ "Table": "information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "query trying to query two different keyspaces at the same time",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema query using database() func",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "table_schema predicate the wrong way around",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "table_name predicate against a routed table",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema query with additional predicates",
+ "query": "SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and DATA_FREE = 42",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and DATA_FREE = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and DATA_FREE = 42",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "able to isolate table_schema value even when hidden inside of ORs",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "expand star with information schema",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ }
+ }
+ },
+ {
+ "comment": "expand star with information schema in a derived table",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "x_COLUMN_NAME": 1
+ },
+ "TableName": "information_schema.key_column_usage_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME",
+ "Table": "`user`",
+ "Values": [
+ ":x_COLUMN_NAME"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "x_COLUMN_NAME": 0
+ },
+ "TableName": "information_schema.key_column_usage_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.COLUMN_NAME, x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.COLUMN_NAME, x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME",
+ "Table": "`user`",
+ "Values": [
+ ":x_COLUMN_NAME"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join of information_schema queries with select stars exprs",
+ "query": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.*, b.* from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b where 1 != 1",
+ "Query": "select a.*, b.* from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b",
+ "Table": "information_schema.GLOBAL_STATUS, information_schema.CHARACTER_SETS"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.VARIABLE_NAME as VARIABLE_NAME, a.VARIABLE_VALUE as VARIABLE_VALUE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b where 1 != 1",
+ "Query": "select a.VARIABLE_NAME as VARIABLE_NAME, a.VARIABLE_VALUE as VARIABLE_VALUE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b",
+ "Table": "information_schema.CHARACTER_SETS, information_schema.GLOBAL_STATUS"
+ }
+ }
+ },
+ {
+ "comment": "join two routes with SysTableTableName entries in LHS and RHS",
+ "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "union as a derived table",
+ "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_schema",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_name",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge union subquery with outer query referencing the same system schemas",
+ "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3",
+ "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1",
+ "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge even one side have schema name in derived table",
+ "query": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where 1 != 1 union select TABLE_NAME from information_schema.`columns` where 1 != 1) as dt where 1 != 1",
+ "Query": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select TABLE_NAME from information_schema.`columns`) as dt",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge even one side have schema name in subquery",
+ "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1",
+ "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1",
+ "Query": "select COLUMN_NAME from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1",
+ "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where :__sq_has_values1 = 1 and COLUMN_NAME in ::__sq1",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1",
+ "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where COLUMN_NAME in (select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select COLUMN_NAME from information_schema.`columns`)",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ }
+ },
+ {
+ "comment": "table_schema OR predicate\n# It is unsupported because we do not route queries to multiple keyspaces right now",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "Non-existing information_schema table is still OK",
+ "query": "select TABLE_NAME from information_schema.apa",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.apa",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.apa where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.apa",
+ "Table": "information_schema.apa"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json
new file mode 100644
index 00000000000..fbae05c2d61
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json
@@ -0,0 +1,1716 @@
+[
+ {
+ "comment": "Single information_schema query",
+ "query": "select TABLE_NAME from information_schema.TABLES",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.TABLES",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`TABLES`",
+ "Table": "information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "',' join information_schema",
+ "query": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1",
+ "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b",
+ "Table": "information_schema.`TABLES`, information_schema.`COLUMNS`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1",
+ "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b",
+ "Table": "information_schema.`COLUMNS`, information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information schema query that uses table_schema",
+ "query": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
+ "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
+ "Table": "information_schema.`columns`"
+ }
+ }
+ },
+ {
+ "comment": "information schema join",
+ "query": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files",
+ "v3-plan": "VT03019: symbol `tables`.TABLE_SCHEMA not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `tables`.TABLE_SCHEMA, files.`STATUS` from information_schema.`tables`, information_schema.files where 1 != 1",
+ "Query": "select `tables`.TABLE_SCHEMA, files.`STATUS` from information_schema.`tables`, information_schema.files",
+ "Table": "information_schema.`tables`, information_schema.files"
+ }
+ }
+ },
+ {
+ "comment": "access to qualified column names in information_schema",
+ "query": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`COLUMNS` where 1 != 1",
+ "Query": "select * from information_schema.`COLUMNS` where information_schema.`COLUMNS`.COLUMN_NAME = 'toto'",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, `PRIVILEGES`, COLUMN_COMMENT, GENERATION_EXPRESSION, SRS_ID from information_schema.`COLUMNS` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, `PRIVILEGES`, COLUMN_COMMENT, GENERATION_EXPRESSION, SRS_ID from information_schema.`COLUMNS` where `COLUMNS`.COLUMN_NAME = 'toto'",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ }
+ },
+ {
+ "comment": "union of information_schema",
+ "query": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`",
+ "Table": "information_schema.`columns`"
+ }
+ }
+ },
+ {
+ "comment": "union between information_schema tables that should not be merged",
+ "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:21)",
+ "(1:22)",
+ "(2:23)",
+ "(3:24)",
+ "(4:25)",
+ "5: binary",
+ "(6:26)",
+ "7: binary",
+ "8: binary",
+ "9: binary",
+ "10: binary",
+ "11: binary",
+ "12: binary",
+ "13: binary",
+ "(14:27)",
+ "(15:28)",
+ "(16:29)",
+ "(17:30)",
+ "18: binary",
+ "(19:31)",
+ "(20:32)"
+ ],
+ "ResultColumns": 21,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "Select from information schema query with two tables that route should be merged",
+ "query": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1",
+ "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ }
+ }
+ },
+ {
+ "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.",
+ "query": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.`TABLES`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where 1 != 1",
+ "Query": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME",
+ "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where 1 != 1",
+ "Query": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME and KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\"), S_TABLE_NAME:VARCHAR(\"sc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS, INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema.routines",
+ "query": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
+ "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.routines"
+ }
+ }
+ },
+ {
+ "comment": "information_schema table sizes",
+ "query": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
+ "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema referential contraints",
+ "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null and rc.constraint_schema = :__vtschemaname and kcu.constraint_name = rc.constraint_name order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "rails query",
+ "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 2",
+ "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.schemata where 1 != 1",
+ "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH, DEFAULT_ENCRYPTION from information_schema.schemata where 1 != 1",
+ "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH, DEFAULT_ENCRYPTION from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 3",
+ "query": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
+ "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 4",
+ "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 5",
+ "query": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and cc.constraint_schema = :__vtschemaname",
+ "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"constraint_schema\")]",
+ "Table": "information_schema.check_constraints, information_schema.table_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where 1 != 1",
+ "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where cc.constraint_schema = :__vtschemaname and tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and cc.constraint_schema = tc.constraint_schema and cc.constraint_name = tc.constraint_name",
+ "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"constraint_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.check_constraints, information_schema.table_constraints"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 6",
+ "query": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
+ "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.statistics"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 7",
+ "query": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
+ "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`columns`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 8",
+ "query": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
+ "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
+ "Table": "information_schema.`processlist`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 9",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "rails_query 10",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name",
+ "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "two predicates specifying the database for the same table work if the database is the same",
+ "query": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.`CONSTRAINT_CATALOG` = 'a'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.`CONSTRAINT_CATALOG` = 'a'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1",
+ "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.CONSTRAINT_CATALOG = 'a'",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.check_constraints"
+ }
+ }
+ },
+ {
+ "comment": "system schema in where clause of information_schema query",
+ "query": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
+ "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "subquery of information_schema with itself",
+ "query": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)",
+ "v3-plan": "VT03019: symbol `TABLES`.`CHECKSUM` not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `TABLES`.`CHECKSUM` from information_schema.`TABLES` where 1 != 1",
+ "Query": "select `TABLES`.`CHECKSUM` from information_schema.`TABLES` where TABLE_NAME in (select TABLE_NAME from information_schema.`COLUMNS`)",
+ "Table": "information_schema.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "query trying to query two different keyspaces at the same time",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema query using database() func",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "table_schema predicate the wrong way around",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "table_name predicate against a routed table",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "information_schema query with additional predicates",
+ "query": "SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and DATA_FREE = 42",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and DATA_FREE = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and DATA_FREE = 42",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "able to isolate table_schema value even when hidden inside of ORs",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "expand star with information schema",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ }
+ }
+ },
+ {
+ "comment": "expand star with information schema in a derived table",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "x_COLUMN_NAME": 1
+ },
+ "TableName": "information_schema.key_column_usage_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME",
+ "Table": "`user`",
+ "Values": [
+ ":x_COLUMN_NAME"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "x_COLUMN_NAME": 0
+ },
+ "TableName": "information_schema.key_column_usage_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.COLUMN_NAME, x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.COLUMN_NAME, x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME",
+ "Table": "`user`",
+ "Values": [
+ ":x_COLUMN_NAME"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join of information_schema queries with select stars exprs",
+ "query": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b where 1 != 1",
+ "Query": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b",
+ "Table": "information_schema.CHECK_CONSTRAINTS, information_schema.CHARACTER_SETS"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.CONSTRAINT_CATALOG as CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA as CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME as CONSTRAINT_NAME, a.CHECK_CLAUSE as CHECK_CLAUSE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b where 1 != 1",
+ "Query": "select a.CONSTRAINT_CATALOG as CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA as CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME as CONSTRAINT_NAME, a.CHECK_CLAUSE as CHECK_CLAUSE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b",
+ "Table": "information_schema.CHARACTER_SETS, information_schema.CHECK_CONSTRAINTS"
+ }
+ }
+ },
+ {
+ "comment": "join two routes with SysTableTableName entries in LHS and RHS",
+ "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ }
+ },
+ {
+ "comment": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "union as a derived table",
+ "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_schema",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_name",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge union subquery with outer query referencing the same system schemas",
+ "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3",
+ "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1",
+ "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge even one side have schema name in derived table",
+ "query": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where 1 != 1 union select TABLE_NAME from information_schema.`columns` where 1 != 1) as dt where 1 != 1",
+ "Query": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select TABLE_NAME from information_schema.`columns`) as dt",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "merge even one side have schema name in subquery",
+ "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1",
+ "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1",
+ "Query": "select COLUMN_NAME from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1",
+ "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where :__sq_has_values1 = 1 and COLUMN_NAME in ::__sq1",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1",
+ "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where COLUMN_NAME in (select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select COLUMN_NAME from information_schema.`columns`)",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ }
+ },
+ {
+ "comment": "table_schema OR predicate\n# It is unsupported because we do not route queries to multiple keyspaces right now",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ }
+ },
+ {
+ "comment": "select variable, value from sys.sys_config",
+ "query": "select variable, value from sys.sys_config",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select variable, value from sys.sys_config",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select variable, value from sys.sys_config where 1 != 1",
+ "Query": "select variable, value from sys.sys_config",
+ "Table": "sys.sys_config"
+ }
+ }
+ },
+ {
+ "comment": "select host, db from mysql.`db`",
+ "query": "select host, db from mysql.`db`",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select host, db from mysql.`db`",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select host, db from mysql.db where 1 != 1",
+ "Query": "select host, db from mysql.db",
+ "Table": "mysql.db"
+ }
+ }
+ },
+ {
+ "comment": "select logged, prio from performance_schema.error_log",
+ "query": "select logged, prio from performance_schema.error_log",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select logged, prio from performance_schema.error_log",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select logged, prio from performance_schema.error_log where 1 != 1",
+ "Query": "select logged, prio from performance_schema.error_log",
+ "Table": "performance_schema.error_log"
+ }
+ }
+ },
+ {
+ "comment": "Non-existing information_schema table is still OK",
+ "query": "select TABLE_NAME from information_schema.apa",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.apa",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.apa where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.apa",
+ "Table": "information_schema.apa"
+ }
+ }
+ },
+ {
+ "comment": "mysqldump query",
+ "query": "SELECT LOGFILE_GROUP_NAME, FILE_NAME, TOTAL_EXTENTS, INITIAL_SIZE, ENGINE, EXTRA FROM INFORMATION_SCHEMA.FILES WHERE ENGINE = 'ndbcluster' AND FILE_TYPE = 'UNDO LOG' AND FILE_NAME IS NOT NULL AND LOGFILE_GROUP_NAME IS NOT NULL AND LOGFILE_GROUP_NAME IN (SELECT DISTINCT LOGFILE_GROUP_NAME FROM INFORMATION_SCHEMA.FILES WHERE ENGINE = 'ndbcluster' AND FILE_TYPE = 'DATAFILE' AND TABLESPACE_NAME IN (SELECT DISTINCT TABLESPACE_NAME FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_SCHEMA IN ('commerce'))) GROUP BY LOGFILE_GROUP_NAME, FILE_NAME, ENGINE, TOTAL_EXTENTS, INITIAL_SIZE ORDER BY LOGFILE_GROUP_NAME",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT LOGFILE_GROUP_NAME, FILE_NAME, TOTAL_EXTENTS, INITIAL_SIZE, ENGINE, EXTRA FROM INFORMATION_SCHEMA.FILES WHERE ENGINE = 'ndbcluster' AND FILE_TYPE = 'UNDO LOG' AND FILE_NAME IS NOT NULL AND LOGFILE_GROUP_NAME IS NOT NULL AND LOGFILE_GROUP_NAME IN (SELECT DISTINCT LOGFILE_GROUP_NAME FROM INFORMATION_SCHEMA.FILES WHERE ENGINE = 'ndbcluster' AND FILE_TYPE = 'DATAFILE' AND TABLESPACE_NAME IN (SELECT DISTINCT TABLESPACE_NAME FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_SCHEMA IN ('commerce'))) GROUP BY LOGFILE_GROUP_NAME, FILE_NAME, ENGINE, TOTAL_EXTENTS, INITIAL_SIZE ORDER BY LOGFILE_GROUP_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select LOGFILE_GROUP_NAME, FILE_NAME, TOTAL_EXTENTS, INITIAL_SIZE, `ENGINE`, EXTRA from INFORMATION_SCHEMA.FILES where 1 != 1 group by LOGFILE_GROUP_NAME, FILE_NAME, `ENGINE`, TOTAL_EXTENTS, INITIAL_SIZE",
+ "Query": "select LOGFILE_GROUP_NAME, FILE_NAME, TOTAL_EXTENTS, INITIAL_SIZE, `ENGINE`, EXTRA from INFORMATION_SCHEMA.FILES where `ENGINE` = 'ndbcluster' and FILE_TYPE = 'UNDO LOG' and FILE_NAME is not null and LOGFILE_GROUP_NAME is not null and LOGFILE_GROUP_NAME in (select distinct LOGFILE_GROUP_NAME from INFORMATION_SCHEMA.FILES where `ENGINE` = 'ndbcluster' and FILE_TYPE = 'DATAFILE' and TABLESPACE_NAME in (select distinct TABLESPACE_NAME from INFORMATION_SCHEMA.`PARTITIONS` where TABLE_SCHEMA in ('commerce'))) group by LOGFILE_GROUP_NAME, FILE_NAME, `ENGINE`, TOTAL_EXTENTS, INITIAL_SIZE order by LOGFILE_GROUP_NAME asc",
+ "Table": "INFORMATION_SCHEMA.FILES"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/large_cases.json b/go/vt/vtgate/planbuilder/testdata/large_cases.json
new file mode 100644
index 00000000000..4b2fae633ab
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/large_cases.json
@@ -0,0 +1,254 @@
+[
+ {
+ "comment": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
+ "query": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinVars": {
+ "user_extra_user_id": 0
+ },
+ "TableName": "user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
+ "Query": "select user_extra.user_id from user_extra where user_extra.user_id = :user_id",
+ "Table": "user_extra",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_metadata where 1 != 1",
+ "Query": "select 1 from user_metadata where user_metadata.user_id = :user_extra_user_id",
+ "Table": "user_metadata",
+ "Values": [
+ ":user_extra_user_id"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinVars": {
+ "music_id": 0
+ },
+ "TableName": "music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinVars": {
+ "unsharded_x": 0
+ },
+ "TableName": "unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.x from unsharded where 1 != 1",
+ "Query": "select unsharded.x from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a where 1 != 1",
+ "Query": "select 1 from unsharded_a where unsharded_a.y = :unsharded_x",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_b where 1 != 1",
+ "Query": "select 1 from unsharded_b",
+ "Table": "unsharded_b"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_auto where 1 != 1",
+ "Query": "select 1 from unsharded_auto",
+ "Table": "unsharded_auto"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music_extra where 1 != 1",
+ "Query": "select 1 from music_extra where music_extra.music_id = :music_id",
+ "Table": "music_extra",
+ "Values": [
+ ":music_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "music, music_extra_`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music, music_extra where 1 != 1",
+ "Query": "select 1 from music, music_extra where music.id = music_extra.music_id",
+ "Table": "music, music_extra"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user`, user_extra, user_metadata where 1 != 1",
+ "Query": "select `user`.id from `user`, user_extra, user_metadata where `user`.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id",
+ "Table": "`user`, user_extra, user_metadata"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where 1 != 1",
+ "Query": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where unsharded.x = unsharded_a.y",
+ "Table": "unsharded, unsharded_a, unsharded_auto, unsharded_b"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "main.unsharded_auto",
+ "main.unsharded_b",
+ "user.music",
+ "user.music_extra",
+ "user.user",
+ "user.user_extra",
+ "user.user_metadata"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/large_cases.txt b/go/vt/vtgate/planbuilder/testdata/large_cases.txt
deleted file mode 100644
index 5fa57013a00..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/large_cases.txt
+++ /dev/null
@@ -1,249 +0,0 @@
-"select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinVars": {
- "user_extra_user_id": 0
- },
- "TableName": "user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
- "Query": "select user_extra.user_id from user_extra where user_extra.user_id = :user_id",
- "Table": "user_extra",
- "Values": [
- ":user_id"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_metadata where 1 != 1",
- "Query": "select 1 from user_metadata where user_metadata.user_id = :user_extra_user_id",
- "Table": "user_metadata",
- "Values": [
- ":user_extra_user_id"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinVars": {
- "music_id": 0
- },
- "TableName": "music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music",
- "Table": "music"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinVars": {
- "unsharded_x": 0
- },
- "TableName": "unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.x from unsharded where 1 != 1",
- "Query": "select unsharded.x from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a where 1 != 1",
- "Query": "select 1 from unsharded_a where unsharded_a.y = :unsharded_x",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_b where 1 != 1",
- "Query": "select 1 from unsharded_b",
- "Table": "unsharded_b"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_auto where 1 != 1",
- "Query": "select 1 from unsharded_auto",
- "Table": "unsharded_auto"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music_extra where 1 != 1",
- "Query": "select 1 from music_extra where music_extra.music_id = :music_id",
- "Table": "music_extra",
- "Values": [
- ":music_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "music, music_extra_`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music, music_extra where 1 != 1",
- "Query": "select 1 from music, music_extra where music.id = music_extra.music_id",
- "Table": "music, music_extra"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user`, user_extra, user_metadata where 1 != 1",
- "Query": "select `user`.id from `user`, user_extra, user_metadata where `user`.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id",
- "Table": "`user`, user_extra, user_metadata"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where 1 != 1",
- "Query": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where unsharded.x = unsharded_a.y",
- "Table": "unsharded, unsharded_a, unsharded_auto, unsharded_b"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "main.unsharded_auto",
- "main.unsharded_b",
- "user.music",
- "user.music_extra",
- "user.user",
- "user.user_extra",
- "user.user_metadata"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json
new file mode 100644
index 00000000000..9120e39bfd6
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json
@@ -0,0 +1,2592 @@
+[
+ {
+ "comment": "this testcase breaks goland, so it lives on its own file",
+ "query": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270698330)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270699497)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270703806 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270703806)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270714657 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270714657)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270721330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270721330)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270812079 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270812079)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271011532 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271011532)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271034164 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034164)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271034177 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034177)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271066849 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271066849)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271098740 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271098740)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271355000 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271355000)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271924504 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271924504)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272086055 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272086055)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270650576 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270650576)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270652906 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270652906)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270660650 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270660650)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270698330)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270699497)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270649256 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270649256)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270653671 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270653671)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270717223 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270717223)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270720898 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270720898)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271346411 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271346411)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271352121 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271352121)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271354908 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271354908)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271367516 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271367516)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271472522 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271472522)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271821733 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271821733)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272068709 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272068709)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:2)",
+ "(1:3)"
+ ],
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "(select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)",
+ "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11)",
+ "Table": "music",
+ "Values": [
+ "INT64(1270698330)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "(select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)",
+ "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11)",
+ "Table": "music",
+ "Values": [
+ "INT64(1270699497)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270703806 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270703806)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270714657 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270714657)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270721330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270721330)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270812079 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270812079)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271011532 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271011532)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271034164 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034164)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271034177 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034177)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271066849 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271066849)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271098740 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271098740)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271355000 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271355000)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271924504 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271924504)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272086055 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272086055)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270650576 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270650576)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270652906 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270652906)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270660650 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270660650)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270649256 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270649256)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270653671 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270653671)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270717223 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270717223)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270720898 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270720898)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271346411 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271346411)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271352121 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271352121)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271354908 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271354908)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271367516 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271367516)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271472522 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271472522)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271821733 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271821733)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272068709 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272068709)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.json b/go/vt/vtgate/planbuilder/testdata/lock_cases.json
new file mode 100644
index 00000000000..98ffa9d1bb9
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/lock_cases.json
@@ -0,0 +1,193 @@
+[
+ {
+ "comment": "get_lock from dual",
+ "query": "select get_lock('xyz', 10) from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10) from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10) from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "is_free_lock from dual",
+ "query": "select is_free_lock('xyz') from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select is_free_lock('xyz') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
+ "lock_func": [
+ "is_free_lock('xyz')"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select is_free_lock('xyz') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
+ "lock_func": [
+ "is_free_lock('xyz')"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "get_lock from dual prepare query",
+ "query": "select get_lock(?, ?)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock(?, ?)",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock(:v1, :v2)"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock(?, ?)",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock(:v1, :v2)"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "lock tables read",
+ "query": "lock tables t as x read local",
+ "plan": {
+ "QueryType": "LOCK_TABLES",
+ "Original": "lock tables t as x read local",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "lock tables write",
+ "query": "lock tables t low_priority write",
+ "plan": {
+ "QueryType": "LOCK_TABLES",
+ "Original": "lock tables t low_priority write",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "unlock tables",
+ "query": "unlock tables",
+ "plan": {
+ "QueryType": "UNLOCK_TABLES",
+ "Original": "unlock tables",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "multiple lock functions",
+ "query": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)",
+ "is_free_lock('abc')"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)",
+ "is_free_lock('abc')"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.txt b/go/vt/vtgate/planbuilder/testdata/lock_cases.txt
deleted file mode 100644
index 765c4c27568..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/lock_cases.txt
+++ /dev/null
@@ -1,186 +0,0 @@
-# get_lock from dual
-"select get_lock('xyz', 10) from dual"
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10) from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10) from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# is_free_lock from dual
-"select is_free_lock('xyz') from dual"
-{
- "QueryType": "SELECT",
- "Original": "select is_free_lock('xyz') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
- "lock_func": [
- "is_free_lock('xyz')"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select is_free_lock('xyz') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
- "lock_func": [
- "is_free_lock('xyz')"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# get_lock from dual prepare query
-"select get_lock(?, ?)"
-{
- "QueryType": "SELECT",
- "Original": "select get_lock(?, ?)",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
- "lock_func": [
- "get_lock(:v1, :v2)"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select get_lock(?, ?)",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
- "lock_func": [
- "get_lock(:v1, :v2)"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# lock tables read
-"lock tables t as x read local"
-{
- "QueryType": "LOCK_TABLES",
- "Original": "lock tables t as x read local",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# lock tables write
-"lock tables t low_priority write"
-{
- "QueryType": "LOCK_TABLES",
- "Original": "lock tables t low_priority write",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# unlock tables
-"unlock tables"
-{
- "QueryType": "UNLOCK_TABLES",
- "Original": "unlock tables",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# multiple lock functions
-"select get_lock('xyz', 10), is_free_lock('abc') from dual"
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)",
- "is_free_lock('abc')"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)",
- "is_free_lock('abc')"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
new file mode 100644
index 00000000000..c1b4fbe83b7
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
@@ -0,0 +1,1162 @@
+[
+ {
+ "comment": "Test cases in this file follow the code in memory_sort.go.\n# scatter aggregate order by references ungrouped column",
+ "query": "select a, b, count(*) from user group by a order by b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by a order by b",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by a order by b",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(1|4) ASC, (0|3) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by b asc, a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate order by references aggregate expression",
+ "query": "select a, b, count(*) k from user group by a order by k",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select a, b, count(*) k from user group by a order by b, a, k",
+ "query": "select a, b, count(*) k from user group by a order by b, a, k",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by b, a, k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC, (0|4) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|4) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by b, a, k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC, (0|3) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with memory sort and limit",
+ "query": "select a, b, count(*) k from user group by a order by k desc limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 DESC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 DESC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with memory sort and order by number",
+ "query": "select a, b, count(*) k from user group by a order by 1,3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by 1,3",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by 1 asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by 1,3",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with memory sort and order by number, reuse weight_string\n# we have to use a meaningless construct to test this. TODO: improve to do ordering once for textcol1",
+ "query": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|2) ASC, 1 ASC, (0|2) ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "2",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` where 1 != 1 group by textcol1, weight_string(textcol1)",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` group by textcol1, weight_string(textcol1) order by textcol1 asc, textcol1 asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 1 ASC, 0 ASC COLLATE latin1_swedish_ci",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS k",
+ "GroupBy": "0 COLLATE latin1_swedish_ci",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select textcol1 as t, count(*) as k from `user` where 1 != 1 group by textcol1",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
+ "Query": "select textcol1 as t, count(*) as k from `user` group by textcol1 order by textcol1 asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by on a cross-shard derived table",
+ "query": "select id from (select user.id, user.col from user join user_extra) as t order by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|2) ASC",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "order by on a cross-shard query. Note: this happens only when an order by column is from the second table",
+ "query": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(2|3) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,R:1",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(2|3) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0,R:1",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by for join, with mixed cross-shard ordering",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1,L:3",
+ "JoinVars": {
+ "user_id": 4
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0,L:3,R:1,L:4",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by for join, on text column in LHS.",
+ "query": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by for join, on text column in RHS.",
+ "query": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by for vindex func",
+ "query": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "unary expression",
+ "query": "select a from user order by binary a desc",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: convert(a, binary)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a from user order by binary a desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` where 1 != 1",
+ "OrderBy": "(1|2) DESC",
+ "Query": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` order by convert(a, binary) desc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "unary expression in join query",
+ "query": "select u.a from user u join music m on u.a = m.a order by binary a desc",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: convert(a, binary)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a from user u join music m on u.a = m.a order by binary a desc",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u_a": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u where 1 != 1",
+ "OrderBy": "(1|2) DESC",
+ "Query": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u order by convert(a, binary) desc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.a = :u_a",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "intcol order by",
+ "query": "select id, intcol from user order by intcol",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, intcol from user order by intcol",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, intcol from `user` where 1 != 1",
+ "OrderBy": "1 ASC",
+ "Query": "select id, intcol from `user` order by intcol asc",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, intcol from user order by intcol",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, intcol from `user` where 1 != 1",
+ "OrderBy": "1 ASC",
+ "Query": "select id, intcol from `user` order by intcol asc",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter order by with order by column not present",
+ "query": "select col from user order by id",
+ "v3-plan": "VT12001: unsupported: in scatter query: ORDER BY must reference a column in the SELECT list: id asc",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select col, id, weight_string(id) from `user` order by id asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt
deleted file mode 100644
index ec49c4e5c33..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt
+++ /dev/null
@@ -1,1144 +0,0 @@
-# Test cases in this file follow the code in memory_sort.go.
-# scatter aggregate order by references ungrouped column
-"select a, b, count(*) from user group by a order by b"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by a order by b",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by a order by b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(1|4) ASC, (0|3) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by b asc, a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate order by references aggregate expression
-"select a, b, count(*) k from user group by a order by k"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select a, b, count(*) k from user group by a order by b, a, k"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by b, a, k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC, (0|4) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|4) ASC",
- "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by b, a, k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC, (0|3) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with memory sort and limit
-"select a, b, count(*) k from user group by a order by k desc limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 DESC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 DESC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with memory sort and order by number
-"select a, b, count(*) k from user group by a order by 1,3"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by 1,3",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by 1 asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by 1,3",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with memory sort and order by number, reuse weight_string
-# we have to use a meaningless construct to test this. TODO: improve to do ordering once for textcol1
-"select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1"
-{
- "QueryType": "SELECT",
- "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|2) ASC, 1 ASC, (0|2) ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "2",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` where 1 != 1 group by textcol1, weight_string(textcol1)",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` group by textcol1, weight_string(textcol1) order by textcol1 asc, textcol1 asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 1 ASC, 0 ASC COLLATE latin1_swedish_ci",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS k",
- "GroupBy": "0 COLLATE latin1_swedish_ci",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select textcol1 as t, count(*) as k from `user` where 1 != 1 group by textcol1",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
- "Query": "select textcol1 as t, count(*) as k from `user` group by textcol1 order by textcol1 asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by on a cross-shard derived table
-"select id from (select user.id, user.col from user join user_extra) as t order by id"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|2) ASC",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# order by on a cross-shard query. Note: this happens only when an order by column is from the second table
-"select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(2|3) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,R:1",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(2|3) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0,R:1",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Order by for join, with mixed cross-shard ordering
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1,L:3",
- "JoinVars": {
- "user_id": 4
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0,L:3,R:1,L:4",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Order by for join, on text column in LHS.
-"select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2"
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Order by for join, on text column in RHS.
-"select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2"
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# order by for vindex func
-"select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start"
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user_index"
- ]
-}
-
-# unary expression
-"select a from user order by binary a desc"
-"unsupported: in scatter query: complex order by expression: convert(a, binary)"
-{
- "QueryType": "SELECT",
- "Original": "select a from user order by binary a desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` where 1 != 1",
- "OrderBy": "(1|2) DESC",
- "Query": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` order by convert(a, binary) desc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# unary expression in join query
-"select u.a from user u join music m on u.a = m.a order by binary a desc"
-"unsupported: in scatter query: complex order by expression: convert(a, binary)"
-{
- "QueryType": "SELECT",
- "Original": "select u.a from user u join music m on u.a = m.a order by binary a desc",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u_a": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u where 1 != 1",
- "OrderBy": "(1|2) DESC",
- "Query": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u order by convert(a, binary) desc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.a = :u_a",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# intcol order by
-"select id, intcol from user order by intcol"
-{
- "QueryType": "SELECT",
- "Original": "select id, intcol from user order by intcol",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, intcol from `user` where 1 != 1",
- "OrderBy": "1 ASC",
- "Query": "select id, intcol from `user` order by intcol asc",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, intcol from user order by intcol",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, intcol from `user` where 1 != 1",
- "OrderBy": "1 ASC",
- "Query": "select id, intcol from `user` order by intcol asc",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter order by with order by column not present
-"select col from user order by id"
-"unsupported: in scatter query: order by must reference a column in the select list: id asc"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select col, id, weight_string(id) from `user` order by id asc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/migration_cases.json b/go/vt/vtgate/planbuilder/testdata/migration_cases.json
new file mode 100644
index 00000000000..db77a3d4a41
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/migration_cases.json
@@ -0,0 +1,103 @@
+[
+ {
+ "comment": "revert migration",
+ "query": "revert vitess_migration 'abc'",
+ "plan": {
+ "QueryType": "REVERT",
+ "Original": "revert vitess_migration 'abc'",
+ "Instructions": {
+ "OperatorType": "RevertMigration",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "revert vitess_migration 'abc'"
+ }
+ }
+ },
+ {
+ "comment": "retry migration",
+ "query": "alter vitess_migration 'abc' retry",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' retry",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' retry"
+ }
+ }
+ },
+ {
+ "comment": "complete migration",
+ "query": "alter vitess_migration 'abc' complete",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' complete",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' complete"
+ }
+ }
+ },
+ {
+ "comment": "complete migration",
+ "query": "alter vitess_migration 'abc' cleanup",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' cleanup",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' cleanup"
+ }
+ }
+ },
+ {
+ "comment": "cancel migration",
+ "query": "alter vitess_migration 'abc' cancel",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' cancel",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' cancel"
+ }
+ }
+ },
+ {
+ "comment": "cancel all migrations",
+ "query": "alter vitess_migration cancel all",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration cancel all",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration cancel all"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/migration_cases.txt b/go/vt/vtgate/planbuilder/testdata/migration_cases.txt
deleted file mode 100644
index fc08cfe7d07..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/migration_cases.txt
+++ /dev/null
@@ -1,100 +0,0 @@
-# revert migration
-"revert vitess_migration 'abc'"
-{
- "QueryType": "REVERT",
- "Original": "revert vitess_migration 'abc'",
- "Instructions": {
- "OperatorType": "RevertMigration",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "revert vitess_migration 'abc'"
- }
-}
-Gen4 plan same as above
-
-# retry migration
-"alter vitess_migration 'abc' retry"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' retry",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' retry"
- }
-}
-Gen4 plan same as above
-
-# complete migration
-"alter vitess_migration 'abc' complete"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' complete",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' complete"
- }
-}
-Gen4 plan same as above
-
-# complete migration
-"alter vitess_migration 'abc' cleanup"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' cleanup",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' cleanup"
- }
-}
-Gen4 plan same as above
-
-# cancel migration
-"alter vitess_migration 'abc' cancel"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' cancel",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' cancel"
- }
-}
-Gen4 plan same as above
-
-# cancel all migrations
-"alter vitess_migration cancel all"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration cancel all",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration cancel all"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json
new file mode 100644
index 00000000000..88717292379
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json
@@ -0,0 +1,407 @@
+[
+ {
+ "comment": "OLTP simple select",
+ "query": "SELECT c FROM sbtest34 WHERE id=15",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest34 WHERE id=15",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest34 where 1 != 1",
+ "Query": "select c from sbtest34 where id = 15",
+ "Table": "sbtest34",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest34 WHERE id=15",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest34 where 1 != 1",
+ "Query": "select c from sbtest34 where id = 15",
+ "Table": "sbtest34",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest34"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP simple range select",
+ "query": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest12 where 1 != 1",
+ "Query": "select c from sbtest12 where id between 1 and 10",
+ "Table": "sbtest12"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest12 where 1 != 1",
+ "Query": "select c from sbtest12 where id between 1 and 10",
+ "Table": "sbtest12"
+ },
+ "TablesUsed": [
+ "main.sbtest12"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP sum range select",
+ "query": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
+ "Query": "select sum(k) from sbtest43 where id between 90 and 990",
+ "Table": "sbtest43"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS sum(k)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
+ "Query": "select sum(k) from sbtest43 where id between 90 and 990",
+ "Table": "sbtest43"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.sbtest43"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP order range select",
+ "query": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c, weight_string(c) from sbtest1 where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select c, weight_string(c) from sbtest1 where id between 50 and 235 order by c asc",
+ "ResultColumns": 1,
+ "Table": "sbtest1"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest1 where 1 != 1",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
+ "Query": "select c from sbtest1 where id between 50 and 235 order by c asc",
+ "Table": "sbtest1"
+ },
+ "TablesUsed": [
+ "main.sbtest1"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP distinct range select",
+ "query": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "1",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc",
+ "ResultColumns": 2,
+ "Table": "sbtest30"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|1) COLLATE latin1_swedish_ci",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 0 ASC COLLATE latin1_swedish_ci",
+ "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc, c asc",
+ "Table": "sbtest30"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.sbtest30"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP index udpate",
+ "query": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest6 set k = k + 1 where id = 5",
+ "Table": "sbtest6",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest6"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest6 set k = k + 1 where id = 5",
+ "Table": "sbtest6",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest6"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP non index update",
+ "query": "UPDATE sbtest9 SET c=7 WHERE id=8",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest9 set c = 7 where id = 8",
+ "Table": "sbtest9",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest9"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest9 set c = 7 where id = 8",
+ "Table": "sbtest9",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest9"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP delete",
+ "query": "DELETE FROM sbtest15 WHERE id=7525",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM sbtest15 WHERE id=7525",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from sbtest15 where id = 7525",
+ "Table": "sbtest15",
+ "Values": [
+ "INT64(7525)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest15"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM sbtest15 WHERE id=7525",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from sbtest15 where id = 7525",
+ "Table": "sbtest15",
+ "Values": [
+ "INT64(7525)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest15"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP insert",
+ "query": "INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into sbtest16(id, k, c, pad) values (:_id_0, 1, 2, 50)",
+ "TableName": "sbtest16",
+ "VindexValues": {
+ "hash": "INT64(42)"
+ }
+ },
+ "TablesUsed": [
+ "main.sbtest16"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_cases.txt b/go/vt/vtgate/planbuilder/testdata/oltp_cases.txt
deleted file mode 100644
index 42aefb3fd4d..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/oltp_cases.txt
+++ /dev/null
@@ -1,396 +0,0 @@
-# OLTP simple select
-"SELECT c FROM sbtest34 WHERE id=15"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest34 WHERE id=15",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest34 where 1 != 1",
- "Query": "select c from sbtest34 where id = 15",
- "Table": "sbtest34",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest34 WHERE id=15",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest34 where 1 != 1",
- "Query": "select c from sbtest34 where id = 15",
- "Table": "sbtest34",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest34"
- ]
-}
-
-# OLTP simple range select
-"SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest12 where 1 != 1",
- "Query": "select c from sbtest12 where id between 1 and 10",
- "Table": "sbtest12"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest12 where 1 != 1",
- "Query": "select c from sbtest12 where id between 1 and 10",
- "Table": "sbtest12"
- },
- "TablesUsed": [
- "main.sbtest12"
- ]
-}
-
-# OLTP sum range select
-"SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990"
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
- "Query": "select sum(k) from sbtest43 where id between 90 and 990",
- "Table": "sbtest43"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS sum(k)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
- "Query": "select sum(k) from sbtest43 where id between 90 and 990",
- "Table": "sbtest43"
- }
- ]
- },
- "TablesUsed": [
- "main.sbtest43"
- ]
-}
-
-# OLTP order range select
-"SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c, weight_string(c) from sbtest1 where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select c, weight_string(c) from sbtest1 where id between 50 and 235 order by c asc",
- "ResultColumns": 1,
- "Table": "sbtest1"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest1 where 1 != 1",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
- "Query": "select c from sbtest1 where id between 50 and 235 order by c asc",
- "Table": "sbtest1"
- },
- "TablesUsed": [
- "main.sbtest1"
- ]
-}
-
-# OLTP distinct range select
-"SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c"
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "1",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc",
- "ResultColumns": 2,
- "Table": "sbtest30"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|1) COLLATE latin1_swedish_ci",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 0 ASC COLLATE latin1_swedish_ci",
- "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc, c asc",
- "Table": "sbtest30"
- }
- ]
- },
- "TablesUsed": [
- "main.sbtest30"
- ]
-}
-
-# OLTP index udpate
-"UPDATE sbtest6 SET k=k+1 WHERE id=5"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest6 set k = k + 1 where id = 5",
- "Table": "sbtest6",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest6"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest6 set k = k + 1 where id = 5",
- "Table": "sbtest6",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest6"
- ]
-}
-
-# OLTP non index update
-"UPDATE sbtest9 SET c=7 WHERE id=8"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest9 set c = 7 where id = 8",
- "Table": "sbtest9",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest9"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest9 set c = 7 where id = 8",
- "Table": "sbtest9",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest9"
- ]
-}
-
-# OLTP delete
-"DELETE FROM sbtest15 WHERE id=7525"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM sbtest15 WHERE id=7525",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from sbtest15 where id = 7525",
- "Table": "sbtest15",
- "Values": [
- "INT64(7525)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest15"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM sbtest15 WHERE id=7525",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from sbtest15 where id = 7525",
- "Table": "sbtest15",
- "Values": [
- "INT64(7525)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest15"
- ]
-}
-
-# OLTP insert
-"INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into sbtest16(id, k, c, pad) values (:_id_0, 1, 2, 50)",
- "TableName": "sbtest16",
- "VindexValues": {
- "hash": "INT64(42)"
- }
- },
- "TablesUsed": [
- "main.sbtest16"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/onecase.json b/go/vt/vtgate/planbuilder/testdata/onecase.json
new file mode 100644
index 00000000000..da7543f706a
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/onecase.json
@@ -0,0 +1,9 @@
+[
+ {
+ "comment": "Add your test case here for debugging and run go test -run=One.",
+ "query": "",
+ "plan": {
+
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/onecase.txt b/go/vt/vtgate/planbuilder/testdata/onecase.txt
deleted file mode 100644
index e819513f354..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/onecase.txt
+++ /dev/null
@@ -1 +0,0 @@
-# Add your test case here for debugging and run go test -run=One.
diff --git a/go/vt/vtgate/planbuilder/testdata/other_admin_cases.json b/go/vt/vtgate/planbuilder/testdata/other_admin_cases.json
new file mode 100644
index 00000000000..2eb3432e1b7
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/other_admin_cases.json
@@ -0,0 +1,56 @@
+[
+ {
+ "comment": "Repair statement",
+ "query": "repair table t1,t2 quick",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "repair table t1,t2 quick",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "repair table t1,t2 quick",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Optimize statement",
+ "query": "optimize table t1",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "optimize table t1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "optimize table t1",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "DO statement",
+ "query": "DO 1",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "DO 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "DO 1",
+ "SingleShardOnly": true
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/other_admin_cases.txt b/go/vt/vtgate/planbuilder/testdata/other_admin_cases.txt
deleted file mode 100644
index e5f965ee1b6..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/other_admin_cases.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-# Repair statement
-"repair table t1,t2 quick"
-{
- "QueryType": "OTHER",
- "Original": "repair table t1,t2 quick",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "repair table t1,t2 quick",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Optimize statement
-"optimize table t1"
-{
- "QueryType": "OTHER",
- "Original": "optimize table t1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "optimize table t1",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# DO statement
-"DO 1"
-{
- "QueryType": "OTHER",
- "Original": "DO 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "DO 1",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json
new file mode 100644
index 00000000000..795e4855fb5
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json
@@ -0,0 +1,89 @@
+[
+ {
+ "comment": "Explain statement",
+ "query": "explain select * from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain select * from user",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "explain select * from `user`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Explain Vitess statement",
+ "query": "explain format=vitess select * from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain format=vitess select * from user",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "JSON": "VARCHAR"
+ },
+ "RowCount": 1
+ }
+ }
+ },
+ {
+ "comment": "Analyze statement",
+ "query": "analyze table t1",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "analyze table t1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "analyze table t1",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Describe statement",
+ "query": "describe select * from t",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "describe select * from t",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "explain select * from t",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Desc statement",
+ "query": "desc select * from t",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "desc select * from t",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "explain select * from t",
+ "SingleShardOnly": true
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt b/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt
deleted file mode 100644
index 0866d9df34b..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt
+++ /dev/null
@@ -1,127 +0,0 @@
-# Explain statement
-"explain select * from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain select * from user",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "explain select * from `user`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Explain Vitess statement
-"explain format=vitess select * from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain format=vitess select * from user",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# Analyze statement
-"analyze table t1"
-{
- "QueryType": "OTHER",
- "Original": "analyze table t1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "analyze table t1",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Describe statement
-"describe select * from t"
-{
- "QueryType": "EXPLAIN",
- "Original": "describe select * from t",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "explain select * from t",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Desc statement
-"desc select * from t"
-{
- "QueryType": "EXPLAIN",
- "Original": "desc select * from t",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "explain select * from t",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-"explain format=vtexplain select * from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain format=vtexplain select * from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "EXPLAIN",
- "Original": "explain format=vtexplain select * from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "main.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json
new file mode 100644
index 00000000000..747b681b34f
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json
@@ -0,0 +1,3121 @@
+[
+ {
+ "comment": "HAVING implicitly references table col",
+ "query": "select user.col1 from user having col2 = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 from user having col2 = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user` having col2 = 2",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 from user having col2 = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user` where col2 = 2",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous symbol reference",
+ "query": "select user.col1, user_extra.col1 from user join user_extra having col1 = 2",
+ "v3-plan": "VT03021: ambiguous symbol reference: col1",
+ "gen4-plan": "Column 'col1' in field list is ambiguous"
+ },
+ {
+ "comment": "TODO: this should be 'Column 'col1' in having clause is ambiguous'\n# non-ambiguous symbol reference",
+ "query": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col1 from user_extra having user_extra.col1 = 2",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col1 from user_extra where user_extra.col1 = 2",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING multi-route",
+ "query": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2 from `user` having 1 = 1 and a = 1 and a = `user`.col2",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
+ "Query": "select user_extra.col3 from user_extra having user_extra.col3 = 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2 from `user` where `user`.col1 = 1 and `user`.col1 = `user`.col2 and 1 = 1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
+ "Query": "select user_extra.col3 from user_extra where user_extra.col3 = 1 and 1 = 1",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING uses subquery",
+ "query": "select id from user having id in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user having id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` having :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user having id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY, reference col from local table.",
+ "query": "select col from user where id = 5 order by aa",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 5 order by aa",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 5 order by aa asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 5 order by aa",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 5 order by aa asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY uses column numbers",
+ "query": "select col from user where id = 1 order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 order by 1 asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter",
+ "query": "select col from user order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` order by col asc",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` order by col asc",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on select t.*",
+ "query": "select t.*, t.col from user t order by t.col",
+ "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY on select *",
+ "query": "select *, col from user order by col",
+ "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY on select multi t.*",
+ "query": "select t.*, t.name, t.*, t.col from user t order by t.col",
+ "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY on select multi *",
+ "query": "select *, name, *, col from user order by col",
+ "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY works for select * from authoritative table",
+ "query": "select * from authoritative order by user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
+ "ResultColumns": 3,
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
+ "ResultColumns": 3,
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY works for select * from authoritative table",
+ "query": "select * from authoritative order by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2, weight_string(col1) from authoritative where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select user_id, col1, col2, weight_string(col1) from authoritative order by col1 asc",
+ "ResultColumns": 3,
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
+ "OrderBy": "1 ASC COLLATE latin1_swedish_ci",
+ "Query": "select user_id, col1, col2 from authoritative order by col1 asc",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter with text column",
+ "query": "select a, textcol1, b from user order by a, textcol1, b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
+ "Query": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "Query": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter with text column, qualified name TODO: can plan better",
+ "query": "select a, user.textcol1, b from user order by a, textcol1, b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
+ "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter with multiple text columns",
+ "query": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` where 1 != 1",
+ "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC, (3|7) ASC",
+ "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|4) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|5) ASC, 3 ASC COLLATE latin1_swedish_ci",
+ "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY invalid col number on scatter",
+ "query": "select col from user order by 2",
+ "v3-plan": "VT03014: unknown column '2' in 'order clause'",
+ "gen4-plan": "Unknown column '2' in 'order clause'"
+ },
+ {
+ "comment": "ORDER BY column offset",
+ "query": "select id as foo from music order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by 1 asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by foo asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY NULL",
+ "query": "select col from user order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by null",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by null",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY after pull-out subquery",
+ "query": "select col from user where col in (select col2 from user) order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by col",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by col",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY NULL for join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by null",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id order by null",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY non-key column for join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY non-key column for implicit join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY NULL after pull-out subquery",
+ "query": "select col from user where col in (select col2 from user) order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by null",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by null",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by null",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY RAND()",
+ "query": "select col from user order by RAND()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by RAND()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by RAND()",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by RAND()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by RAND()",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY RAND() for join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by RAND()",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1 order by RAND()",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY RAND() after pull-out subquery",
+ "query": "select col from user where col in (select col2 from user) order by rand()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by rand()",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by rand()",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, '*' expression",
+ "query": "select * from user where id = 5 order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, qualified '*' expression",
+ "query": "select user.* from user where id = 5 order by user.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, '*' expression with qualified reference",
+ "query": "select * from user where id = 5 order by user.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, '*' expression in a subquery",
+ "query": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_col": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
+ "Query": "select u.id, u.col from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.col = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
+ "Query": "select u.col, u.id from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.col = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, verify outer symtab is searched according to its own context.",
+ "query": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)",
+ "v3-plan": "VT03020: symbol u.col not found in subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.id in (select col2 from `user` where `user`.id = u.id order by u.col asc)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, qualified '*' expression, name mismatched.",
+ "query": "select user.* from user where id = 5 order by e.col",
+ "v3-plan": "VT03019: symbol e.col not found",
+ "gen4-plan": "symbol e.col not found"
+ },
+ {
+ "comment": "Order by, invalid column number",
+ "query": "select col from user order by 18446744073709551616",
+ "v3-plan": "VT13001: [BUG] error parsing column number: 18446744073709551616",
+ "gen4-plan": "error parsing column number: 18446744073709551616"
+ },
+ {
+ "comment": "Order by, out of range column number",
+ "query": "select col from user order by 2",
+ "v3-plan": "VT03014: unknown column '2' in 'order clause'",
+ "gen4-plan": "Unknown column '2' in 'order clause'"
+ },
+ {
+ "comment": "Order by, '*' expression with qualified reference and using collate",
+ "query": "select * from user where id = 5 order by user.col collate utf8_general_ci",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by with math functions",
+ "query": "select * from user where id = 5 order by -col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by -col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by -col1 asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by -col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by -col1 asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by with string operations",
+ "query": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by with math operations",
+ "query": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by derived table column",
+ "query": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1",
+ "Query": "select * from `user` as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as u, (select user_id from user_extra where 1 != 1) as eu where 1 != 1",
+ "Query": "select * from `user` as u, (select user_id from user_extra where user_id = 5) as eu where u.id = 5 and u.id = eu.user_id order by eu.user_id asc",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: order by gets pushed for routes",
+ "query": "select col from route1 where id = 1 order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "LIMIT",
+ "query": "select col1 from user where id = 1 limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1 from user where id = 1 limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user` where id = 1 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1 from user where id = 1 limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user` where id = 1 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "limit for joins. Can't push down the limit because result\n# counts get multiplied by join operations.",
+ "query": "select user.col from user join user_extra limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "limit for scatter",
+ "query": "select col from user limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "limit for scatter with bind var",
+ "query": "select col from user limit :a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit :a",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": ":a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit :a",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": ":a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard expression in parenthesis with limit",
+ "query": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter limit after pullout subquery",
+ "query": "select col from user where col in (select col1 from user) limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col1 from user) limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col1 from user) limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "limit on reference table",
+ "query": "select col from ref limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref limit 1",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref limit 1",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "arithmetic limit",
+ "query": "select id from user limit 1+1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user limit 1+1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user limit 1+1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by column alias",
+ "query": "select id as foo from music order by foo",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by foo asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by foo asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "column alias for a table column in order by",
+ "query": "select id as foo, id2 as id from music order by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo, id2 as id from music order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
+ "ResultColumns": 2,
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo, id2 as id from music order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
+ "ResultColumns": 2,
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "ordering on the left side of the join",
+ "query": "select name from user, music order by name",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select name from user, music order by name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select name from user, music order by name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column without group by",
+ "query": "select count(id), num from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num from `user` where 1 != 1",
+ "Query": "select count(id), num from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num from `user` where 1 != 1",
+ "Query": "select count(id), num from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column with order by",
+ "query": "select count(id), num from user order by 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user order by 2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|2) ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
+ "Query": "select count(id), num, weight_string(num) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user order by 2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` order by num asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column with group by",
+ "query": "select count(id), num from user group by 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count",
+ "GroupBy": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count(id)",
+ "GroupBy": "(1|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column with group by and order by",
+ "query": "select count(id), num from user group by 2 order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2 order by 1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count",
+ "GroupBy": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2 order by 1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count(id)",
+ "GroupBy": "(1|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join order by with ambiguous column reference ; valid in MySQL",
+ "query": "select name, name from user, music order by name",
+ "v3-plan": "VT03021: ambiguous symbol reference: `name`",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select name, name from user, music order by name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by with ambiguous column reference ; valid in MySQL",
+ "query": "select id, id from user order by id",
+ "v3-plan": "VT03021: ambiguous symbol reference: id",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, id from user order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select id, id, weight_string(id) from `user` order by id asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Scatter order by and aggregation: order by column must reference column from select list",
+ "query": "select col, count(*) from user group by col order by c1",
+ "v3-plan": "VT12001: unsupported: memory sort: ORDER BY must reference a column in the SELECT list: c1 asc",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from user group by col order by c1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*), random(2) AS c1",
+ "GroupBy": "0",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*), c1, weight_string(c1) from `user` where 1 != 1 group by col",
+ "OrderBy": "(2|3) ASC, 0 ASC",
+ "Query": "select col, count(*), c1, weight_string(c1) from `user` group by col order by c1 asc, col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Distinct with cross shard query",
+ "query": "select distinct user.a from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.a from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a from `user` where 1 != 1",
+ "Query": "select `user`.a from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.a from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|1)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `user`.a, weight_string(`user`.a) from `user` order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Distinct with column alias",
+ "query": "select distinct a as c, a from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a as c, a from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0, 1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a as c, a from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), (1|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Distinct with same column",
+ "query": "select distinct a, a from user",
+ "v3-plan": "generating ORDER BY clause: VT03021: ambiguous symbol reference: a",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, a from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), (1|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, a, weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select distinct a, a, weight_string(a) from `user` order by a asc, a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by has subqueries",
+ "query": "select id from unsharded order by (select id from unsharded)",
+ "v3-plan": "VT12001: unsupported: subqueries disallowed in sqlparser.OrderBy",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from unsharded order by (select id from unsharded)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded order by (select id from unsharded) asc",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Equal filter with hexadecimal value",
+ "query": "select count(*) a from user having a = 0x01",
+ "v3-plan": "VT12001: unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a = 0x01",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = 0x01",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt
deleted file mode 100644
index 9e96746f5b6..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt
+++ /dev/null
@@ -1,3055 +0,0 @@
-# HAVING implicitly references table col
-"select user.col1 from user having col2 = 2"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 from user having col2 = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user` having col2 = 2",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 from user having col2 = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user` where col2 = 2",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ambiguous symbol reference
-"select user.col1, user_extra.col1 from user join user_extra having col1 = 2"
-"ambiguous symbol reference: col1"
-Gen4 error: Column 'col1' in field list is ambiguous
-
-# TODO: this should be 'Column 'col1' in having clause is ambiguous'
-# non-ambiguous symbol reference
-"select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
- "Query": "select user_extra.col1 from user_extra having user_extra.col1 = 2",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
- "Query": "select user_extra.col1 from user_extra where user_extra.col1 = 2",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# HAVING multi-route
-"select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2 from `user` having 1 = 1 and a = 1 and a = `user`.col2",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
- "Query": "select user_extra.col3 from user_extra having user_extra.col3 = 1",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2 from `user` where `user`.col1 = 1 and `user`.col1 = `user`.col2 and 1 = 1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
- "Query": "select user_extra.col3 from user_extra where user_extra.col3 = 1 and 1 = 1",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# HAVING uses subquery
-"select id from user having id in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user having id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` having :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user having id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY, reference col from local table.
-"select col from user where id = 5 order by aa"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 5 order by aa",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 5 order by aa asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 5 order by aa",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 5 order by aa asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY uses column numbers
-"select col from user where id = 1 order by 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 order by 1 asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on scatter
-"select col from user order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` order by col asc",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` order by col asc",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on select t.*
-"select t.*, t.col from user t order by t.col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY on select *
-"select *, col from user order by col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY on select multi t.*
-"select t.*, t.name, t.*, t.col from user t order by t.col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY on select multi *
-"select *, name, *, col from user order by col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY works for select * from authoritative table
-"select * from authoritative order by user_id"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
- "OrderBy": "(0|3) ASC",
- "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
- "ResultColumns": 3,
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
- "OrderBy": "(0|3) ASC",
- "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
- "ResultColumns": 3,
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# ORDER BY works for select * from authoritative table
-"select * from authoritative order by col1"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2, weight_string(col1) from authoritative where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select user_id, col1, col2, weight_string(col1) from authoritative order by col1 asc",
- "ResultColumns": 3,
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
- "OrderBy": "1 ASC COLLATE latin1_swedish_ci",
- "Query": "select user_id, col1, col2 from authoritative order by col1 asc",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# ORDER BY on scatter with text column
-"select a, textcol1, b from user order by a, textcol1, b"
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
- "Query": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "Query": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on scatter with text column, qualified name TODO: can plan better
-"select a, user.textcol1, b from user order by a, textcol1, b"
-{
- "QueryType": "SELECT",
- "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
- "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on scatter with multiple text columns
-"select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2"
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` where 1 != 1",
- "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC, (3|7) ASC",
- "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|4) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|5) ASC, 3 ASC COLLATE latin1_swedish_ci",
- "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
- "ResultColumns": 4,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY invalid col number on scatter
-"select col from user order by 2"
-"Unknown column '2' in 'order clause'"
-Gen4 plan same as above
-
-# ORDER BY column offset
-"select id as foo from music order by 1"
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by 1 asc",
- "ResultColumns": 1,
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by foo asc",
- "ResultColumns": 1,
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# ORDER BY NULL
-"select col from user order by null"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by null",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by null",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY after pull-out subquery
-"select col from user where col in (select col2 from user) order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by col",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by col",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY NULL for join
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by null",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id order by null",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY non-key column for join
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY non-key column for implicit join
-"select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY NULL after pull-out subquery
-"select col from user where col in (select col2 from user) order by null"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by null",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by null",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by null",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY RAND()
-"select col from user order by RAND()"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by RAND()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by RAND()",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by RAND()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by RAND()",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY RAND() for join
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by RAND()",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1 order by RAND()",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY RAND() after pull-out subquery
-"select col from user where col in (select col2 from user) order by rand()"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by rand()",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by rand()",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, '*' expression
-"select * from user where id = 5 order by col"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, qualified '*' expression
-"select user.* from user where id = 5 order by user.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, '*' expression with qualified reference
-"select * from user where id = 5 order by user.col"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, '*' expression in a subquery
-"select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)"
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_col": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
- "Query": "select u.id, u.col from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.col = :u_col",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "u_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
- "Query": "select u.col, u.id from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.col = :u_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Order by, verify outer symtab is searched according to its own context.
-"select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)"
-"symbol u.col not found in subquery"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.id in (select col2 from `user` where `user`.id = u.id order by u.col asc)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, qualified '*' expression, name mismatched.
-"select user.* from user where id = 5 order by e.col"
-"symbol e.col not found"
-Gen4 plan same as above
-
-# Order by, invalid column number
-"select col from user order by 18446744073709551616"
-"error parsing column number: 18446744073709551616"
-Gen4 plan same as above
-
-# Order by, out of range column number
-"select col from user order by 2"
-"Unknown column '2' in 'order clause'"
-Gen4 plan same as above
-
-# Order by, '*' expression with qualified reference and using collate
-"select * from user where id = 5 order by user.col collate utf8_general_ci"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by with math functions
-"select * from user where id = 5 order by -col1"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by -col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by -col1 asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by -col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by -col1 asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by with string operations
-"select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by with math operations
-"select * from user where id = 5 order by id+col collate utf8_general_ci desc"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by derived table column
-"select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1",
- "Query": "select * from `user` as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as u, (select user_id from user_extra where 1 != 1) as eu where 1 != 1",
- "Query": "select * from `user` as u, (select user_id from user_extra where user_id = 5) as eu where u.id = 5 and u.id = eu.user_id order by eu.user_id asc",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# routing rules: order by gets pushed for routes
-"select col from route1 where id = 1 order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# LIMIT
-"select col1 from user where id = 1 limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col1 from user where id = 1 limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user` where id = 1 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1 from user where id = 1 limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user` where id = 1 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# limit for joins. Can't push down the limit because result
-# counts get multiplied by join operations.
-"select user.col from user join user_extra limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# limit for scatter
-"select col from user limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# limit for scatter with bind var
-"select col from user limit :a"
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit :a",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": ":a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit :a",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": ":a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard expression in parenthesis with limit
-"select * from user where (id1 = 4 AND name1 ='abc') limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter limit after pullout subquery
-"select col from user where col in (select col1 from user) limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col1 from user) limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col1 from user) limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# limit on reference table
-"select col from ref limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from ref limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref limit 1",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from ref limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref limit 1",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# arithmetic limit
-"select id from user limit 1+1"
-{
- "QueryType": "SELECT",
- "Original": "select id from user limit 1+1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user limit 1+1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by column alias
-"select id as foo from music order by foo"
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by foo asc",
- "ResultColumns": 1,
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by foo asc",
- "ResultColumns": 1,
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# column alias for a table column in order by
-"select id as foo, id2 as id from music order by id"
-{
- "QueryType": "SELECT",
- "Original": "select id as foo, id2 as id from music order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
- "ResultColumns": 2,
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as foo, id2 as id from music order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
- "ResultColumns": 2,
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# ordering on the left side of the join
-"select name from user, music order by name"
-{
- "QueryType": "SELECT",
- "Original": "select name from user, music order by name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select name from user, music order by name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column without group by
-"select count(id), num from user"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num from `user` where 1 != 1",
- "Query": "select count(id), num from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num from `user` where 1 != 1",
- "Query": "select count(id), num from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column with order by
-"select count(id), num from user order by 2"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user order by 2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|2) ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
- "Query": "select count(id), num, weight_string(num) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user order by 2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` order by num asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column with group by
-"select count(id), num from user group by 2"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count(id)",
- "GroupBy": "(1|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column with group by and order by
-"select count(id), num from user group by 2 order by 1"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2 order by 1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2 order by 1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count(id)",
- "GroupBy": "(1|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join order by with ambiguous column reference ; valid in MySQL
-"select name, name from user, music order by name"
-"ambiguous symbol reference: `name`"
-{
- "QueryType": "SELECT",
- "Original": "select name, name from user, music order by name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# order by with ambiguous column reference ; valid in MySQL
-"select id, id from user order by id"
-"ambiguous symbol reference: id"
-{
- "QueryType": "SELECT",
- "Original": "select id, id from user order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select id, id, weight_string(id) from `user` order by id asc",
- "ResultColumns": 2,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Scatter order by and aggregation: order by column must reference column from select list
-"select col, count(*) from user group by col order by c1"
-"unsupported: memory sort: order by must reference a column in the select list: c1 asc"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col order by c1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*), random(2) AS c1",
- "GroupBy": "0",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*), c1, weight_string(c1) from `user` where 1 != 1 group by col",
- "OrderBy": "(2|3) ASC, 0 ASC",
- "Query": "select col, count(*), c1, weight_string(c1) from `user` group by col order by c1 asc, col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Distinct with cross shard query
-"select distinct user.a from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.a from user join user_extra",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.a from user join user_extra",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `user`.a, weight_string(`user`.a) from `user` order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Distinct with column alias
-"select distinct a as c, a from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a as c, a from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct a as c, a from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Distinct with same column
-"select distinct a, a from user"
-"generating order by clause: ambiguous symbol reference: a"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, a from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, a, weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select distinct a, a, weight_string(a) from `user` order by a asc, a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by has subqueries
-"select id from unsharded order by (select id from unsharded)"
-"unsupported: subqueries disallowed in GROUP or ORDER BY"
-{
- "QueryType": "SELECT",
- "Original": "select id from unsharded order by (select id from unsharded)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded order by (select id from unsharded) asc",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Equal filter with hexadecimal value
-"select count(*) a from user having a = 0x01"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a = 0x01",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 = 0x01",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/rails_cases.json b/go/vt/vtgate/planbuilder/testdata/rails_cases.json
new file mode 100644
index 00000000000..89fdc4ff059
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/rails_cases.json
@@ -0,0 +1,218 @@
+[
+ {
+ "comment": "Author5.joins(books: [{orders: :customer}, :supplier])",
+ "query": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
+ "JoinVars": {
+ "book6s_supplier5_id": 4
+ },
+ "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s_supplier5s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4",
+ "JoinVars": {
+ "order2s_customer2_id": 5
+ },
+ "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
+ "JoinVars": {
+ "book6s_order2s_order2_id": 5
+ },
+ "TableName": "author5s, book6s_book6s_order2s_order2s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
+ "JoinVars": {
+ "book6s_id": 5
+ },
+ "TableName": "author5s, book6s_book6s_order2s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id where 1 != 1",
+ "Query": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id",
+ "Table": "author5s, book6s"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1",
+ "Query": "select book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id",
+ "Table": "book6s_order2s",
+ "Values": [
+ ":book6s_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select order2s.customer2_id from order2s where 1 != 1",
+ "Query": "select order2s.customer2_id from order2s where order2s.id = :book6s_order2s_order2_id",
+ "Table": "order2s"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from customer2s where 1 != 1",
+ "Query": "select 1 from customer2s where customer2s.id = :order2s_customer2_id",
+ "Table": "customer2s",
+ "Values": [
+ ":order2s_customer2_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from supplier5s where 1 != 1",
+ "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
+ "Table": "supplier5s",
+ "Values": [
+ ":book6s_supplier5_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,R:2,R:3",
+ "JoinVars": {
+ "order2s_id": 0
+ },
+ "TableName": "customer2s, order2s_author5s, book6s_book6s_order2s_supplier5s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select order2s.id from order2s, customer2s where 1 != 1",
+ "Query": "select order2s.id from order2s, customer2s where customer2s.id = order2s.customer2_id",
+ "Table": "customer2s, order2s"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:3,L:4",
+ "JoinVars": {
+ "book6s_supplier5_id": 0
+ },
+ "TableName": "author5s, book6s_book6s_order2s_supplier5s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:3,L:4,L:5",
+ "JoinVars": {
+ "book6s_id": 0
+ },
+ "TableName": "author5s, book6s_book6s_order2s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where 1 != 1",
+ "Query": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where book6s.author5_id = author5s.id",
+ "Table": "author5s, book6s"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from book6s_order2s where 1 != 1",
+ "Query": "select 1 from book6s_order2s where book6s_order2s.book6_id = :book6s_id and book6s_order2s.order2_id = :order2s_id",
+ "Table": "book6s_order2s",
+ "Values": [
+ ":book6s_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from supplier5s where 1 != 1",
+ "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
+ "Table": "supplier5s",
+ "Values": [
+ ":book6s_supplier5_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.author5s",
+ "user.book6s",
+ "user.book6s_order2s",
+ "user.customer2s",
+ "user.order2s",
+ "user.supplier5s"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/rails_cases.txt b/go/vt/vtgate/planbuilder/testdata/rails_cases.txt
deleted file mode 100644
index ae4e12d3542..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/rails_cases.txt
+++ /dev/null
@@ -1,214 +0,0 @@
-# Author5.joins(books: [{orders: :customer}, :supplier])
-"select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id"
-{
- "QueryType": "SELECT",
- "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
- "JoinVars": {
- "book6s_supplier5_id": 4
- },
- "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s_supplier5s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4",
- "JoinVars": {
- "order2s_customer2_id": 5
- },
- "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
- "JoinVars": {
- "book6s_order2s_order2_id": 5
- },
- "TableName": "author5s, book6s_book6s_order2s_order2s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
- "JoinVars": {
- "book6s_id": 5
- },
- "TableName": "author5s, book6s_book6s_order2s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id where 1 != 1",
- "Query": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id",
- "Table": "author5s, book6s"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1",
- "Query": "select book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id",
- "Table": "book6s_order2s",
- "Values": [
- ":book6s_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select order2s.customer2_id from order2s where 1 != 1",
- "Query": "select order2s.customer2_id from order2s where order2s.id = :book6s_order2s_order2_id",
- "Table": "order2s"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from customer2s where 1 != 1",
- "Query": "select 1 from customer2s where customer2s.id = :order2s_customer2_id",
- "Table": "customer2s",
- "Values": [
- ":order2s_customer2_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from supplier5s where 1 != 1",
- "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
- "Table": "supplier5s",
- "Values": [
- ":book6s_supplier5_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,R:2,R:3",
- "JoinVars": {
- "order2s_id": 0
- },
- "TableName": "customer2s, order2s_author5s, book6s_book6s_order2s_supplier5s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select order2s.id from order2s, customer2s where 1 != 1",
- "Query": "select order2s.id from order2s, customer2s where customer2s.id = order2s.customer2_id",
- "Table": "customer2s, order2s"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:3,L:4",
- "JoinVars": {
- "book6s_supplier5_id": 0
- },
- "TableName": "author5s, book6s_book6s_order2s_supplier5s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:3,L:4,L:5",
- "JoinVars": {
- "book6s_id": 0
- },
- "TableName": "author5s, book6s_book6s_order2s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where 1 != 1",
- "Query": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where book6s.author5_id = author5s.id",
- "Table": "author5s, book6s"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from book6s_order2s where 1 != 1",
- "Query": "select 1 from book6s_order2s where book6s_order2s.book6_id = :book6s_id and book6s_order2s.order2_id = :order2s_id",
- "Table": "book6s_order2s",
- "Values": [
- ":book6s_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from supplier5s where 1 != 1",
- "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
- "Table": "supplier5s",
- "Values": [
- ":book6s_supplier5_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.author5s",
- "user.book6s",
- "user.book6s_order2s",
- "user.customer2s",
- "user.order2s",
- "user.supplier5s"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json
new file mode 100644
index 00000000000..375cbf9cb57
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json
@@ -0,0 +1,623 @@
+[
+ {
+ "comment": "select from unqualified ambiguous reference routes to reference source",
+ "query": "select * from ambiguous_ref_with_source",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from ambiguous_ref_with_source where 1 != 1",
+ "Query": "select * from ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from ambiguous_ref_with_source where 1 != 1",
+ "Query": "select * from ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "join with unqualified ambiguous reference table routes to optimal keyspace",
+ "query": "select user.col from user join ambiguous_ref_with_source",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_ambiguous_ref_with_source",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from ambiguous_ref_with_source where 1 != 1",
+ "Query": "select 1 from ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, ambiguous_ref_with_source where 1 != 1",
+ "Query": "select `user`.col from `user`, ambiguous_ref_with_source",
+ "Table": "`user`, ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "user.ambiguous_ref_with_source",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous unqualified reference table self-join routes to reference source",
+ "query": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select r1.col from ambiguous_ref_with_source as r1 join ambiguous_ref_with_source where 1 != 1",
+ "Query": "select r1.col from ambiguous_ref_with_source as r1 join ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select r1.col from ambiguous_ref_with_source as r1, ambiguous_ref_with_source where 1 != 1",
+ "Query": "select r1.col from ambiguous_ref_with_source as r1, ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous unqualified reference table can merge with other opcodes left to right.",
+ "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "ambiguous_ref_with_source_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source where 1 != 1",
+ "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, `user` where 1 != 1",
+ "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, `user`",
+ "Table": "`user`, ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "user.ambiguous_ref_with_source",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous unqualified reference table can merge with other opcodes left to right and vindex value is in the plan",
+ "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "ambiguous_ref_with_source_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source where 1 != 1",
+ "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from (select aa from `user` where 1 != 1) as `user` where 1 != 1",
+ "Query": "select 1 from (select aa from `user` where `user`.id = 1) as `user`",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, (select aa from `user` where 1 != 1) as `user` where 1 != 1",
+ "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, (select aa from `user` where `user`.id = 1) as `user`",
+ "Table": "`user`, ambiguous_ref_with_source",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.ambiguous_ref_with_source",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "qualified join to reference table routes to optimal keyspace",
+ "query": "select user.col from user join main.ambiguous_ref_with_source",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join main.ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_ambiguous_ref_with_source",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from ambiguous_ref_with_source where 1 != 1",
+ "Query": "select 1 from ambiguous_ref_with_source",
+ "Table": "ambiguous_ref_with_source"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join main.ambiguous_ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, ambiguous_ref_with_source where 1 != 1",
+ "Query": "select `user`.col from `user`, ambiguous_ref_with_source",
+ "Table": "`user`, ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "user.ambiguous_ref_with_source",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert into ambiguous qualified reference table routes to source",
+ "query": "insert into ambiguous_ref_with_source(col) values(1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into ambiguous_ref_with_source(col) values(1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into ambiguous_ref_with_source(col) values (1)",
+ "TableName": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "insert into qualified ambiguous reference table routes v3 to requested keyspace gen4 to source",
+ "query": "insert into user.ambiguous_ref_with_source(col) values(1)",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user.ambiguous_ref_with_source(col) values(1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into ambiguous_ref_with_source(col) values (1)",
+ "TableName": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "user.ambiguous_ref_with_source"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user.ambiguous_ref_with_source(col) values(1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into ambiguous_ref_with_source(col) values (1)",
+ "TableName": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "update unqualified ambiguous reference table routes to source",
+ "query": "update ambiguous_ref_with_source set col = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update ambiguous_ref_with_source set col = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update ambiguous_ref_with_source set col = 1",
+ "Table": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "update qualified ambiguous reference table v3 error no primary vindex v4 route to source",
+ "query": "update user.ambiguous_ref_with_source set col = 1",
+ "v3-plan": "VT09001: table 'ambiguous_ref_with_source' does not have a primary vindex",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user.ambiguous_ref_with_source set col = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update ambiguous_ref_with_source set col = 1",
+ "Table": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "delete from unqualified ambiguous reference table routes to source",
+ "query": "delete from ambiguous_ref_with_source where col = 1",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from ambiguous_ref_with_source where col = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from ambiguous_ref_with_source where col = 1",
+ "Table": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "delete from qualified ambiguous reference table v3 error no primary vindex v4 route to source",
+ "query": "delete from user.ambiguous_ref_with_source where col = 1",
+ "v3-plan": "VT09001: table 'ambiguous_ref_with_source' does not have a primary vindex",
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user.ambiguous_ref_with_source where col = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from ambiguous_ref_with_source where col = 1",
+ "Table": "ambiguous_ref_with_source"
+ },
+ "TablesUsed": [
+ "main.ambiguous_ref_with_source"
+ ]
+ }
+ },
+ {
+ "comment": "join with unqualified unambiguous ref with source routes to requested table",
+ "query": "select user.col from user join ref_with_source",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join ref_with_source where 1 != 1",
+ "Query": "select `user`.col from `user` join ref_with_source",
+ "Table": "`user`, ref_with_source"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref_with_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, ref_with_source where 1 != 1",
+ "Query": "select `user`.col from `user`, ref_with_source",
+ "Table": "`user`, ref_with_source"
+ },
+ "TablesUsed": [
+ "user.ref_with_source",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join with unqualified reference optimize routes when source & reference have different names",
+ "query": "select user.col from user join ref_in_source",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref_in_source",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_ref_in_source",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from ref_in_source where 1 != 1",
+ "Query": "select 1 from ref_in_source",
+ "Table": "ref_in_source"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref_in_source",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, ref_with_source as ref_in_source where 1 != 1",
+ "Query": "select `user`.col from `user`, ref_with_source as ref_in_source",
+ "Table": "`user`, ref_with_source"
+ },
+ "TablesUsed": [
+ "user.ref_with_source",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join with unqualified reference respects routing rules",
+ "query": "select user.col from user join rerouted_ref",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join rerouted_ref",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_rerouted_ref",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from rerouted_ref where 1 != 1",
+ "Query": "select 1 from rerouted_ref",
+ "Table": "rerouted_ref"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join rerouted_ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, ref as rerouted_ref where 1 != 1",
+ "Query": "select `user`.col from `user`, ref as rerouted_ref",
+ "Table": "`user`, ref"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.user"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json
new file mode 100644
index 00000000000..57e48630c0e
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json
@@ -0,0 +1,8360 @@
+[
+ {
+ "comment": "No column referenced",
+ "query": "select 1 from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "'*' expression for simple route",
+ "query": "select user.* from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "unqualified '*' expression for simple route",
+ "query": "select * from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select with timeout directive sets QueryTimeout in the route",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select aggregation with timeout directive sets QueryTimeout in the route",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select limit with timeout directive sets QueryTimeout in the route",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select limit with timeout directive sets QueryTimeout in the route",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded limit 10",
+ "QueryTimeout": 1000,
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded limit 10",
+ "QueryTimeout": 1000,
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select with partial scatter directive",
+ "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select aggregation with partial scatter directive",
+ "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select aggregation with partial scatter directive - added comments to try to confuse the hint extraction",
+ "query": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select limit with partial scatter directive",
+ "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "qualified '*' expression for simple route",
+ "query": "select user.* from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "fully qualified '*' expression for simple route",
+ "query": "select user.user.* from user.user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.* from user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.* from user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select * from authoritative table",
+ "query": "select * from authoritative",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
+ "Query": "select user_id, col1, col2 from authoritative",
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
+ "Query": "select user_id, col1, col2 from authoritative",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "select * from join of authoritative tables",
+ "query": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id where 1 != 1",
+ "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id",
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where 1 != 1",
+ "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where a.user_id = b.user_id",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "test table lookup failure for authoritative code path",
+ "query": "select a.* from authoritative",
+ "v3-plan": "VT05004: table 'a' does not exist",
+ "gen4-plan": "Unknown table 'a'"
+ },
+ {
+ "comment": "select * from qualified authoritative table",
+ "query": "select a.* from authoritative a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
+ "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
+ "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "select * from intermixing of authoritative table with non-authoritative results in no expansion",
+ "query": "select * from authoritative join user on authoritative.user_id=user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative join user on authoritative.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from authoritative join `user` on authoritative.user_id = `user`.id where 1 != 1",
+ "Query": "select * from authoritative join `user` on authoritative.user_id = `user`.id",
+ "Table": "authoritative, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative join user on authoritative.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from authoritative, `user` where 1 != 1",
+ "Query": "select * from authoritative, `user` where authoritative.user_id = `user`.id",
+ "Table": "`user`, authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select authoritative.* with intermixing still expands",
+ "query": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id where 1 != 1",
+ "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id",
+ "Table": "authoritative, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where 1 != 1",
+ "Query": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where a.user_id = `user`.id",
+ "Table": "`user`, authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "auto-resolve anonymous columns for simple route",
+ "query": "select anon_col from user join user_extra on user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select anon_col from `user`, user_extra where 1 != 1",
+ "Query": "select anon_col from `user`, user_extra where `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Cannot auto-resolve for cross-shard joins",
+ "query": "select col from user join user_extra",
+ "v3-plan": "VT03019: symbol col not found",
+ "gen4-plan": "Column 'col' in field list is ambiguous"
+ },
+ {
+ "comment": "Auto-resolve should work if unique vindex columns are referenced",
+ "query": "select id, user_id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, user_id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, user_id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "database calls should be substituted",
+ "query": "select database() from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select database() from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__vtdbname as database()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select database() from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__vtdbname as database()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for unsharded route",
+ "query": "select last_insert_id() as x from main.unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() as x from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as x from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() as x from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as x from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select from dual on unqualified keyspace",
+ "query": "select @@session.auto_increment_increment from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "select from pinned table",
+ "query": "select * from pin_test",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from pin_test",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from pin_test where 1 != 1",
+ "Query": "select * from pin_test",
+ "Table": "pin_test",
+ "Values": [
+ "VARCHAR(\"\\x80\")"
+ ],
+ "Vindex": "binary"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from pin_test",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from pin_test where 1 != 1",
+ "Query": "select * from pin_test",
+ "Table": "pin_test",
+ "Values": [
+ "VARCHAR(\"\\x80\")"
+ ],
+ "Vindex": "binary"
+ },
+ "TablesUsed": [
+ "user.pin_test"
+ ]
+ }
+ },
+ {
+ "comment": "select from dual on sharded keyspace",
+ "query": "select @@session.auto_increment_increment from user.dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from user.dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from user.dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "user.dual"
+ ]
+ }
+ },
+ {
+ "comment": "RHS route referenced",
+ "query": "select user_extra.id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Both routes referenced",
+ "query": "select user.col, user_extra.id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Expression with single-route reference",
+ "query": "select user.col, user_extra.id + user_extra.col from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.id + user_extra.col from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.id + user_extra.col from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Jumbled references",
+ "query": "select user.col, user_extra.id, user.col2 from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Comments",
+ "query": "select /* comment */ user.col from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /* comment */ user.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select /* comment */ `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select /* comment */ 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /* comment */ user.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select /* comment */ `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select /* comment */ 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "for update",
+ "query": "select user.col from user join user_extra for update",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra for update",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` for update",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra for update",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra for update",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` for update",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra for update",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Field query should work for joins select bind vars",
+ "query": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
+ "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
+ "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Case preservation",
+ "query": "select user.Col, user_extra.Id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.Col, user_extra.Id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.Col from `user` where 1 != 1",
+ "Query": "select `user`.Col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
+ "Query": "select user_extra.Id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.Col, user_extra.Id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.Col from `user` where 1 != 1",
+ "Query": "select `user`.Col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
+ "Query": "select user_extra.Id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "syntax error",
+ "query": "the quick brown fox",
+ "plan": "syntax error at position 4 near 'the'"
+ },
+ {
+ "comment": "Hex number is not treated as a simple value",
+ "query": "select * from user where id = 0x04",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 0x04",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 0x04",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 0x04",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 0x04",
+ "Table": "`user`",
+ "Values": [
+ "VARBINARY(\"\\x04\")"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded limit offset",
+ "query": "select user_id from music order by user_id limit 10, 20",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_id from music order by user_id limit 10, 20",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(20)",
+ "Offset": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_id from music order by user_id limit 10, 20",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(20)",
+ "Offset": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Sharding Key Condition in Parenthesis",
+ "query": "select * from user where name ='abc' AND (id = 4) limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Multiple parenthesized expressions",
+ "query": "select * from user where (id = 4) AND (name ='abc') limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Multiple parenthesized expressions",
+ "query": "select * from user where (id = 4 and name ='abc') limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4 and name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4 and name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column Aliasing with Table.Column",
+ "query": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column Aliasing with Column",
+ "query": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Booleans and parenthesis",
+ "query": "select * from user where (id = 1) AND name = true limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column as boolean-ish",
+ "query": "select * from user where (id = 1) AND name limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "PK as fake boolean, and column as boolean-ish",
+ "query": "select * from user where (id = 5) AND name = true limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 5) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 5) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "top level subquery in select",
+ "query": "select a, (select col from user) from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, (select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, (select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sub-expression subquery in select",
+ "query": "select a, 1+(select col from user) from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, 1+(select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, 1 + :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, 1+(select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, 1 + :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select * from derived table expands specific columns",
+ "query": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
+ "Query": "select `user`.id as id1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
+ "Query": "select user_extra.id as id2 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
+ "Query": "select `user`.id as id1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
+ "Query": "select user_extra.id as id2 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "duplicate columns not allowed in derived table",
+ "query": "select * from (select user.id, user_extra.id from user join user_extra) as t",
+ "v3-plan": "VT12001: unsupported: duplicate column names in subquery: id",
+ "gen4-plan": "Duplicate column name 'id'"
+ },
+ {
+ "comment": "non-existent symbol in cross-shard derived table",
+ "query": "select t.col from (select user.id from user join user_extra) as t",
+ "v3-plan": "VT03019: symbol t.col not found",
+ "gen4-plan": "symbol t.col not found"
+ },
+ {
+ "comment": "union with the same target shard",
+ "query": "select * from music where user_id = 1 union select * from user where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union with the same target shard last_insert_id",
+ "query": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union in derived table",
+ "query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
+ "Query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.col1, a.col2 from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
+ "Query": "select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union in subquery",
+ "query": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, `name` from unsharded where 1 != 1",
+ "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, `name` from unsharded where 1 != 1",
+ "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "query": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
+ "Table": "unsharded, unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union",
+ "query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Table": "unsharded, unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded nested union",
+ "query": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
+ "Table": "unsharded, unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded nested union with limit",
+ "query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
+ "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
+ "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: ensure directives are not lost",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded as route2 where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
+ "QueryTimeout": 1000,
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded as route2 where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
+ "QueryTimeout": 1000,
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "testing SingleRow Projection",
+ "query": "select 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(42) as 42"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(42) as 42"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "don't filter on the vtgate",
+ "query": "select 42 from dual where false",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42 from dual where false",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual where false",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42 from dual where false",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual where false",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "testing SingleRow Projection with arithmetics",
+ "query": "select 42+2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42+2",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(44) as 42 + 2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42+2",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(44) as 42 + 2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows without limit",
+ "query": "select sql_calc_found_rows * from music where user_id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows with limit",
+ "query": "select sql_calc_found_rows * from music limit 100",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music limit 100",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music limit 100",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows with SelectEqualUnique plans",
+ "query": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1 limit 2",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1 limit 2",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows with group by and having",
+ "query": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
+ "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
+ "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows in sub queries",
+ "query": "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)",
+ "v3-plan": "VT03008: incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+ "gen4-plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+ },
+ {
+ "comment": "sql_calc_found_rows in derived table",
+ "query": "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1",
+ "v3-plan": "VT03008: incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+ "gen4-plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+ },
+ {
+ "comment": "select from unsharded keyspace into dumpfile",
+ "query": "select * from main.unsharded into Dumpfile 'x.txt'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into dumpfile 'x.txt'",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into dumpfile 'x.txt'",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select from unsharded keyspace into outfile",
+ "query": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select from unsharded keyspace into outfile s3",
+ "query": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Union after into outfile is incorrect",
+ "query": "select id from user into outfile 'out_file_name' union all select id from music",
+ "plan": "syntax error at position 55 near 'union'"
+ },
+ {
+ "comment": "Into outfile s3 in derived table is incorrect",
+ "query": "select id from (select id from user into outfile s3 'inner_outfile') as t2",
+ "plan": "syntax error at position 41 near 'into'"
+ },
+ {
+ "comment": "Into outfile s3 in derived table with union incorrect",
+ "query": "select id from (select id from user into outfile s3 'inner_outfile' union select 1) as t2",
+ "plan": "syntax error at position 41 near 'into'"
+ },
+ {
+ "comment": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "query": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
+ "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
+ "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Add two tables with the same column in a join",
+ "query": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`, user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id where 1 != 1",
+ "Query": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id",
+ "Table": "`user`, user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1",
+ "TableName": "unsharded_`user`, user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id, s.id from `user` as t, user_extra as s where 1 != 1",
+ "Query": "select t.id, s.id from `user` as t, user_extra as s where t.id = s.user_id",
+ "Table": "`user`, user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "((((select 1))))",
+ "query": "((((select 1))))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "((((select 1))))",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "((((select 1))))",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Merging dual with user",
+ "query": "select 42, id from dual, user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, id from dual, user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "dual_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, id from dual, user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 42, id from dual, `user` where 1 != 1",
+ "Query": "select 42, id from dual, `user`",
+ "Table": "`user`, dual"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Table named \"dual\" with a qualifier joined on user should not be merged",
+ "query": "select 42, user.id from main.dual, user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, user.id from main.dual, user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "dual_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, user.id from main.dual, user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "dual_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "query": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "query": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
+ "Query": "select :__sq1 as a from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
+ "Query": "select :__sq1 as a from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra",
+ "query": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra",
+ "plan": "VT12001: unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "plan test for a natural character set string",
+ "query": "select N'string' from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select N'string' from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "VARCHAR(\"string\") as N'string'"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select N'string' from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "VARCHAR(\"string\") as N'string'"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "select expression having dependencies on both sides of a join",
+ "query": "select user.id * user_id as amount from user, user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id * user_id as amount from user, user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
+ "Query": "select :user_id * user_id as amount from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id * user_id as amount from user, user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
+ "Query": "select :user_id * user_id as amount from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery in exists clause",
+ "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)",
+ "v3-plan": "VT12001: unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "ProjectedIndexes": "-2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
+ "Query": "select `user`.id, col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery in exists clause with an order by",
+ "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col",
+ "v3-plan": "VT12001: unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "ProjectedIndexes": "-2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
+ "OrderBy": "1 ASC",
+ "Query": "select `user`.id, col from `user` order by col asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery having dependencies on two tables",
+ "query": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)",
+ "v3-plan": "VT12001: unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "u1_col": 0,
+ "u2_col": 1
+ },
+ "ProjectedIndexes": "-3",
+ "TableName": "`user`_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,L:1",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col, 1 from `user` as u1 where 1 != 1",
+ "Query": "select u1.col, 1 from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
+ "Query": "select u2.col from `user` as u2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery using a column twice",
+ "query": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
+ "v3-plan": "VT12001: unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "ProjectedIndexes": "-2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, 1 from `user` as u where 1 != 1",
+ "Query": "select u.col, 1 from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery part of an OR clause",
+ "query": "select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
+ "v3-plan": "VT12001: unsupported: cross-shard correlated subquery",
+ "gen4-plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause"
+ },
+ {
+ "comment": "correlated subquery that is dependent on one side of a join, fully mergeable",
+ "query": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id)",
+ "Table": "music, `user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id) and music.user_id = `user`.id",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union as a derived table",
+ "query": "select found from (select id as found from user union all (select id from unsharded)) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as found from `user` where 1 != 1",
+ "Query": "select id as found from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as found from `user` where 1 != 1",
+ "Query": "select id as found from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "use output column containing data from both sides of the join",
+ "query": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0,
+ "user_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col + :user_col from user_extra where 1 != 1",
+ "Query": "select user_extra.col + :user_col from user_extra where user_extra.id = :user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 1,
+ "user_extra_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id, user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.id, user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_extra_col + `user`.col from `user` where 1 != 1",
+ "Query": "select :user_extra_col + `user`.col from `user` where `user`.id = :user_extra_id",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable derived table with order by and limit",
+ "query": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (select col from unsharded where 1 != 1) as f left join unsharded as u on f.col = u.id where 1 != 1",
+ "Query": "select 1 from (select col from unsharded order by unsharded.col1 desc limit 0, 12) as f left join unsharded as u on f.col = u.id",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable derived table with group by and limit",
+ "query": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (select col, count(*) as a from unsharded where 1 != 1 group by col) as f left join unsharded as u on f.col = u.id where 1 != 1",
+ "Query": "select 1 from (select col, count(*) as a from unsharded group by col having count(*) > 0 limit 0, 12) as f left join unsharded as u on f.col = u.id",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select user.id, trim(leading 'x' from user.name) from user",
+ "query": "select user.id, trim(leading 'x' from user.name) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, trim(leading 'x' from user.name) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, trim(leading 'x' from user.name) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "json utility functions",
+ "query": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
+ "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
+ "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "dual query with exists clause",
+ "query": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where information_schema.`TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and information_schema.`TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
+ "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and `TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
+ "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "json_quote, json_object and json_array",
+ "query": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
+ "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
+ "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "select (select id from user order by id limit 1) from user_extra",
+ "query": "select (select id from user order by id limit 1) from user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select id from user order by id limit 1) from user_extra",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
+ "Query": "select :__sq1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select id from user order by id limit 1) from user_extra",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
+ "Query": "select :__sq1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "yeah, it does not make sense, but it's valid",
+ "query": "select exists(select 1) from user where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select 1) from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual limit 1",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq_has_values1 from `user` where 1 != 1",
+ "Query": "select :__sq_has_values1 from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select 1) from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from dual where 1 != 1) from `user` where 1 != 1",
+ "Query": "select exists (select 1 from dual limit 1) from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "json schema validation functions",
+ "query": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "json search functions",
+ "query": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
+ "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
+ "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Json extract and json unquote shorthands",
+ "query": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1",
+ "Query": "select a -> '$[4]', a ->> '$[3]' from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1",
+ "Query": "select a -> '$[4]', a ->> '$[3]' from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "groupe by with non aggregated columns and table alias",
+ "query": "select u.id, u.age from user u group by u.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, u.age from user u group by u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
+ "Query": "select u.id, u.age from `user` as u group by u.id",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, u.age from user u group by u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
+ "Query": "select u.id, u.age from `user` as u group by u.id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Functions that return JSON value attributes",
+ "query": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
+ "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
+ "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Json array functions",
+ "query": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
+ "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
+ "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Json merge functions",
+ "query": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
+ "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
+ "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "JSON modifier functions",
+ "query": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Reference with a subquery which can be merged",
+ "query": "select exists(select id from user where id = 4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select id from user where id = 4)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where id = 4 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select id from user where id = 4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 4 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Reference with a subquery which cannot be merged",
+ "query": "select exists(select * from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert function not requiring any table",
+ "query": "select insert('Quadratic', 3, 4, 'What')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert('Quadratic', 3, 4, 'What')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
+ "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert('Quadratic', 3, 4, 'What')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
+ "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "insert function using column names as arguments",
+ "query": "select insert(tcol1, id, 3, tcol2) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert(tcol1, id, 3, tcol2) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
+ "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert(tcol1, id, 3, tcol2) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
+ "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "gtid functions",
+ "query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
+ "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
+ "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Predicate in apply join which is merged",
+ "query": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_extra_user_id": 1
+ },
+ "TableName": "`user`_user_extra_user_metadata",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
+ "Query": "select user_extra.user_id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_metadata.user_id from user_metadata where 1 != 1",
+ "Query": "select user_metadata.user_id from user_metadata where user_metadata.user_id = :user_extra_user_id",
+ "Table": "user_metadata",
+ "Values": [
+ ":user_extra_user_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra, user_metadata",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_metadata.user_id from user_extra, user_metadata where 1 != 1",
+ "Query": "select user_metadata.user_id from user_extra, user_metadata where user_extra.col = :user_col and user_extra.user_id = user_metadata.user_id",
+ "Table": "user_extra, user_metadata"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra",
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates",
+ "query": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where 1 != 1",
+ "Query": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where `user`.id = 123 and music.id = 456",
+ "Table": "`user`, music_extra, music",
+ "Values": [
+ "INT64(123)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user`, music_extra, music where 1 != 1",
+ "Query": "select `user`.id from `user`, music_extra, music where music.id = 456 and `user`.id = 123 and `user`.id = music_extra.user_id and music_extra.user_id = music.user_id",
+ "Table": "`user`, music, music_extra",
+ "Values": [
+ "INT64(123)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.music_extra",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SQL_CALC_FOUND_ROWS with vindex lookup",
+ "query": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where `name` = 'aa'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where `name` = 'aa'",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "`None` route being merged with another route via join predicate on Vindex columns",
+ "query": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id in (null) and `user`.id = 5",
+ "Table": "music, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id in (null) and `user`.id = 5 and music.user_id = `user`.id",
+ "Table": "`user`, music"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Treating single value tuples as `EqualUnique` routes",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(5))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (5)) and music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in ::__vals)",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with derived table",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select music.id from music where 1 != 1) as _inner where 1 != 1",
+ "Query": "select * from (select music.id from music where music.user_id in ::__vals) as _inner",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in ::__vals) as _inner)",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with inner scatter query",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.foo = 'bar'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "(INT64(3), INT64(4), INT64(5))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.foo = 'bar') and music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(3), INT64(4), INT64(5))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) and music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, but not a top level predicate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) or music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`IN` comparison on Vindex with `None` subquery, as routing predicate",
+ "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`IN` comparison on Vindex with `None` subquery, as non-routing predicate",
+ "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable scatter subquery",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.genre = 'pop'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1 group by music.id",
+ "Query": "select music.id from music where music.genre = 'pop' group by music.id",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)",
+ "v3-plan": "VT12001: unsupported: in scatter query: GROUP BY column must reference column in SELECT list",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(0) AS id",
+ "GroupBy": "(1|2)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable scatter subquery with LIMIT",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1 group by music.user_id",
+ "Query": "select max(music.id) from music where music.user_id in ::__vals group by music.user_id",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select max(music.id) from music where music.user_id in ::__vals group by music.user_id)",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable subquery with `MAX` aggregate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0) AS max(music.id)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with `LIMIT` due to `EqualUnique` route",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with multiple levels of derived statements",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select * from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1",
+ "Query": "select * from (select * from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit)",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with multiple levels of derived statements, using a single value `IN` predicate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
+ "Table": "music",
+ "Values": [
+ "(INT64(5))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id in (5) limit 10) as subquery_for_limit) as subquery_for_limit)",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable subquery with multiple levels of derived statements, using a multi value `IN` predicate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable subquery with multiple levels of derived statements",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null))",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`None` subquery as top level predicate - outer query changes from `EqualUnique` to `None` on merge",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`None` subquery nested inside `OR` expression - outer query keeps routing information",
+ "query": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Joining with a subquery that uses an aggregate column and an `EqualUnique` route can be merged together",
+ "query": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "music_id": 0
+ },
+ "TableName": "music_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
+ "Query": "select 1 from (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = :music_id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
+ "Query": "select music.id from music, (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = music.id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Joining with a subquery that uses an `EqualUnique` route can be merged",
+ "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
+ "Query": "select music.id from music join (select id from music where music.user_id = 5) as other on other.id = music.id",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
+ "Query": "select music.id from music, (select id from music where music.user_id = 5) as other where other.id = music.id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Joining with a subquery that has an `IN` route can be merged",
+ "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
+ "Query": "select music.id from music join (select id from music where music.user_id in (5, 6, 7)) as other on other.id = music.id",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
+ "Query": "select music.id from music, (select id from music where music.user_id in ::__vals) as other where other.id = music.id",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6), INT64(7))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "limit on the vtgate has to be executed on the LHS of a join",
+ "query": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "ue_user_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = :ue_user_id",
+ "Table": "`user`",
+ "Values": [
+ ":ue_user_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
+ "query": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "t_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS b",
+ "GroupBy": "(0|3), (2|4)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra where 1 != 1 group by id, weight_string(id), req, weight_string(req)",
+ "OrderBy": "(0|3) ASC, (2|4) ASC",
+ "Query": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra group by id, weight_string(id), req, weight_string(req) order by id asc, req asc",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a from `user` where 1 != 1",
+ "Query": "select `user`.a from `user` where `user`.id = :t_id",
+ "Table": "`user`",
+ "Values": [
+ ":t_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "cant switch sides for outer joins",
+ "query": "select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
+ "plan": "VT12001: unsupported: LEFT JOIN with derived tables"
+ },
+ {
+ "comment": "limit on both sides means that we can't evaluate this at all",
+ "query": "select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": "VT12001: unsupported: JOIN between derived tables"
+ },
+ {
+ "comment": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
+ "query": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "other_maxt": 0
+ },
+ "TableName": "music_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select other.maxt from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
+ "Query": "select other.maxt from (select max(id) as maxt from music where music.user_id = 5) as other",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id = :other_maxt",
+ "Table": "music",
+ "Values": [
+ ":other_maxt"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1",
+ "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Earlier columns are in scope in subqueries https://github.com/vitessio/vitess/issues/11246",
+ "query": "SELECT 1 as x, (SELECT x)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT 1 as x, (SELECT x)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
+ "Query": "select 1 as x, (select x from dual) from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT 1 as x, (SELECT x)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
+ "Query": "select 1 as x, (select x from dual) from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "(OR 1 = 0) doesn't cause unnecessary scatter",
+ "query": "select * from user where id = 1 or 1 = 0",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 1 or 1 = 0",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 or 1 = 0",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 1 or 1 = 0",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "(OR 2 < 1) doesn't cause unnecessary scatter",
+ "query": "select * from user where id = 1 or 2 < 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 1 or 2 < 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 or 2 < 1",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 1 or 2 < 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "query with a derived table and dual table in unsharded keyspace",
+ "query": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;",
+ "v3-plan": {
+ "Instructions": {
+ "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "OperatorType": "Route",
+ "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1",
+ "Table": "unsharded_a, dual",
+ "Variant": "Unsharded"
+ },
+ "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;",
+ "QueryType": "SELECT"
+ },
+ "gen4-plan": {
+ "Instructions": {
+ "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "OperatorType": "Route",
+ "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1",
+ "Table": "dual, unsharded_a",
+ "Variant": "Unsharded"
+ },
+ "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;",
+ "QueryType": "SELECT",
+ "TablesUsed": [
+ "main.dual",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "subquery having join table on clause, using column reference of outer select table",
+ "query": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1",
+ "v3-plan": "VT03019: symbol u3.id not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select (select 1 from `user` as u1 join `user` as u2 on u1.id = u2.id and u1.id = u3.id where 1 != 1) as subquery from `user` as u3 where 1 != 1",
+ "Query": "select (select 1 from `user` as u1 join `user` as u2 on u1.id = u2.id and u1.id = u3.id) as subquery from `user` as u3 where u3.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "allow last_insert_id with argument",
+ "query": "select last_insert_id(id) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id(id) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select last_insert_id(id) from `user` where 1 != 1",
+ "Query": "select last_insert_id(id) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id(id) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select last_insert_id(id) from `user` where 1 != 1",
+ "Query": "select last_insert_id(id) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "pick email as vindex lookup",
+ "query": "select * from customer where email = 'a@mail.com'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where email = 'a@mail.com'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where email = 'a@mail.com'",
+ "Table": "customer",
+ "Values": [
+ "VARCHAR(\"a@mail.com\")"
+ ],
+ "Vindex": "unq_lkp_vdx"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where email = 'a@mail.com'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"a@mail.com\")"
+ ],
+ "Vindex": "unq_lkp_vdx",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select unq_key, keyspace_id from unq_lkp_idx where 1 != 1",
+ "Query": "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::__vals",
+ "Table": "unq_lkp_idx",
+ "Values": [
+ ":unq_key"
+ ],
+ "Vindex": "shard_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where email = 'a@mail.com'",
+ "Table": "customer"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.customer"
+ ]
+ }
+ },
+ {
+ "comment": "phone is in backfill vindex - not selected for vindex lookup",
+ "query": "select * from customer where phone = 123456",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where phone = 123456",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where phone = 123456",
+ "Table": "customer",
+ "Values": [
+ "INT64(123456)"
+ ],
+ "Vindex": "unq_lkp_bf_vdx"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where phone = 123456",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where phone = 123456",
+ "Table": "customer"
+ },
+ "TablesUsed": [
+ "user.customer"
+ ]
+ }
+ },
+ {
+ "comment": "email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored",
+ "query": "select * from customer where email = 'a@mail.com' and phone = 123456",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where email = 'a@mail.com' and phone = 123456",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where email = 'a@mail.com' and phone = 123456",
+ "Table": "customer",
+ "Values": [
+ "INT64(123456)"
+ ],
+ "Vindex": "unq_lkp_bf_vdx"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where email = 'a@mail.com' and phone = 123456",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"a@mail.com\")"
+ ],
+ "Vindex": "unq_lkp_vdx",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select unq_key, keyspace_id from unq_lkp_idx where 1 != 1",
+ "Query": "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::__vals",
+ "Table": "unq_lkp_idx",
+ "Values": [
+ ":unq_key"
+ ],
+ "Vindex": "shard_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where email = 'a@mail.com' and phone = 123456",
+ "Table": "customer"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.customer"
+ ]
+ }
+ },
+ {
+ "comment": "predicate order changed: email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored",
+ "query": "select * from customer where phone = 123456 and email = 'a@mail.com'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where phone = 123456 and email = 'a@mail.com'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where phone = 123456 and email = 'a@mail.com'",
+ "Table": "customer",
+ "Values": [
+ "INT64(123456)"
+ ],
+ "Vindex": "unq_lkp_bf_vdx"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from customer where phone = 123456 and email = 'a@mail.com'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"a@mail.com\")"
+ ],
+ "Vindex": "unq_lkp_vdx",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select unq_key, keyspace_id from unq_lkp_idx where 1 != 1",
+ "Query": "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::__vals",
+ "Table": "unq_lkp_idx",
+ "Values": [
+ ":unq_key"
+ ],
+ "Vindex": "shard_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from customer where 1 != 1",
+ "Query": "select * from customer where phone = 123456 and email = 'a@mail.com'",
+ "Table": "customer"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.customer"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.txt b/go/vt/vtgate/planbuilder/testdata/select_cases.txt
deleted file mode 100644
index 3072533565c..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/select_cases.txt
+++ /dev/null
@@ -1,7732 +0,0 @@
-# No column referenced
-"select 1 from user"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# '*' expression for simple route
-"select user.* from user"
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# unqualified '*' expression for simple route
-"select * from user"
-{
- "QueryType": "SELECT",
- "Original": "select * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select with timeout directive sets QueryTimeout in the route
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select aggregation with timeout directive sets QueryTimeout in the route
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select limit with timeout directive sets QueryTimeout in the route
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select with partial scatter directive
-"select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select aggregation with partial scatter directive
-"select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select aggregation with partial scatter directive - added comments to try to confuse the hint extraction
-"/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select limit with partial scatter directive
-"select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# qualified '*' expression for simple route
-"select user.* from user"
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# fully qualified '*' expression for simple route
-"select user.user.* from user.user"
-{
- "QueryType": "SELECT",
- "Original": "select user.user.* from user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.user.* from user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select * from authoritative table
-"select * from authoritative"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
- "Query": "select user_id, col1, col2 from authoritative",
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
- "Query": "select user_id, col1, col2 from authoritative",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# select * from join of authoritative tables
-"select * from authoritative a join authoritative b on a.user_id=b.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id where 1 != 1",
- "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id",
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where 1 != 1",
- "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where a.user_id = b.user_id",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# test table lookup failure for authoritative code path
-"select a.* from authoritative"
-"table a not found"
-Gen4 error: Unknown table 'a'
-
-# select * from qualified authoritative table
-"select a.* from authoritative a"
-{
- "QueryType": "SELECT",
- "Original": "select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
- "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
- "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# select * from intermixing of authoritative table with non-authoritative results in no expansion
-"select * from authoritative join user on authoritative.user_id=user.id"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative join user on authoritative.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from authoritative join `user` on authoritative.user_id = `user`.id where 1 != 1",
- "Query": "select * from authoritative join `user` on authoritative.user_id = `user`.id",
- "Table": "authoritative, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative join user on authoritative.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from authoritative, `user` where 1 != 1",
- "Query": "select * from authoritative, `user` where authoritative.user_id = `user`.id",
- "Table": "`user`, authoritative"
- },
- "TablesUsed": [
- "user.authoritative",
- "user.user"
- ]
-}
-
-# select authoritative.* with intermixing still expands
-"select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id where 1 != 1",
- "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id",
- "Table": "authoritative, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where 1 != 1",
- "Query": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where a.user_id = `user`.id",
- "Table": "`user`, authoritative"
- },
- "TablesUsed": [
- "user.authoritative",
- "user.user"
- ]
-}
-
-# auto-resolve anonymous columns for simple route
-"select anon_col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select anon_col from `user`, user_extra where 1 != 1",
- "Query": "select anon_col from `user`, user_extra where `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Cannot auto-resolve for cross-shard joins
-"select col from user join user_extra"
-"symbol col not found"
-Gen4 error: Column 'col' in field list is ambiguous
-
-# Auto-resolve should work if unique vindex columns are referenced
-"select id, user_id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select id, user_id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, user_id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# database calls should be substituted
-"select database() from dual"
-{
- "QueryType": "SELECT",
- "Original": "select database() from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__vtdbname as database()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select database() from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__vtdbname as database()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# last_insert_id for unsharded route
-"select last_insert_id() as x from main.unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() as x from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as x from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() as x from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as x from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# select from dual on unqualified keyspace
-"select @@session.auto_increment_increment from dual"
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# select from pinned table
-"select * from pin_test"
-{
- "QueryType": "SELECT",
- "Original": "select * from pin_test",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from pin_test where 1 != 1",
- "Query": "select * from pin_test",
- "Table": "pin_test",
- "Values": [
- "VARCHAR(\"\\x80\")"
- ],
- "Vindex": "binary"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from pin_test",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from pin_test where 1 != 1",
- "Query": "select * from pin_test",
- "Table": "pin_test",
- "Values": [
- "VARCHAR(\"\\x80\")"
- ],
- "Vindex": "binary"
- },
- "TablesUsed": [
- "user.pin_test"
- ]
-}
-
-# select from dual on sharded keyspace
-"select @@session.auto_increment_increment from user.dual"
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from user.dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from user.dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "user.dual"
- ]
-}
-
-# RHS route referenced
-"select user_extra.id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Both routes referenced
-"select user.col, user_extra.id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Expression with single-route reference
-"select user.col, user_extra.id + user_extra.col from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.id + user_extra.col from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.id + user_extra.col from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Jumbled references
-"select user.col, user_extra.id, user.col2 from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Comments
-"select /* comment */ user.col from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select /* comment */ user.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select /* comment */ `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select /* comment */ 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /* comment */ user.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select /* comment */ `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select /* comment */ 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# for update
-"select user.col from user join user_extra for update"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra for update",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` for update",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra for update",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra for update",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` for update",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra for update",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Field query should work for joins select bind vars
-"select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
- "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
- "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Case preservation
-"select user.Col, user_extra.Id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.Col, user_extra.Id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.Col from `user` where 1 != 1",
- "Query": "select `user`.Col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
- "Query": "select user_extra.Id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.Col, user_extra.Id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.Col from `user` where 1 != 1",
- "Query": "select `user`.Col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
- "Query": "select user_extra.Id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# syntax error
-"the quick brown fox"
-"syntax error at position 4 near 'the'"
-Gen4 plan same as above
-
-# Hex number is not treated as a simple value
-"select * from user where id = 0x04"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 0x04",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 0x04",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 0x04",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 0x04",
- "Table": "`user`",
- "Values": [
- "VARBINARY(\"\\x04\")"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# sharded limit offset
-"select user_id from music order by user_id limit 10, 20"
-{
- "QueryType": "SELECT",
- "Original": "select user_id from music order by user_id limit 10, 20",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(20)",
- "Offset": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_id from music order by user_id limit 10, 20",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(20)",
- "Offset": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Sharding Key Condition in Parenthesis
-"select * from user where name ='abc' AND (id = 4) limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Multiple parenthesized expressions
-"select * from user where (id = 4) AND (name ='abc') limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Multiple parenthesized expressions
-"select * from user where (id = 4 and name ='abc') limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4 and name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4 and name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column Aliasing with Table.Column
-"select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column Aliasing with Column
-"select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3"
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Booleans and parenthesis
-"select * from user where (id = 1) AND name = true limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column as boolean-ish
-"select * from user where (id = 1) AND name limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# PK as fake boolean, and column as boolean-ish
-"select * from user where (id = 5) AND name = true limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 5) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 5) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# top level subquery in select
-"select a, (select col from user) from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select a, (select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
- "Query": "select a, :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, (select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
- "Query": "select a, :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sub-expression subquery in select
-"select a, 1+(select col from user) from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select a, 1+(select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
- "Query": "select a, 1 + :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, 1+(select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
- "Query": "select a, 1 + :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# select * from derived table expands specific columns
-"select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
- "Query": "select `user`.id as id1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
- "Query": "select user_extra.id as id2 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
- "Query": "select `user`.id as id1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
- "Query": "select user_extra.id as id2 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# duplicate columns not allowed in derived table
-"select * from (select user.id, user_extra.id from user join user_extra) as t"
-"duplicate column names in subquery: id"
-Gen4 error: Duplicate column name 'id'
-
-# non-existent symbol in cross-shard derived table
-"select t.col from (select user.id from user join user_extra) as t"
-"symbol t.col not found in table or subquery"
-Gen4 error: symbol t.col not found
-
-# union with the same target shard
-"select * from music where user_id = 1 union select * from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union with the same target shard last_insert_id
-"select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# unsharded union in derived table
-"select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
- "Query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.col1, a.col2 from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
- "Query": "select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# unsharded union in subquery
-"select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
-{
- "QueryType": "SELECT",
- "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, `name` from unsharded where 1 != 1",
- "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, `name` from unsharded where 1 != 1",
- "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-"(select id from unsharded) union (select id from unsharded_auto) order by id limit 5"
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
- "Table": "unsharded, unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# unsharded union
-"select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
-{
- "QueryType": "SELECT",
- "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Table": "unsharded, unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# unsharded nested union
-"(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
- "Table": "unsharded, unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# unsharded nested union with limit
-"(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1"
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
- "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
- "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# routing rules: ensure directives are not lost
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded as route2 where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
- "QueryTimeout": 1000,
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded as route2 where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
- "QueryTimeout": 1000,
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# testing SingleRow Projection
-"select 42"
-{
- "QueryType": "SELECT",
- "Original": "select 42",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(42) as 42"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(42) as 42"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# don't filter on the vtgate
-"select 42 from dual where false"
-{
- "QueryType": "SELECT",
- "Original": "select 42 from dual where false",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual where false",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42 from dual where false",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual where false",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# testing SingleRow Projection with arithmetics
-"select 42+2"
-{
- "QueryType": "SELECT",
- "Original": "select 42+2",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(44) as 42 + 2"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42+2",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(44) as 42 + 2"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# sql_calc_found_rows without limit
-"select sql_calc_found_rows * from music where user_id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows with limit
-"select sql_calc_found_rows * from music limit 100"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music limit 100",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music limit 100",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music",
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows with SelectEqualUnique plans
-"select sql_calc_found_rows * from music where user_id = 1 limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1 limit 2",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1 limit 2",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows with group by and having
-"select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
- "OrderBy": "(0|2) ASC",
- "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
- "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
- "OrderBy": "(0|2) ASC",
- "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
- "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows in sub queries
-"select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)"
-"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
-Gen4 plan same as above
-
-# sql_calc_found_rows in derived table
-"select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1"
-"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
-Gen4 plan same as above
-
-# select from unsharded keyspace into dumpfile
-"select * from main.unsharded into Dumpfile 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into dumpfile 'x.txt'",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into dumpfile 'x.txt'",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# select from unsharded keyspace into outfile
-"select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'"
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# select from unsharded keyspace into outfile s3
-"select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off"
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Union after into outfile is incorrect
-"select id from user into outfile 'out_file_name' union all select id from music"
-"syntax error at position 55 near 'union'"
-Gen4 plan same as above
-
-# Into outfile s3 in derived table is incorrect
-"select id from (select id from user into outfile s3 'inner_outfile') as t2"
-"syntax error at position 41 near 'into'"
-Gen4 plan same as above
-
-# Into outfile s3 in derived table with union incorrect
-"select id from (select id from user into outfile s3 'inner_outfile' union select 1) as t2"
-"syntax error at position 41 near 'into'"
-Gen4 plan same as above
-
-"select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
- "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
- "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Add two tables with the same column in a join
-"select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`, user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id where 1 != 1",
- "Query": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id",
- "Table": "`user`, user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1",
- "TableName": "unsharded_`user`, user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id, s.id from `user` as t, user_extra as s where 1 != 1",
- "Query": "select t.id, s.id from `user` as t, user_extra as s where t.id = s.user_id",
- "Table": "`user`, user_extra"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-"((((select 1))))"
-{
- "QueryType": "SELECT",
- "Original": "((((select 1))))",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "((((select 1))))",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Merging dual with user
-"select 42, id from dual, user"
-{
- "QueryType": "SELECT",
- "Original": "select 42, id from dual, user",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "dual_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42, id from dual, user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 42, id from dual, `user` where 1 != 1",
- "Query": "select 42, id from dual, `user`",
- "Table": "`user`, dual"
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# Table named "dual" with a qualifier joined on user should not be merged
-"select 42, user.id from main.dual, user"
-{
- "QueryType": "SELECT",
- "Original": "select 42, user.id from main.dual, user",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "dual_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42, user.id from main.dual, user",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "dual_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-"select (select col from user limit 1) as a from user join user_extra order by a"
-{
- "QueryType": "SELECT",
- "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select t.a from (select (select col from user limit 1) as a from user join user_extra) t"
-{
- "QueryType": "SELECT",
- "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
- "Query": "select :__sq1 as a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
- "Query": "select :__sq1 as a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra"
-"unsupported: cross-shard correlated subquery"
-Gen4 plan same as above
-
-# plan test for a natural character set string
-"select N'string' from dual"
-{
- "QueryType": "SELECT",
- "Original": "select N'string' from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "VARCHAR(\"string\") as N'string'"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select N'string' from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "VARCHAR(\"string\") as N'string'"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# select expression having dependencies on both sides of a join
-"select user.id * user_id as amount from user, user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.id * user_id as amount from user, user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
- "Query": "select :user_id * user_id as amount from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id * user_id as amount from user, user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
- "Query": "select :user_id * user_id as amount from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery in exists clause
-"select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id)",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_id": 0
- },
- "ProjectedIndexes": "-2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
- "Query": "select `user`.id, col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id \u003c :user_id",
- "Table": "user_extra",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery in exists clause with an order by
-"select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by col"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by col",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_id": 0
- },
- "ProjectedIndexes": "-2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
- "OrderBy": "1 ASC",
- "Query": "select `user`.id, col from `user` order by col asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id \u003c :user_id",
- "Table": "user_extra",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery having dependencies on two tables
-"select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "u1_col": 0,
- "u2_col": 1
- },
- "ProjectedIndexes": "-3",
- "TableName": "`user`_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,L:1",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col, 1 from `user` as u1 where 1 != 1",
- "Query": "select u1.col, 1 from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
- "Query": "select u2.col from `user` as u2",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery using a column twice
-"select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "u_col": 0
- },
- "ProjectedIndexes": "-2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, 1 from `user` as u where 1 != 1",
- "Query": "select u.col, 1 from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery part of an OR clause
-"select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)"
-"unsupported: cross-shard correlated subquery"
-Gen4 error: exists sub-queries are only supported with AND clause
-
-# correlated subquery that is dependent on one side of a join, fully mergeable
-"SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id)",
- "Table": "music, `user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id) and music.user_id = `user`.id",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union as a derived table
-"select found from (select id as found from user union all (select id from unsharded)) as t"
-{
- "QueryType": "SELECT",
- "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as found from `user` where 1 != 1",
- "Query": "select id as found from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as found from `user` where 1 != 1",
- "Query": "select id as found from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# use output column containing data from both sides of the join
-"select user_extra.col + user.col from user join user_extra on user.id = user_extra.id"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0,
- "user_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col + :user_col from user_extra where 1 != 1",
- "Query": "select user_extra.col + :user_col from user_extra where user_extra.id = :user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 1,
- "user_extra_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id, user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.id, user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_extra_col + `user`.col from `user` where 1 != 1",
- "Query": "select :user_extra_col + `user`.col from `user` where `user`.id = :user_extra_id",
- "Table": "`user`",
- "Values": [
- ":user_extra_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable derived table with order by and limit
-"select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (select col from unsharded where 1 != 1) as f left join unsharded as u on f.col = u.id where 1 != 1",
- "Query": "select 1 from (select col from unsharded order by unsharded.col1 desc limit 0, 12) as f left join unsharded as u on f.col = u.id",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# mergeable derived table with group by and limit
-"select 1 from (select col, count(*) as a from main.unsharded group by col having a \u003e 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select col, count(*) as a from main.unsharded group by col having a \u003e 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (select col, count(*) as a from unsharded where 1 != 1 group by col) as f left join unsharded as u on f.col = u.id where 1 != 1",
- "Query": "select 1 from (select col, count(*) as a from unsharded group by col having count(*) \u003e 0 limit 0, 12) as f left join unsharded as u on f.col = u.id",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-"select user.id, trim(leading 'x' from user.name) from user"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, trim(leading 'x' from user.name) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id, trim(leading 'x' from user.name) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# json utility functions
-"select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user"
-{
- "QueryType": "SELECT",
- "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
- "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
- "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# dual query with exists clause
-"select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where information_schema.`TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and information_schema.`TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
- "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
- "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and `TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
- "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
- "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# json_quote, json_object and json_array
-"SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())"
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
- "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
- "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-"select (select id from user order by id limit 1) from user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select (select id from user order by id limit 1) from user_extra",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
- "Query": "select :__sq1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (select id from user order by id limit 1) from user_extra",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
- "Query": "select :__sq1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# yeah, it does not make sense, but it's valid
-"select exists(select 1) from user where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select 1) from user where id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual limit 1",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq_has_values1 from `user` where 1 != 1",
- "Query": "select :__sq_has_values1 from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select 1) from user where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from dual where 1 != 1) from `user` where 1 != 1",
- "Query": "select exists (select 1 from dual limit 1) from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# json schema validation functions
-"SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# json search functions
-"SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
- "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
- "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Json extract and json unquote shorthands
-"SELECT a-\u003e\"$[4]\", a-\u003e\u003e\"$[3]\" FROM user"
-{
- "QueryType": "SELECT",
- "Original": "SELECT a-\u003e\"$[4]\", a-\u003e\u003e\"$[3]\" FROM user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user` where 1 != 1",
- "Query": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT a-\u003e\"$[4]\", a-\u003e\u003e\"$[3]\" FROM user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user` where 1 != 1",
- "Query": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# groupe by with non aggregated columns and table alias
-"select u.id, u.age from user u group by u.id"
-{
- "QueryType": "SELECT",
- "Original": "select u.id, u.age from user u group by u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
- "Query": "select u.id, u.age from `user` as u group by u.id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id, u.age from user u group by u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
- "Query": "select u.id, u.age from `user` as u group by u.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Functions that return JSON value attributes
-"select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
- "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
- "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Json array functions
-"select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
- "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
- "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Json merge functions
-"select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
- "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
- "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# JSON modifier functions
-"select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Reference with a subquery which can be merged
-"select exists(select id from user where id = 4)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select id from user where id = 4)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where id = 4 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select id from user where id = 4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 4 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# Reference with a subquery which cannot be merged
-"select exists(select * from user)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# insert function not requiring any table
-"select insert('Quadratic', 3, 4, 'What')"
-{
- "QueryType": "SELECT",
- "Original": "select insert('Quadratic', 3, 4, 'What')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
- "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select insert('Quadratic', 3, 4, 'What')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
- "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# insert function using column names as arguments
-"select insert(tcol1, id, 3, tcol2) from user"
-{
- "QueryType": "SELECT",
- "Original": "select insert(tcol1, id, 3, tcol2) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
- "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select insert(tcol1, id, 3, tcol2) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
- "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# gtid functions
-"select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')"
-{
- "QueryType": "SELECT",
- "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
- "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
- "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Predicate in apply join which is merged
-"select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_extra_user_id": 1
- },
- "TableName": "`user`_user_extra_user_metadata",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
- "Query": "select user_extra.user_id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_metadata.user_id from user_metadata where 1 != 1",
- "Query": "select user_metadata.user_id from user_metadata where user_metadata.user_id = :user_extra_user_id",
- "Table": "user_metadata",
- "Values": [
- ":user_extra_user_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra, user_metadata",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_metadata.user_id from user_extra, user_metadata where 1 != 1",
- "Query": "select user_metadata.user_id from user_extra, user_metadata where user_extra.col = :user_col and user_extra.user_id = user_metadata.user_id",
- "Table": "user_extra, user_metadata"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra",
- "user.user_metadata"
- ]
-}
-
-# Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates
-"SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456"
-{
- "QueryType": "SELECT",
- "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where 1 != 1",
- "Query": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where `user`.id = 123 and music.id = 456",
- "Table": "`user`, music_extra, music",
- "Values": [
- "INT64(123)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user`, music_extra, music where 1 != 1",
- "Query": "select `user`.id from `user`, music_extra, music where music.id = 456 and `user`.id = 123 and `user`.id = music_extra.user_id and music_extra.user_id = music.user_id",
- "Table": "`user`, music, music_extra",
- "Values": [
- "INT64(123)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.music_extra",
- "user.user"
- ]
-}
-
-# SQL_CALC_FOUND_ROWS with vindex lookup
-"select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where `name` = 'aa'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where `name` = 'aa'",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# `None` route being merged with another route via join predicate on Vindex columns
-"SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id in (null) and `user`.id = 5",
- "Table": "music, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id in (null) and `user`.id = 5 and music.user_id = `user`.id",
- "Table": "`user`, music"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Treating single value tuples as `EqualUnique` routes
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(5))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (5)) and music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in ::__vals)",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes, with derived table
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select music.id from music where 1 != 1) as _inner where 1 != 1",
- "Query": "select * from (select music.id from music where music.user_id in ::__vals) as _inner",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in ::__vals) as _inner)",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes, with inner scatter query
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.foo = 'bar'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "(INT64(3), INT64(4), INT64(5))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.foo = 'bar') and music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(3), INT64(4), INT64(5))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) and music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes, but not a top level predicate
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) or music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `IN` comparison on Vindex with `None` subquery, as routing predicate
-"SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `IN` comparison on Vindex with `None` subquery, as non-routing predicate
-"SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable scatter subquery
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.genre = 'pop'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable scatter subquery with `GROUP BY` on unique vindex column
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1 group by music.id",
- "Query": "select music.id from music where music.genre = 'pop' group by music.id",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable scatter subquery with `GROUP BY` on-non vindex column
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)"
-"unsupported: in scatter query: group by column must reference column in SELECT list"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS id",
- "GroupBy": "(1|2)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)",
- "OrderBy": "(1|2) ASC",
- "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable scatter subquery with LIMIT
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with `MAX` aggregate and grouped by unique vindex
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1 group by music.user_id",
- "Query": "select max(music.id) from music where music.user_id in ::__vals group by music.user_id",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select max(music.id) from music where music.user_id in ::__vals group by music.user_id)",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable subquery with `MAX` aggregate
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0) AS max(music.id)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with `LIMIT` due to `EqualUnique` route
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with multiple levels of derived statements
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select * from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1",
- "Query": "select * from (select * from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit)",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with multiple levels of derived statements, using a single value `IN` predicate
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
- "Table": "music",
- "Values": [
- "(INT64(5))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id in (5) limit 10) as subquery_for_limit) as subquery_for_limit)",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable subquery with multiple levels of derived statements, using a multi value `IN` predicate
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable subquery with multiple levels of derived statements
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null))",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `None` subquery as top level predicate - outer query changes from `EqualUnique` to `None` on merge
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `None` subquery nested inside `OR` expression - outer query keeps routing information
-"SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Joining with a subquery that uses an aggregate column and an `EqualUnique` route can be merged together
-"SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "music_id": 0
- },
- "TableName": "music_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
- "Query": "select 1 from (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = :music_id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
- "Query": "select music.id from music, (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = music.id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Joining with a subquery that uses an `EqualUnique` route can be merged
-"SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
- "Query": "select music.id from music join (select id from music where music.user_id = 5) as other on other.id = music.id",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
- "Query": "select music.id from music, (select id from music where music.user_id = 5) as other where other.id = music.id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Joining with a subquery that has an `IN` route can be merged
-"SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
- "Query": "select music.id from music join (select id from music where music.user_id in (5, 6, 7)) as other on other.id = music.id",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
- "Query": "select music.id from music, (select id from music where music.user_id in ::__vals) as other where other.id = music.id",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6), INT64(7))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# limit on the vtgate has to be executed on the LHS of a join
-"select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "ue_user_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra limit :__upper_limit",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = :ue_user_id",
- "Table": "`user`",
- "Values": [
- ":ue_user_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "t_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS b",
- "GroupBy": "(0|3), (2|4)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra where 1 != 1 group by id, weight_string(id), req, weight_string(req)",
- "OrderBy": "(0|3) ASC, (2|4) ASC",
- "Query": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra group by id, weight_string(id), req, weight_string(req) order by id asc, req asc",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user` where `user`.id = :t_id",
- "Table": "`user`",
- "Values": [
- ":t_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# cant switch sides for outer joins
-"select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id"
-"unsupported: LEFT JOIN not supported for derived tables"
-Gen4 plan same as above
-
-# limit on both sides means that we can't evaluate this at all
-"select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id"
-"unsupported: filtering on results of cross-shard subquery"
-Gen4 error: unsupported: JOIN not supported between derived tables
-
-"SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "other_maxt": 0
- },
- "TableName": "music_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select other.maxt from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
- "Query": "select other.maxt from (select max(id) as maxt from music where music.user_id = 5) as other",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id = :other_maxt",
- "Table": "music",
- "Values": [
- ":other_maxt"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1",
- "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Earlier columns are in scope in subqueries https://github.com/vitessio/vitess/issues/11246
-"SELECT 1 as x, (SELECT x)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT 1 as x, (SELECT x)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
- "Query": "select 1 as x, (select x from dual) from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT 1 as x, (SELECT x)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
- "Query": "select 1 as x, (select x from dual) from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json
new file mode 100644
index 00000000000..5817157752b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json
@@ -0,0 +1,69 @@
+[
+ {
+ "comment": "EXISTS subquery when the default ks is different than the inner query",
+ "query": "select exists(select * from user where id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where id = 5 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "second_user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "second_user.dual",
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.txt b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.txt
deleted file mode 100644
index 347c07ad4c9..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-# EXISTS subquery when the default ks is different than the inner query
-"select exists(select * from user where id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where id = 5 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "second_user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "second_user.dual",
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json
new file mode 100644
index 00000000000..822ed6c2307
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json
@@ -0,0 +1,48 @@
+[
+ {
+ "comment": "EXISTS subquery",
+ "query": "select exists(select * from user where id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.dual",
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.txt b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.txt
deleted file mode 100644
index 66afdf93a63..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-# EXISTS subquery
-"select exists(select * from user where id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.dual",
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/set_cases.json b/go/vt/vtgate/planbuilder/testdata/set_cases.json
new file mode 100644
index 00000000000..b6b14665f1a
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/set_cases.json
@@ -0,0 +1,609 @@
+[
+ {
+ "comment": "set single user defined variable",
+ "query": "set @foo = 42",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = 42",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "INT64(42)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set multi user defined variable",
+ "query": "set @foo = 42, @bar = @foo",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = 42, @bar = @foo",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "INT64(42)"
+ },
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "bar",
+ "Expr": ":__vtudvfoo"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set multi user defined variable with complex expression",
+ "query": "set @foo = 42, @bar = @foo + 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = 42, @bar = @foo + 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "INT64(42)"
+ },
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "bar",
+ "Expr": ":__vtudvfoo + INT64(1)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set UDV to expression that can't be evaluated at vtgate",
+ "query": "set @foo = CONCAT('Any','Expression','Is','Valid')",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = CONCAT('Any','Expression','Is','Valid')",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "[COLUMN 0]"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "select CONCAT('Any', 'Expression', 'Is', 'Valid') from dual",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "single sysvar cases",
+ "query": "SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "multiple sysvar cases",
+ "query": "SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO')",
+ "SupportSetVar": true
+ },
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_safe_updates",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "0",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "autocommit case",
+ "query": "SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": ":__vtudvmyudv"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(0)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set ignore plan",
+ "query": "set @@default_storage_engine = 'DONOTCHANGEME'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@default_storage_engine = 'DONOTCHANGEME'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "default_storage_engine",
+ "Expr": "'DONOTCHANGEME'"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set check and ignore plan",
+ "query": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set system settings",
+ "query": "set @@sql_safe_updates = 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@sql_safe_updates = 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_safe_updates",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "1",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set plan building with ON/OFF enum",
+ "query": "set @@innodb_strict_mode = OFF",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@innodb_strict_mode = OFF",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "innodb_strict_mode",
+ "Expr": "0"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set plan building with string literal",
+ "query": "set @@innodb_strict_mode = 'OFF'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@innodb_strict_mode = 'OFF'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "innodb_strict_mode",
+ "Expr": "0"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set plan building with string literal",
+ "query": "set @@innodb_tmpdir = 'OFF'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@innodb_tmpdir = 'OFF'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "innodb_tmpdir",
+ "Expr": "'OFF'"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set system settings",
+ "query": "set @@ndbinfo_max_bytes = 192",
+ "plan": "VT12001: unsupported: system setting: ndbinfo_max_bytes"
+ },
+ {
+ "comment": "set autocommit",
+ "query": "set autocommit = 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set autocommit = 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set autocommit false",
+ "query": "set autocommit = 0",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set autocommit = 0",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(0)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set autocommit with backticks",
+ "query": "set @@session.`autocommit` = 0",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@session.`autocommit` = 0",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(0)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "more vitess aware settings",
+ "query": "set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "client_found_rows",
+ "Expr": "INT64(0)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "skip_query_plan_cache",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "sql_select_limit",
+ "Expr": "INT64(20)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set autocommit to default",
+ "query": "set @@autocommit = default",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@autocommit = default",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set global autocommit to default",
+ "query": "set global autocommit = off",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set global autocommit = off",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarCheckAndIgnore",
+ "Name": "autocommit",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": {},
+ "Expr": "0"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "change read only variable",
+ "query": "set socket = ''",
+ "plan": "VT03010: variable 'socket' is a read only variable"
+ },
+ {
+ "comment": "set transaction read only",
+ "query": "set session transaction read only",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set session transaction read only",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "transaction_read_only",
+ "Expr": "INT64(1)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set transaction isolation level",
+ "query": "set transaction isolation level read committed",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set transaction isolation level read committed",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "transaction_isolation",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "'READ-COMMITTED'",
+ "SupportSetVar": false
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set vitess_metadata",
+ "query": "set @@vitess_metadata.app_v1= '1'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@vitess_metadata.app_v1= '1'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Name": "app_v1",
+ "Value": "1"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/set_cases.txt b/go/vt/vtgate/planbuilder/testdata/set_cases.txt
deleted file mode 100644
index 4c5e77ff5b5..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/set_cases.txt
+++ /dev/null
@@ -1,566 +0,0 @@
-# set single user defined variable
-"set @foo = 42"
-{
- "QueryType": "SET",
- "Original": "set @foo = 42",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "INT64(42)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set multi user defined variable
-"set @foo = 42, @bar = @foo"
-{
- "QueryType": "SET",
- "Original": "set @foo = 42, @bar = @foo",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "INT64(42)"
- },
- {
- "Type": "UserDefinedVariable",
- "Name": "bar",
- "Expr": ":__vtudvfoo"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set multi user defined variable with complex expression
-"set @foo = 42, @bar = @foo + 1"
-{
- "QueryType": "SET",
- "Original": "set @foo = 42, @bar = @foo + 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "INT64(42)"
- },
- {
- "Type": "UserDefinedVariable",
- "Name": "bar",
- "Expr": ":__vtudvfoo + INT64(1)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set UDV to expression that can't be evaluated at vtgate
-"set @foo = CONCAT('Any','Expression','Is','Valid')"
-{
- "QueryType": "SET",
- "Original": "set @foo = CONCAT('Any','Expression','Is','Valid')",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "[COLUMN 0]"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "select CONCAT('Any', 'Expression', 'Is', 'Valid') from dual",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# single sysvar cases
-"SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'"
-{
- "QueryType": "SET",
- "Original": "SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# multiple sysvar cases
-"SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0"
-{
- "QueryType": "SET",
- "Original": "SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO')",
- "SupportSetVar": true
- },
- {
- "Type": "SysVarSet",
- "Name": "sql_safe_updates",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "0",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# autocommit case
-"SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`"
-{
- "QueryType": "SET",
- "Original": "SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": ":__vtudvmyudv"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(0)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set ignore plan
-"set @@default_storage_engine = 'DONOTCHANGEME'"
-{
- "QueryType": "SET",
- "Original": "set @@default_storage_engine = 'DONOTCHANGEME'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "default_storage_engine",
- "Expr": "'DONOTCHANGEME'"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set check and ignore plan
-"set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')"
-{
- "QueryType": "SET",
- "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set system settings
-"set @@sql_safe_updates = 1"
-{
- "QueryType": "SET",
- "Original": "set @@sql_safe_updates = 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_safe_updates",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "1",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set plan building with ON/OFF enum
-"set @@innodb_strict_mode = OFF"
-{
- "QueryType": "SET",
- "Original": "set @@innodb_strict_mode = OFF",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "innodb_strict_mode",
- "Expr": "0"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set plan building with string literal
-"set @@innodb_strict_mode = 'OFF'"
-{
- "QueryType": "SET",
- "Original": "set @@innodb_strict_mode = 'OFF'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "innodb_strict_mode",
- "Expr": "0"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set plan building with string literal
-"set @@innodb_tmpdir = 'OFF'"
-{
- "QueryType": "SET",
- "Original": "set @@innodb_tmpdir = 'OFF'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "innodb_tmpdir",
- "Expr": "'OFF'"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set system settings
-"set @@ndbinfo_max_bytes = 192"
-"ndbinfo_max_bytes: system setting is not supported"
-Gen4 plan same as above
-
-# set autocommit
-"set autocommit = 1"
-{
- "QueryType": "SET",
- "Original": "set autocommit = 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set autocommit false
-"set autocommit = 0"
-{
- "QueryType": "SET",
- "Original": "set autocommit = 0",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(0)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set autocommit with backticks
-"set @@session.`autocommit` = 0"
-{
- "QueryType": "SET",
- "Original": "set @@session.`autocommit` = 0",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(0)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# more vitess aware settings
-"set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20"
-{
- "QueryType": "SET",
- "Original": "set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "client_found_rows",
- "Expr": "INT64(0)"
- },
- {
- "Type": "SysVarAware",
- "Name": "skip_query_plan_cache",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "sql_select_limit",
- "Expr": "INT64(20)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set autocommit to default
-"set @@autocommit = default"
-{
- "QueryType": "SET",
- "Original": "set @@autocommit = default",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set global autocommit to default
-"set global autocommit = off"
-{
- "QueryType": "SET",
- "Original": "set global autocommit = off",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarCheckAndIgnore",
- "Name": "autocommit",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": {},
- "Expr": "0"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# change read only variable
-"set socket = ''"
-"variable 'socket' is a read only variable"
-Gen4 plan same as above
-
-# set transaction
-"set transaction read only"
-{
- "QueryType": "SET",
- "Original": "set transaction read only",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# set vitess_metadata
-"set @@vitess_metadata.app_v1= '1'"
-{
- "QueryType": "SET",
- "Original": "set @@vitess_metadata.app_v1= '1'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Name": "app_v1",
- "Value": "1"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.json b/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.json
new file mode 100644
index 00000000000..dea7b35ff3e
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.json
@@ -0,0 +1,40 @@
+[
+ {
+ "comment": "set passthrough disabled - check and ignore plan",
+ "query": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarCheckAndIgnore",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": {},
+ "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')"
+ },
+ {
+ "Type": "SysVarCheckAndIgnore",
+ "Name": "sql_safe_updates",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": {},
+ "Expr": "1"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.txt b/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.txt
deleted file mode 100644
index 8a561a5ea59..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-# set passthrough disabled - check and ignore plan
-"set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1"
-{
- "QueryType": "SET",
- "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarCheckAndIgnore",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": {},
- "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')"
- },
- {
- "Type": "SysVarCheckAndIgnore",
- "Name": "sql_safe_updates",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": {},
- "Expr": "1"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.json b/go/vt/vtgate/planbuilder/testdata/show_cases.json
new file mode 100644
index 00000000000..84bbf3eb3ea
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/show_cases.json
@@ -0,0 +1,824 @@
+[
+ {
+ "comment": "Show table status without database name or conditions.",
+ "query": "SHOW table StatUs",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a keyspace name",
+ "query": "SHOW table StatUs from main",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs from main",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a keyspace name using IN",
+ "query": "SHOW table StatUs In main",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs In main",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a keyspace name with a condition",
+ "query": "SHOW table StatUs In user WHERE `Rows` > 70",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs In user WHERE `Rows` > 70",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status where `Rows` > 70",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a Like condition",
+ "query": "SHOW table StatUs LIKe '%a'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs LIKe '%a'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status like '%a'",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns from user keyspace",
+ "query": "show full columns from user.user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from user.user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns from main keyspace",
+ "query": "show full columns from unsharded",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from unsharded",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from unsharded",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns pass as dbname in from clause supersedes the qualifier",
+ "query": "show full columns from user.unsharded from main",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from user.unsharded from main",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from unsharded",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns fails as table does not exists in user keyspace",
+ "query": "show full columns from unsharded from user",
+ "plan": "table unsharded not found"
+ },
+ {
+ "comment": "show columns fails as table does not exists in user keyspace",
+ "query": "show full columns from user.unsharded",
+ "plan": "table unsharded not found"
+ },
+ {
+ "comment": "show charset",
+ "query": "show charset",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show charset",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Charset": "VARCHAR",
+ "Default collation": "VARCHAR",
+ "Description": "VARCHAR",
+ "Maxlen": "INT32"
+ },
+ "RowCount": 2
+ }
+ }
+ },
+ {
+ "comment": "show function",
+ "query": "show function status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show function status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show function status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show privileges",
+ "query": "show privileges",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show privileges",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show privileges",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show procedure status",
+ "query": "show procedure status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show procedure status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show procedure status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show variables",
+ "query": "show variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show variables",
+ "Instructions": {
+ "OperatorType": "ReplaceVariables",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show variables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show global variables",
+ "query": "show global variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global variables",
+ "Instructions": {
+ "OperatorType": "ReplaceVariables",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show global variables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show databases",
+ "query": "show databases",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show databases",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Database": "VARCHAR"
+ },
+ "RowCount": 5
+ }
+ }
+ },
+ {
+ "comment": "show create database",
+ "query": "show create database user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create database user",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create database `user`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create database system_schema",
+ "query": "show create database mysql",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create database mysql",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create database mysql",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create procedure",
+ "query": "show create procedure proc",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create procedure proc",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create procedure proc",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create procedure from system_schema",
+ "query": "show create procedure information_schema.proc",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create procedure information_schema.proc",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create procedure information_schema.proc",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table on table present in sharded but as unsharded is selected it goes to unsharded keyspace",
+ "query": "show create table user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table with qualifier",
+ "query": "show create table user.user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table user.user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table with unsharded as default keyspace",
+ "query": "show create table unknown",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table unknown",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table unknown",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table with table not present with qualifier",
+ "query": "show create table user.unknown",
+ "plan": "table unknown not found"
+ },
+ {
+ "comment": "show create table from system_schema",
+ "query": "show create table information_schema.tables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table information_schema.tables",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table information_schema.`tables`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show tables",
+ "query": "show tables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show tables",
+ "Instructions": {
+ "OperatorType": "RenameFields",
+ "Columns": [
+ "Tables_in_main"
+ ],
+ "Indices": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show tables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show tables from db",
+ "query": "show tables from user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show tables from user",
+ "Instructions": {
+ "OperatorType": "RenameFields",
+ "Columns": [
+ "Tables_in_user"
+ ],
+ "Indices": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show tables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show tables from system schema",
+ "query": "show tables from performance_schema",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show tables from performance_schema",
+ "Instructions": {
+ "OperatorType": "RenameFields",
+ "Columns": [
+ "Tables_in_performance_schema"
+ ],
+ "Indices": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show tables from performance_schema",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show migrations with db and like",
+ "query": "show vitess_migrations from user like '%format'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_migrations from user like '%format'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "SELECT * FROM _vt.schema_migrations where migration_uuid LIKE '%format' OR migration_context LIKE '%format' OR migration_status LIKE '%format'"
+ }
+ }
+ },
+ {
+ "comment": "show migrations with db and where",
+ "query": "show vitess_migrations from user where id = 5",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_migrations from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "SELECT * FROM _vt.schema_migrations where id = 5"
+ }
+ }
+ },
+ {
+ "comment": "show vgtid",
+ "query": "show global vgtid_executed",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global vgtid_executed",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "vgtid(1) AS global vgtid_executed",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "select 'main' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
+ "ShardNameNeeded": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show gtid",
+ "query": "show global gtid_executed from user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global gtid_executed from user",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "select 'user' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
+ "ShardNameNeeded": true
+ }
+ }
+ },
+ {
+ "comment": "show warnings",
+ "query": "show warnings",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show warnings",
+ "Instructions": {
+ "OperatorType": "SHOW WARNINGS"
+ }
+ }
+ },
+ {
+ "comment": "show global status",
+ "query": "show global status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show global status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show plugins",
+ "query": "show plugins",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show plugins",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Library": "VARCHAR",
+ "License": "VARCHAR",
+ "Name": "VARCHAR",
+ "Status": "VARCHAR",
+ "Type": "VARCHAR"
+ },
+ "RowCount": 1
+ }
+ }
+ },
+ {
+ "comment": "show engines",
+ "query": "show engines",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show engines",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Comment": "VARCHAR",
+ "Engine": "VARCHAR",
+ "Savepoints": "VARCHAR",
+ "Support": "VARCHAR",
+ "Transactions": "VARCHAR",
+ "XA": "VARCHAR"
+ },
+ "RowCount": 1
+ }
+ }
+ },
+ {
+ "comment": "show vitess_shards",
+ "query": "show vitess_shards",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_shards",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_shards"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_tablets",
+ "query": "show vitess_tablets",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_tablets",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_tablets"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_tablets with filter",
+ "query": "show vitess_tablets like '-2%'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_tablets like '-2%'",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_tablets",
+ "Filter": " like '-2%'"
+ }
+ }
+ },
+ {
+ "comment": "show vschema tables",
+ "query": "show vschema tables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vschema tables",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Tables": "VARCHAR"
+ },
+ "RowCount": 11
+ }
+ }
+ },
+ {
+ "comment": "show vschema vindexes",
+ "query": "show vschema vindexes",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vschema vindexes",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Keyspace": "VARCHAR",
+ "Name": "VARCHAR",
+ "Owner": "VARCHAR",
+ "Params": "VARCHAR",
+ "Type": "VARCHAR"
+ }
+ }
+ }
+ },
+ {
+ "comment": "show vschema vindexes on a table",
+ "query": "show vschema vindexes on user.user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vschema vindexes on user.user",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Columns": "VARCHAR",
+ "Name": "VARCHAR",
+ "Owner": "VARCHAR",
+ "Params": "VARCHAR",
+ "Type": "VARCHAR"
+ }
+ }
+ }
+ },
+ {
+ "comment": "show vitess target",
+ "query": "show vitess_target",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_target",
+ "Instructions": {
+ "OperatorType": "Rows",
+ "Fields": {
+ "Target": "VARCHAR"
+ },
+ "RowCount": 1
+ }
+ }
+ },
+ {
+ "comment": "show vitess_replication_status",
+ "query": "show vitess_replication_status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_replication_status",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_replication_status"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_replication_status with filter",
+ "query": "show vitess_replication_status like 'x'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_replication_status like 'x'",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_replication_status",
+ "Filter": " like 'x'"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_metadata variables",
+ "query": "show vitess_metadata variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_metadata variables",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_metadata variables"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_metadata variables with filter",
+ "query": "show vitess_metadata variables like 'x'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_metadata variables like 'x'",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_metadata variables",
+ "Filter": " like 'x'"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.txt b/go/vt/vtgate/planbuilder/testdata/show_cases.txt
deleted file mode 100644
index 3dc88f8ae49..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/show_cases.txt
+++ /dev/null
@@ -1,771 +0,0 @@
-# Show table status without database name or conditions.
-"SHOW table StatUs"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a keyspace name
-"SHOW table StatUs from main"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs from main",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a keyspace name using IN
-"SHOW table StatUs In main"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs In main",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a keyspace name with a condition
-"SHOW table StatUs In user WHERE `Rows` > 70"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs In user WHERE `Rows` \u003e 70",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status where `Rows` \u003e 70",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a Like condition
-"SHOW table StatUs LIKe '%a'"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs LIKe '%a'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status like '%a'",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns from user keyspace
-"show full columns from user.user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from user.user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns from main keyspace
-"show full columns from unsharded"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from unsharded",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from unsharded",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns pass as dbname in from clause supersedes the qualifier
-"show full columns from user.unsharded from main"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from user.unsharded from main",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from unsharded",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns fails as table does not exists in user keyspace
-"show full columns from unsharded from user"
-"table unsharded not found"
-Gen4 plan same as above
-
-# show columns fails as table does not exists in user keyspace
-"show full columns from user.unsharded"
-"table unsharded not found"
-Gen4 plan same as above
-
-# show charset
-"show charset"
-{
- "QueryType": "SHOW",
- "Original": "show charset",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show function
-"show function status"
-{
- "QueryType": "SHOW",
- "Original": "show function status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show function status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show privileges
-"show privileges"
-{
- "QueryType": "SHOW",
- "Original": "show privileges",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show privileges",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show procedure status
-"show procedure status"
-{
- "QueryType": "SHOW",
- "Original": "show procedure status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show procedure status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show variables
-"show variables"
-{
- "QueryType": "SHOW",
- "Original": "show variables",
- "Instructions": {
- "OperatorType": "ReplaceVariables",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show variables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show global variables
-"show global variables"
-{
- "QueryType": "SHOW",
- "Original": "show global variables",
- "Instructions": {
- "OperatorType": "ReplaceVariables",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show global variables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show databases
-"show databases"
-{
- "QueryType": "SHOW",
- "Original": "show databases",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show create database
-"show create database user"
-{
- "QueryType": "SHOW",
- "Original": "show create database user",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create database `user`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create database system_schema
-"show create database mysql"
-{
- "QueryType": "SHOW",
- "Original": "show create database mysql",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create database mysql",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create procedure
-"show create procedure proc"
-{
- "QueryType": "SHOW",
- "Original": "show create procedure proc",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create procedure proc",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create procedure from system_schema
-"show create procedure information_schema.proc"
-{
- "QueryType": "SHOW",
- "Original": "show create procedure information_schema.proc",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create procedure information_schema.proc",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table on table present in sharded but as unsharded is selected it goes to unsharded keyspace
-"show create table user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show create table user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table with qualifier
-"show create table user.user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show create table user.user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table with unsharded as default keyspace
-"show create table unknown"
-{
- "QueryType": "SHOW",
- "Original": "show create table unknown",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table unknown",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table with table not present with qualifier
-"show create table user.unknown"
-"table unknown not found"
-Gen4 plan same as above
-
-# show create table from system_schema
-"show create table information_schema.tables"
-{
- "QueryType": "SHOW",
- "Original": "show create table information_schema.tables",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table information_schema.`tables`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show tables
-"show tables"
-{
- "QueryType": "SHOW",
- "Original": "show tables",
- "Instructions": {
- "OperatorType": "RenameFields",
- "Columns": [
- "Tables_in_main"
- ],
- "Indices": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show tables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show tables from db
-"show tables from user"
-{
- "QueryType": "SHOW",
- "Original": "show tables from user",
- "Instructions": {
- "OperatorType": "RenameFields",
- "Columns": [
- "Tables_in_user"
- ],
- "Indices": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show tables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show tables from system schema
-"show tables from performance_schema"
-{
- "QueryType": "SHOW",
- "Original": "show tables from performance_schema",
- "Instructions": {
- "OperatorType": "RenameFields",
- "Columns": [
- "Tables_in_performance_schema"
- ],
- "Indices": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show tables from performance_schema",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show migrations with db and like
-"show vitess_migrations from user like '%format'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_migrations from user like '%format'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "SELECT * FROM _vt.schema_migrations where migration_uuid LIKE '%format' OR migration_context LIKE '%format' OR migration_status LIKE '%format'"
- }
-}
-Gen4 plan same as above
-
-# show migrations with db and where
-"show vitess_migrations from user where id = 5"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_migrations from user where id = 5",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "SELECT * FROM _vt.schema_migrations where id = 5"
- }
-}
-Gen4 plan same as above
-
-# show vgtid
-"show global vgtid_executed"
-{
- "QueryType": "SHOW",
- "Original": "show global vgtid_executed",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "vgtid(1) AS global vgtid_executed",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "select 'main' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
- "ShardNameNeeded": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show gtid
-"show global gtid_executed from user"
-{
- "QueryType": "SHOW",
- "Original": "show global gtid_executed from user",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "select 'user' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
- "ShardNameNeeded": true
- }
-}
-Gen4 plan same as above
-
-# show warnings
-"show warnings"
-{
- "QueryType": "SHOW",
- "Original": "show warnings",
- "Instructions": {
- "OperatorType": "SHOW WARNINGS"
- }
-}
-Gen4 plan same as above
-
-# show global status
-"show global status"
-{
- "QueryType": "SHOW",
- "Original": "show global status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show global status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show plugins
-"show plugins"
-{
- "QueryType": "SHOW",
- "Original": "show plugins",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show engines
-"show engines"
-{
- "QueryType": "SHOW",
- "Original": "show engines",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vitess_shards
-"show vitess_shards"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_shards",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_shards"
- }
-}
-Gen4 plan same as above
-
-# show vitess_tablets
-"show vitess_tablets"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_tablets",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_tablets"
- }
-}
-Gen4 plan same as above
-
-# show vitess_tablets with filter
-"show vitess_tablets like '-2%'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_tablets like '-2%'",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_tablets",
- "Filter": " like '-2%'"
- }
-}
-Gen4 plan same as above
-
-# show vschema tables
-"show vschema tables"
-{
- "QueryType": "SHOW",
- "Original": "show vschema tables",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vschema vindexes
-"show vschema vindexes"
-{
- "QueryType": "SHOW",
- "Original": "show vschema vindexes",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vschema vindexes on a table
-"show vschema vindexes on user.user"
-{
- "QueryType": "SHOW",
- "Original": "show vschema vindexes on user.user",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vitess target
-"show vitess_target"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_target",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vitess_replication_status
-"show vitess_replication_status"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_replication_status",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_replication_status"
- }
-}
-Gen4 plan same as above
-
-# show vitess_replication_status with filter
-"show vitess_replication_status like 'x'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_replication_status like 'x'",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_replication_status",
- "Filter": " like 'x'"
- }
-}
-Gen4 plan same as above
-
-# show vitess_metadata variables
-"show vitess_metadata variables"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_metadata variables",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_metadata variables"
- }
-}
-Gen4 plan same as above
-
-# show vitess_metadata variables with filter
-"show vitess_metadata variables like 'x'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_metadata variables like 'x'",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_metadata variables",
- "Filter": " like 'x'"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.json
new file mode 100644
index 00000000000..72d441c52ea
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.json
@@ -0,0 +1,115 @@
+[
+ {
+ "comment": "show columns from user keyspace",
+ "query": "show full columns from user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns from routed table",
+ "query": "show full fields from `route1`",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full fields from `route1`",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from `user`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show variables",
+ "query": "show variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show variables",
+ "Instructions": {
+ "OperatorType": "ReplaceVariables",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show variables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show full columns from system schema",
+ "query": "show full columns from sys.sys_config",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from sys.sys_config",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from sys.sys_config",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show full columns from system schema replacing qualifier",
+ "query": "show full columns from x.sys_config from sys",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from x.sys_config from sys",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from sys.sys_config",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show global status",
+ "query": "show global status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show global status",
+ "SingleShardOnly": true
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt
deleted file mode 100644
index 8bb2addd61d..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt
+++ /dev/null
@@ -1,112 +0,0 @@
-# show columns from user keyspace
-"show full columns from user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns from routed table
-"show full fields from `route1`"
-{
- "QueryType": "SHOW",
- "Original": "show full fields from `route1`",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from `user`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show variables
-"show variables"
-{
- "QueryType": "SHOW",
- "Original": "show variables",
- "Instructions": {
- "OperatorType": "ReplaceVariables",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show variables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show full columns from system schema
-"show full columns from sys.sys_config"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from sys.sys_config",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from sys.sys_config",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show full columns from system schema replacing qualifier
-"show full columns from x.sys_config from sys"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from x.sys_config from sys",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from sys.sys_config",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show global status
-"show global status"
-{
- "QueryType": "SHOW",
- "Original": "show global status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show global status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/stream_cases.json b/go/vt/vtgate/planbuilder/testdata/stream_cases.json
new file mode 100644
index 00000000000..e527d668694
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/stream_cases.json
@@ -0,0 +1,19 @@
+[
+ {
+ "comment": "stream table",
+ "query": "stream * from music",
+ "plan": {
+ "QueryType": "STREAM",
+ "Original": "stream * from music",
+ "Instructions": {
+ "OperatorType": "MStream",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Table": "music"
+ }
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/stream_cases.txt b/go/vt/vtgate/planbuilder/testdata/stream_cases.txt
deleted file mode 100644
index 2d2f1041af4..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/stream_cases.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-#stream table
-"stream * from music"
-{
- "QueryType": "STREAM",
- "Original": "stream * from music",
- "Instructions": {
- "OperatorType": "MStream",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Table": "music"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/symtab_cases.json b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json
new file mode 100644
index 00000000000..40558770196
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json
@@ -0,0 +1,90 @@
+[
+ {
+ "comment": "Tests in this file are for testing symtab functionality\n#\n# Column names need not be qualified if they are predefined in vschema and unambiguous.",
+ "query": "select predef2, predef3 from user join unsharded on predef2 = predef3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "predef2": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select predef2 from `user` where 1 != 1",
+ "Query": "select predef2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select predef3 from unsharded where 1 != 1",
+ "Query": "select predef3 from unsharded where predef3 = :predef2",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "predef2": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select predef2 from `user` where 1 != 1",
+ "Query": "select predef2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select predef3 from unsharded where 1 != 1",
+ "Query": "select predef3 from unsharded where predef3 = :predef2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "predef1 is in both user and unsharded. So, it's ambiguous.",
+ "query": "select predef1, predef3 from user join unsharded on predef1 = predef3",
+ "v3-plan": "VT03019: symbol predef1 not found",
+ "gen4-plan": "Column 'predef1' in field list is ambiguous"
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt b/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt
deleted file mode 100644
index ed273ba6bd8..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-# Tests in this file are for testing symtab functionality
-#
-# Column names need not be qualified if they are predefined in vschema and unambiguous.
-"select predef2, predef3 from user join unsharded on predef2 = predef3"
-{
- "QueryType": "SELECT",
- "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "predef2": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select predef2 from `user` where 1 != 1",
- "Query": "select predef2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select predef3 from unsharded where 1 != 1",
- "Query": "select predef3 from unsharded where predef3 = :predef2",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "predef2": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select predef2 from `user` where 1 != 1",
- "Query": "select predef2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select predef3 from unsharded where 1 != 1",
- "Query": "select predef3 from unsharded where predef3 = :predef2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# predef1 is in both user and unsharded. So, it's ambiguous.
-"select predef1, predef3 from user join unsharded on predef1 = predef3"
-"symbol predef1 not found"
-Gen4 error: Column 'predef1' in field list is ambiguous
diff --git a/go/vt/vtgate/planbuilder/testdata/sysschema_default.json b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json
new file mode 100644
index 00000000000..2d12dd815cf
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json
@@ -0,0 +1,150 @@
+[
+ {
+ "comment": "max_allowed_packet",
+ "query": "select @@max_allowed_packet from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@max_allowed_packet from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
+ "Query": "select @@max_allowed_packet from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@max_allowed_packet from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
+ "Query": "select @@max_allowed_packet from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "unqualified table name",
+ "query": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where 1 != 1",
+ "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname order by t.table_schema asc, t.table_name asc, c.column_name asc",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`, information_schema.`columns`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where 1 != 1",
+ "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`columns`, information_schema.`tables`"
+ }
+ }
+ },
+ {
+ "comment": "system schema query as a subquery",
+ "query": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
+ "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
+ "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "system schema query as a derived table",
+ "query": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
+ "Query": "select * from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "information_schema.schemata"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.`1` from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
+ "Query": "select x.`1` from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "information_schema.schemata"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt b/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt
deleted file mode 100644
index eab99ec3245..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-# max_allowed_packet
-"select @@max_allowed_packet from dual"
-{
- "QueryType": "SELECT",
- "Original": "select @@max_allowed_packet from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
- "Query": "select @@max_allowed_packet from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select @@max_allowed_packet from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
- "Query": "select @@max_allowed_packet from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# unqualified table name
-"select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name"
-{
- "QueryType": "SELECT",
- "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where 1 != 1",
- "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname order by t.table_schema asc, t.table_name asc, c.column_name asc",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`, information_schema.`columns`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where 1 != 1",
- "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`columns`, information_schema.`tables`"
- }
-}
-
-# system schema query as a subquery
-"SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);"
-{
- "QueryType": "SELECT",
- "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
- "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
- "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# system schema query as a derived table
-"SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
- "Query": "select * from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "information_schema.schemata"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.`1` from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
- "Query": "select x.`1` from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "information_schema.schemata"
- }
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/systemtables_cases.txt b/go/vt/vtgate/planbuilder/testdata/systemtables_cases.txt
deleted file mode 100644
index 094cc96e8f0..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/systemtables_cases.txt
+++ /dev/null
@@ -1,1456 +0,0 @@
-# Single information_schema query
-"select col from information_schema.foo"
-{
- "QueryType": "SELECT",
- "Original": "select col from information_schema.foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from information_schema.foo where 1 != 1",
- "Query": "select col from information_schema.foo",
- "Table": "information_schema.foo"
- }
-}
-Gen4 plan same as above
-
-# ',' join information_schema
-"select a.id,b.id from information_schema.a as a, information_schema.b as b"
-{
- "QueryType": "SELECT",
- "Original": "select a.id,b.id from information_schema.a as a, information_schema.b as b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.id, b.id from information_schema.a as a, information_schema.b as b where 1 != 1",
- "Query": "select a.id, b.id from information_schema.a as a, information_schema.b as b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-Gen4 plan same as above
-
-# information schema query that uses table_schema
-"select column_name from information_schema.columns where table_schema = (select schema())"
-{
- "QueryType": "SELECT",
- "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
- "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
- "Table": "information_schema.`columns`"
- }
-}
-Gen4 plan same as above
-
-# information schema join
-"select * from information_schema.a join information_schema.b"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a join information_schema.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a join information_schema.b where 1 != 1",
- "Query": "select * from information_schema.a join information_schema.b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a join information_schema.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a, information_schema.b where 1 != 1",
- "Query": "select * from information_schema.a, information_schema.b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-
-# access to unqualified column names in information_schema
-"select * from information_schema.a where b=10"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where b=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where b = 10",
- "Table": "information_schema.a"
- }
-}
-Gen4 plan same as above
-
-# access to qualified column names in information_schema
-"select * from information_schema.a where information_schema.a.b=10"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where information_schema.a.b=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where information_schema.a.b = 10",
- "Table": "information_schema.a"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where information_schema.a.b=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where a.b = 10",
- "Table": "information_schema.a"
- }
-}
-
-# union of information_schema
-"select * from information_schema.a union select * from information_schema.b"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a union select * from information_schema.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1 union select * from information_schema.b where 1 != 1",
- "Query": "select * from information_schema.a union select * from information_schema.b",
- "Table": "information_schema.a"
- }
-}
-Gen4 plan same as above
-
-# union between information_schema tables that should not be merged
-"select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"main\")]",
- "Table": "information_schema.`tables`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select distinct * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select distinct * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"main\")]",
- "Table": "information_schema.`tables`"
- }
- ]
- }
- ]
- }
-}
-
-# Select from information schema query with two tables that route should be merged
-"SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME"
-{
- "QueryType": "SELECT",
- "Original": "SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
- "Query": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1",
- "Query": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- }
-}
-
-# Select from information schema query with three tables such that route for 2 should be merged but not for the last.
-"SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME"
-{
- "QueryType": "SELECT",
- "Original": "SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.K",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
- "Query": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S where 1 != 1",
- "Query": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME",
- "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.K"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select KCU.DELETE_RULE, S.UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.K as S where 1 != 1",
- "Query": "select KCU.DELETE_RULE, S.UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.K as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME and KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\"), S_TABLE_NAME:VARCHAR(\"sc\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.K, INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- }
-}
-
-#information_schema.routines
-"SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
- "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
- "SysTableTableSchema": "[:v1]",
- "Table": "information_schema.routines"
- }
-}
-Gen4 plan same as above
-
-#information_schema table sizes
-"SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?"
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
- "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[:v1]",
- "Table": "information_schema.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-#information_schema referential contraints
-"SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position"
-{
- "QueryType": "SELECT",
- "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1",
- "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc",
- "SysTableTableSchema": "[:v1, :v2]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where 1 != 1",
- "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null and rc.constraint_schema = :__vtschemaname and kcu.constraint_name = rc.constraint_name order by ordinal_position asc",
- "SysTableTableSchema": "[:v1, :v2]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-# rails query
-"select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'"
-{
- "QueryType": "SELECT",
- "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
- "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-#rails_query 2
-"SELECT * FROM information_schema.schemata WHERE schema_name = 'user'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.schemata where 1 != 1",
- "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\")]",
- "Table": "information_schema.schemata"
- }
-}
-Gen4 plan same as above
-
-#rails_query 3
-"SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
- "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
- "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
- "Table": "information_schema.`tables`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 4
-"SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
- "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-#rails_query 5
-"SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where 1 != 1",
- "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and cc.constraint_schema = :__vtschemaname",
- "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"constraint_schema\")]",
- "Table": "information_schema.check_constraints, information_schema.table_constraints"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where 1 != 1",
- "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where cc.constraint_schema = :__vtschemaname and tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name",
- "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"constraint_schema\"), VARCHAR(\"table_schema\")]",
- "Table": "information_schema.check_constraints, information_schema.table_constraints"
- }
-}
-
-#rails_query 6
-"SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index"
-{
- "QueryType": "SELECT",
- "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
- "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
- "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.statistics"
- }
-}
-Gen4 plan same as above
-
-#rails_query 7
-"SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
- "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
- "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`columns`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 8
-"SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
- "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
- "Table": "information_schema.`processlist`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 9
-"SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery"
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
- "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`tables`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 10
-"SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
- "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name",
- "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`tables`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
- "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# two predicates specifying the database for the same table work if the database is the same
-"SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1",
- "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"a\"), VARCHAR(\"a\")]",
- "Table": "information_schema.check_constraints"
- }
-}
-Gen4 plan same as above
-
-# system schema in where clause of information_schema query
-"SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
- "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
- "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# subquery of information_schema with itself
-"select * from information_schema.a where id in (select * from information_schema.b)"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where id in (select * from information_schema.b)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where id in (select * from information_schema.b)",
- "Table": "information_schema.a"
- }
-}
-Gen4 plan same as above
-
-# query trying to query two different keyspaces at the same time
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# information_schema query using database() func
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# table_schema predicate the wrong way around
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# table_name predicate against a routed table
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
- "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# information_schema query with additional predicates
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and other_column = 42"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and other_column = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and other_column = 42",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# able to isolate table_schema value even when hidden inside of ORs
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (other_column = 42 or TABLE_SCHEMA = 'ks') and (other_column = 42 or foobar = 'value')",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# expand star with information schema
-"select x.table_name from (select a.* from information_schema.key_column_usage a) x"
-{
- "QueryType": "SELECT",
- "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
- "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
- "Table": "information_schema.key_column_usage"
- }
-}
-Gen4 plan same as above
-
-# expand star with information schema in a derived table
-"select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "x_id": 1
- },
- "TableName": "information_schema.key_column_usage_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.table_name, x.id from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
- "Query": "select x.table_name, x.id from (select a.* from information_schema.key_column_usage as a) as x",
- "Table": "information_schema.key_column_usage"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :x_id",
- "Table": "`user`",
- "Values": [
- ":x_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "x_id": 0
- },
- "TableName": "information_schema.key_column_usage_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.id, x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
- "Query": "select x.id, x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
- "Table": "information_schema.key_column_usage"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :x_id",
- "Table": "`user`",
- "Values": [
- ":x_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join of information_schema queries with select stars exprs
-"select a.*, b.* from information_schema.a a, information_schema.b b"
-{
- "QueryType": "SELECT",
- "Original": "select a.*, b.* from information_schema.a a, information_schema.b b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.*, b.* from information_schema.a as a, information_schema.b as b where 1 != 1",
- "Query": "select a.*, b.* from information_schema.a as a, information_schema.b as b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-Gen4 plan same as above
-
-# join two routes with SysTableTableName entries in LHS and RHS
-"select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b"
-{
- "QueryType": "SELECT",
- "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
- "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b",
- "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a, (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
- "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select * from information_schema.referential_constraints where table_name = :table_name) as b",
- "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-"select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
- "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# union as a derived table
-"select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t"
-{
- "QueryType": "SELECT",
- "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
- "SysTableTableSchema": "[VARCHAR(\"music\")]",
- "Table": "information_schema.views"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
- "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge system schema queries as long as they have any same table_schema
-"select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.views"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge system schema queries as long as they have any same table_name
-"select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.views"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge union subquery with outer query referencing the same system schemas
-"select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))"
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3",
- "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1",
- "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]",
- "Table": "information_schema.views"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1",
- "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))",
- "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge even one side have schema name in derived table
-"select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`table` as t where 1 != 1",
- "Query": "select id from information_schema.`table` as t where t.schema_name = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.`table`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`columns` where 1 != 1",
- "Query": "select id from information_schema.`columns`",
- "Table": "information_schema.`columns`"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from (select id from information_schema.`table` as t where 1 != 1 union select id from information_schema.`columns` where 1 != 1) as dt where 1 != 1",
- "Query": "select id from (select id from information_schema.`table` as t where t.schema_name = :__vtschemaname union select id from information_schema.`columns`) as dt",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.`table`"
- }
-}
-
-# merge even one side have schema name in subquery
-"select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)"
-{
- "QueryType": "SELECT",
- "Original": "select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`table` as t where 1 != 1",
- "Query": "select id from information_schema.`table` as t where t.schema_name = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.`table`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`columns` where 1 != 1",
- "Query": "select id from information_schema.`columns`",
- "Table": "information_schema.`columns`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.random as t where 1 != 1",
- "Query": "select id from information_schema.random as t where :__sq_has_values1 = 1 and t.col in ::__sq1",
- "Table": "information_schema.random"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.random as t where 1 != 1",
- "Query": "select id from information_schema.random as t where t.col in (select id from information_schema.`table` as t where t.schema_name = :__vtschemaname union select id from information_schema.`columns`)",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.random"
- }
-}
-
-# systable union query in derived table with constraint on outside (star projection)
-"select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'"
-"symbol constraint_name not found in table or subquery"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select * from information_schema.key_column_usage as kcu where 1 != 1 union select * from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1",
- "Query": "select * from (select * from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name and constraint_name = 'primary' union select * from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name1 and constraint_name = 'primary') as kcu",
- "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
- "Table": "information_schema.key_column_usage"
- }
-}
-
-# table_schema OR predicate
-# It is unsupported because we do not route queries to multiple keyspaces right now
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json
new file mode 100644
index 00000000000..ed28ddf599b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json
@@ -0,0 +1,1823 @@
+[
+ {
+ "comment": "TPC-C select join customer1 and warehouse1",
+ "query": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where 1 != 1",
+ "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where w_id = 1 and c_d_id = 15 and c_id = 10",
+ "Table": "customer1, warehouse1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where 1 != 1",
+ "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where c_d_id = 15 and c_id = 10 and w_id = 1 and c_w_id = w_id",
+ "Table": "customer1, warehouse1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1",
+ "main.warehouse1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select district1 for update",
+ "query": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
+ "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
+ "Table": "district1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
+ "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
+ "Table": "district1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update district1 unique",
+ "query": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
+ "Table": "district1",
+ "Values": [
+ "INT64(8546)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
+ "Table": "district1",
+ "Values": [
+ "INT64(8546)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into orders1",
+ "query": "INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into orders1(o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) values (334983, 59896, :_o_w_id_0, 156, now(), 781038, 'hello')",
+ "TableName": "orders1",
+ "VindexValues": {
+ "hash": "INT64(99)"
+ }
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into new_orders1",
+ "query": "INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into new_orders1(no_o_id, no_d_id, no_w_id) values (8, 9, :_no_w_id_0)",
+ "TableName": "new_orders1",
+ "VindexValues": {
+ "hash": "INT64(48)"
+ }
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique item1",
+ "query": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
+ "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
+ "Table": "item1",
+ "Values": [
+ "INT64(9654)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
+ "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
+ "Table": "item1",
+ "Values": [
+ "INT64(9654)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.item1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select stock1 for update",
+ "query": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
+ "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
+ "Table": "stock1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
+ "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
+ "Table": "stock1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update stock1",
+ "query": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
+ "Table": "stock1",
+ "Values": [
+ "INT64(6)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
+ "Table": "stock1",
+ "Values": [
+ "INT64(6)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into order_line1",
+ "query": "INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into order_line1(ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) values (648, 36812, :_ol_w_id_0, 4946378, 3, 7, 89, 1, 'info')",
+ "TableName": "order_line1",
+ "VindexValues": {
+ "hash": "INT64(3201)"
+ }
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update warehouse1 unique",
+ "query": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.warehouse1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.warehouse1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select warehouse1 unique",
+ "query": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
+ "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(998)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
+ "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(998)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.warehouse1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update district1 unique",
+ "query": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select district1 unique",
+ "query": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
+ "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(896)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
+ "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(896)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select aggr from customer1",
+ "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select customer1 order by",
+ "query": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_id from customer1 where 1 != 1",
+ "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_id from customer1 where 1 != 1",
+ "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select for update customer1 unique",
+ "query": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
+ "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8965)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
+ "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8965)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select customer1 unique",
+ "query": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_data from customer1 where 1 != 1",
+ "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
+ "Table": "customer1",
+ "Values": [
+ "INT64(32)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_data from customer1 where 1 != 1",
+ "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
+ "Table": "customer1",
+ "Values": [
+ "INT64(32)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update customer1 unique and float value",
+ "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update customer1 unique and float value",
+ "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into history1",
+ "query": "INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into history1(h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) values (6809887, 38748, 8746, 210, :_h_w_id_0, now(), 8907, 'data')",
+ "TableName": "history1",
+ "VindexValues": {
+ "hash": "INT64(8)"
+ }
+ },
+ "TablesUsed": [
+ "main.history1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select aggr customer1",
+ "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(870)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(870)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select order by customer1",
+ "query": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(840)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(840)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique customer1",
+ "query": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
+ "Table": "customer1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
+ "Table": "customer1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select order by orders1",
+ "query": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
+ "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
+ "Table": "orders1",
+ "Values": [
+ "INT64(9894)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
+ "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
+ "Table": "orders1",
+ "Values": [
+ "INT64(9894)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select order_line1",
+ "query": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
+ "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(92)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
+ "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(92)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select for update new_orders1",
+ "query": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
+ "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
+ "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete new_orders1",
+ "query": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(98465)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(98465)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique orders1",
+ "query": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_c_id from orders1 where 1 != 1",
+ "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
+ "Table": "orders1",
+ "Values": [
+ "INT64(894605)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_c_id from orders1 where 1 != 1",
+ "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
+ "Table": "orders1",
+ "Values": [
+ "INT64(894605)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update orders1 unique",
+ "query": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
+ "Table": "orders1",
+ "Values": [
+ "INT64(897)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
+ "Table": "orders1",
+ "Values": [
+ "INT64(897)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update order_line1",
+ "query": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select sum order_line1",
+ "query": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
+ "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(87)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
+ "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(87)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update customer1",
+ "query": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
+ "Table": "customer1",
+ "Values": [
+ "INT64(160)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
+ "Table": "customer1",
+ "Values": [
+ "INT64(160)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique district1",
+ "query": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
+ "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
+ "Table": "district1",
+ "Values": [
+ "INT64(21)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
+ "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
+ "Table": "district1",
+ "Values": [
+ "INT64(21)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select count distinct stock1 join order_line1",
+ "query": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where 1 != 1",
+ "Query": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id < 30 and ol.ol_o_id >= 15 and s.s_w_id = 12 and s.s_quantity < 10",
+ "Table": "stock1, order_line1",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where 1 != 1",
+ "Query": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where s.s_w_id = 12 and s.s_quantity < 10 and ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id < 30 and ol.ol_o_id >= 15 and ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id",
+ "Table": "order_line1, stock1",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1",
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select distinct order_line1",
+ "query": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
+ "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id < 500 and ol_o_id >= 56",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
+ "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id < 500 and ol_o_id >= 56",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C",
+ "query": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from stock1 where 1 != 1",
+ "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity < 1000",
+ "Table": "stock1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from stock1 where 1 != 1",
+ "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity < 1000",
+ "Table": "stock1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select with subquery,aggr,distinct,having,limit",
+ "query": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "JoinVars": {
+ "o_o_c_id": 3,
+ "o_o_d_id": 1,
+ "o_o_w_id": 2
+ },
+ "TableName": "orders1_orders1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o where 1 != 1",
+ "Query": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o",
+ "Table": "orders1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
+ "Query": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = :o_o_w_id and t.o_d_id = :o_o_d_id and t.o_c_id = :o_o_c_id",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
+ "Query": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete order_line1",
+ "query": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(178)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(178)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete orders1",
+ "query": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete history1",
+ "query": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
+ "Table": "history1",
+ "Values": [
+ "INT64(75)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.history1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
+ "Table": "history1",
+ "Values": [
+ "INT64(75)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.history1"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.txt b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.txt
deleted file mode 100644
index ced6e2f5425..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.txt
+++ /dev/null
@@ -1,1784 +0,0 @@
-# TPC-C select join customer1 and warehouse1
-"SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where 1 != 1",
- "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where w_id = 1 and c_d_id = 15 and c_id = 10",
- "Table": "customer1, warehouse1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where 1 != 1",
- "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where c_d_id = 15 and c_id = 10 and w_id = 1 and c_w_id = w_id",
- "Table": "customer1, warehouse1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1",
- "main.warehouse1"
- ]
-}
-
-# TPC-C select district1 for update
-"SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
- "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
- "Table": "district1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
- "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
- "Table": "district1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C update district1 unique
-"UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
- "Table": "district1",
- "Values": [
- "INT64(8546)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
- "Table": "district1",
- "Values": [
- "INT64(8546)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C insert into orders1
-"INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into orders1(o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) values (334983, 59896, :_o_w_id_0, 156, now(), 781038, 'hello')",
- "TableName": "orders1",
- "VindexValues": {
- "hash": "INT64(99)"
- }
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C insert into new_orders1
-"INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into new_orders1(no_o_id, no_d_id, no_w_id) values (8, 9, :_no_w_id_0)",
- "TableName": "new_orders1",
- "VindexValues": {
- "hash": "INT64(48)"
- }
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C select unique item1
-"SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654"
-{
- "QueryType": "SELECT",
- "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
- "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
- "Table": "item1",
- "Values": [
- "INT64(9654)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
- "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
- "Table": "item1",
- "Values": [
- "INT64(9654)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.item1"
- ]
-}
-
-# TPC-C select stock1 for update
-"SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
- "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
- "Table": "stock1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
- "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
- "Table": "stock1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-
-# TPC-C update stock1
-"UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
- "Table": "stock1",
- "Values": [
- "INT64(6)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
- "Table": "stock1",
- "Values": [
- "INT64(6)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-
-# TPC-C insert into order_line1
-"INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into order_line1(ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) values (648, 36812, :_ol_w_id_0, 4946378, 3, 7, 89, 1, 'info')",
- "TableName": "order_line1",
- "VindexValues": {
- "hash": "INT64(3201)"
- }
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C update warehouse1 unique
-"UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
- "Table": "warehouse1",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.warehouse1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
- "Table": "warehouse1",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.warehouse1"
- ]
-}
-
-# TPC-C select warehouse1 unique
-"SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998"
-{
- "QueryType": "SELECT",
- "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
- "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
- "Table": "warehouse1",
- "Values": [
- "INT64(998)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
- "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
- "Table": "warehouse1",
- "Values": [
- "INT64(998)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.warehouse1"
- ]
-}
-
-# TPC-C update district1 unique
-"UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C select district1 unique
-"SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9"
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
- "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(896)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
- "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(896)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C select aggr from customer1
-"SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select customer1 order by
-"SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_id from customer1 where 1 != 1",
- "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_id from customer1 where 1 != 1",
- "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select for update customer1 unique
-"SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
- "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
- "Table": "customer1",
- "Values": [
- "INT64(8965)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
- "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
- "Table": "customer1",
- "Values": [
- "INT64(8965)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select customer1 unique
-"SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_data from customer1 where 1 != 1",
- "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
- "Table": "customer1",
- "Values": [
- "INT64(32)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_data from customer1 where 1 != 1",
- "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
- "Table": "customer1",
- "Values": [
- "INT64(32)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C update customer1 unique and float value
-"UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C update customer1 unique and float value
-"UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C insert into history1
-"INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into history1(h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) values (6809887, 38748, 8746, 210, :_h_w_id_0, now(), 8907, 'data')",
- "TableName": "history1",
- "VindexValues": {
- "hash": "INT64(8)"
- }
- },
- "TablesUsed": [
- "main.history1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C select aggr customer1
-"SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(870)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(870)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select order by customer1
-"SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(840)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(840)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select unique customer1
-"SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
- "Table": "customer1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
- "Table": "customer1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select order by orders1
-"SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC"
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
- "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
- "Table": "orders1",
- "Values": [
- "INT64(9894)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
- "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
- "Table": "orders1",
- "Values": [
- "INT64(9894)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C select order_line1
-"SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1"
-{
- "QueryType": "SELECT",
- "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
- "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
- "Table": "order_line1",
- "Values": [
- "INT64(92)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
- "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
- "Table": "order_line1",
- "Values": [
- "INT64(92)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C select for update new_orders1
-"SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
- "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
- "Table": "new_orders1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
- "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
- "Table": "new_orders1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-
-# TPC-C delete new_orders1
-"DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
- "Table": "new_orders1",
- "Values": [
- "INT64(98465)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
- "Table": "new_orders1",
- "Values": [
- "INT64(98465)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-
-# TPC-C select unique orders1
-"SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605"
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_c_id from orders1 where 1 != 1",
- "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
- "Table": "orders1",
- "Values": [
- "INT64(894605)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_c_id from orders1 where 1 != 1",
- "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
- "Table": "orders1",
- "Values": [
- "INT64(894605)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C update orders1 unique
-"UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
- "Table": "orders1",
- "Values": [
- "INT64(897)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
- "Table": "orders1",
- "Values": [
- "INT64(897)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C update order_line1
-"UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
- "Table": "order_line1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
- "Table": "order_line1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C select sum order_line1
-"SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87"
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
- "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
- "Table": "order_line1",
- "Values": [
- "INT64(87)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
- "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
- "Table": "order_line1",
- "Values": [
- "INT64(87)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C update customer1
-"UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
- "Table": "customer1",
- "Values": [
- "INT64(160)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
- "Table": "customer1",
- "Values": [
- "INT64(160)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select unique district1
-"SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21"
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
- "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
- "Table": "district1",
- "Values": [
- "INT64(21)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
- "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
- "Table": "district1",
- "Values": [
- "INT64(21)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C select count distinct stock1 join order_line1
-"SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id \u003c 30 AND ol.ol_o_id \u003e= 15 AND s.s_w_id= 12 AND s.s_quantity \u003c 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id \u003c 30 AND ol.ol_o_id \u003e= 15 AND s.s_w_id= 12 AND s.s_quantity \u003c 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where 1 != 1",
- "Query": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id \u003c 30 and ol.ol_o_id \u003e= 15 and s.s_w_id = 12 and s.s_quantity \u003c 10",
- "Table": "stock1, order_line1",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id \u003c 30 AND ol.ol_o_id \u003e= 15 AND s.s_w_id= 12 AND s.s_quantity \u003c 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where 1 != 1",
- "Query": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where s.s_w_id = 12 and s.s_quantity \u003c 10 and ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id \u003c 30 and ol.ol_o_id \u003e= 15 and ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id",
- "Table": "order_line1, stock1",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1",
- "main.stock1"
- ]
-}
-
-# TPC-C select distinct order_line1
-"SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id \u003c 500 AND ol_o_id \u003e= 56"
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id \u003c 500 AND ol_o_id \u003e= 56",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
- "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id \u003c 500 and ol_o_id \u003e= 56",
- "Table": "order_line1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id \u003c 500 AND ol_o_id \u003e= 56",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
- "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id \u003c 500 and ol_o_id \u003e= 56",
- "Table": "order_line1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C
-"SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity \u003c 1000"
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity \u003c 1000",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from stock1 where 1 != 1",
- "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity \u003c 1000",
- "Table": "stock1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity \u003c 1000",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from stock1 where 1 != 1",
- "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity \u003c 1000",
- "Table": "stock1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-
-# TPC-C select with subquery,aggr,distinct,having,limit
-"select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) \u003e 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) \u003e 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "JoinVars": {
- "o_o_c_id": 3,
- "o_o_d_id": 1,
- "o_o_w_id": 2
- },
- "TableName": "orders1_orders1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o where 1 != 1",
- "Query": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o",
- "Table": "orders1"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
- "Query": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) \u003e 1 limit 1) as t where t.o_w_id = :o_o_w_id and t.o_d_id = :o_o_d_id and t.o_c_id = :o_o_c_id",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) \u003e 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
- "Query": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) \u003e 1 limit 1) as t where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C delete order_line1
-"DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
- "Table": "order_line1",
- "Values": [
- "INT64(178)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
- "Table": "order_line1",
- "Values": [
- "INT64(178)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C delete orders1
-"DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C delete history1
-"DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
- "Table": "history1",
- "Values": [
- "INT64(75)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.history1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
- "Table": "history1",
- "Values": [
- "INT64(75)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.history1"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json
new file mode 100644
index 00000000000..b403d8469de
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json
@@ -0,0 +1,1495 @@
+[
+ {
+ "comment": "TPC-H query 1",
+ "query": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate <= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex aggregate expression",
+ "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'avg'"
+ },
+ {
+ "comment": "TPC-H query 2",
+ "query": "select s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment from part, supplier, partsupp, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and p_size = 15 and p_type like '%BRASS' and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' and ps_supplycost = ( select min(ps_supplycost) from partsupp, supplier, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' ) order by s_acctbal desc, n_name, s_name, p_partkey limit 10",
+ "v3-plan": "VT03019: symbol p_partkey not found",
+ "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "TPC-H query 3",
+ "query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 DESC, (2|5) ASC",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS revenue",
+ "GroupBy": "(0|6), (2|5), (3|4)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as l_orderkey",
+ "([COLUMN 6] * COALESCE([COLUMN 7], INT64(1))) * COALESCE([COLUMN 8], INT64(1)) as revenue",
+ "[COLUMN 1] as o_orderdate",
+ "[COLUMN 2] as o_shippriority",
+ "[COLUMN 5]",
+ "[COLUMN 4]",
+ "[COLUMN 3]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,R:1,L:2,R:2,R:3,L:1,R:4,R:5",
+ "JoinVars": {
+ "l_orderkey": 0
+ },
+ "TableName": "lineitem_orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey)",
+ "Query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where l_shipdate > date('1995-03-15') group by l_orderkey, weight_string(l_orderkey)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:5,L:4,L:6,L:1,R:1",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
+ "Query": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where o_orderdate < date('1995-03-15') and o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
+ "Table": "orders",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from customer where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from customer where c_mktsegment = 'BUILDING' and c_custkey = :o_custkey group by 1",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 4",
+ "query": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority",
+ "v3-plan": "VT03019: symbol o_orderkey not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS order_count",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 1] as o_orderpriority",
+ "[COLUMN 2] as order_count",
+ "[COLUMN 3]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where 1 != 1 group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey)",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey) order by o_orderpriority asc",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from lineitem where 1 != 1",
+ "Query": "select 1 from lineitem where l_commitdate < l_receiptdate and l_orderkey = :o_orderkey",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 5 - Gen4 produces plan but the plan output is flaky",
+ "query": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 DESC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS revenue",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as n_name",
+ "(((([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1))) * COALESCE([COLUMN 5], INT64(1))) * COALESCE([COLUMN 6], INT64(1))) * COALESCE([COLUMN 7], INT64(1)) as revenue",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:3,L:4,L:5,L:6,R:2,R:3",
+ "JoinVars": {
+ "s_nationkey": 0
+ },
+ "TableName": "orders_customer_lineitem_supplier_nation_region",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,R:2,L:6,L:7,R:3,R:4",
+ "JoinVars": {
+ "c_nationkey": 1,
+ "o_orderkey": 0
+ },
+ "TableName": "orders_customer_lineitem_supplier",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,L:1,R:0,L:4,R:2,L:2,R:1",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
+ "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
+ "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1",
+ "JoinVars": {
+ "l_suppkey": 0
+ },
+ "TableName": "lineitem_supplier",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where 1 != 1 group by l_suppkey, weight_string(l_suppkey)",
+ "Query": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where l_orderkey = :o_orderkey group by l_suppkey, weight_string(l_suppkey)",
+ "Table": "lineitem"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
+ "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey and s_nationkey = :c_nationkey group by s_nationkey, weight_string(s_nationkey)",
+ "Table": "supplier",
+ "Values": [
+ ":l_suppkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
+ "JoinVars": {
+ "n_regionkey": 0
+ },
+ "TableName": "nation_region",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where 1 != 1 group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
+ "Query": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where n_nationkey = :s_nationkey group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
+ "Table": "nation",
+ "Values": [
+ ":s_nationkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from region where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from region where r_name = 'ASIA' and r_regionkey = :n_regionkey group by 1",
+ "Table": "region",
+ "Values": [
+ ":n_regionkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.nation",
+ "main.orders",
+ "main.region",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 6",
+ "query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
+ "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS revenue",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
+ "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Table": "lineitem"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 7",
+ "query": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(3) AS revenue",
+ "GroupBy": "(0|6), (1|5), (2|4)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 4] as supp_nation",
+ "[COLUMN 5] as cust_nation",
+ "[COLUMN 6] as l_year",
+ "(((([COLUMN 10] * COALESCE([COLUMN 11], INT64(1))) * COALESCE([COLUMN 12], INT64(1))) * COALESCE([COLUMN 13], INT64(1))) * COALESCE([COLUMN 14], INT64(1))) * COALESCE([COLUMN 15], INT64(1)) as revenue",
+ "[COLUMN 9]",
+ "[COLUMN 8]",
+ "[COLUMN 7]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|16) ASC, (1|17) ASC, (2|18) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:2,R:0,L:3,L:4,L:8,R:1,L:9,L:13,R:2,L:14,L:15,L:16,L:17,L:18,R:3,R:4,L:19,R:5,L:20",
+ "JoinVars": {
+ "n1_n_name": 1,
+ "o_custkey": 0
+ },
+ "TableName": "lineitem_orders_supplier_nation_customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,R:1,L:2,L:3,L:5,R:2,R:3,R:4,L:6,L:8,R:5,R:6,R:7,L:9,L:10,L:11,R:8,R:9,R:10,L:12",
+ "JoinVars": {
+ "l_suppkey": 0
+ },
+ "TableName": "lineitem_orders_supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,L:2,L:3,L:1,R:0,L:2,L:6,R:2,L:7,L:4,R:1,L:8",
+ "JoinVars": {
+ "l_orderkey": 0
+ },
+ "TableName": "lineitem_orders",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
+ "Query": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31') group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey)",
+ "Query": "select o_custkey, count(*), weight_string(o_custkey) from orders where o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey)",
+ "Table": "orders",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,R:0,R:0,R:1,R:3,R:3,R:4,L:1,R:2,R:5",
+ "JoinVars": {
+ "s_nationkey": 0
+ },
+ "TableName": "supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
+ "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey group by s_nationkey, weight_string(s_nationkey)",
+ "Table": "supplier",
+ "Values": [
+ ":l_suppkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where 1 != 1 group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
+ "Query": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where n1.n_nationkey = :s_nationkey group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
+ "Table": "nation",
+ "Values": [
+ ":s_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1,R:3",
+ "JoinVars": {
+ "c_nationkey": 0
+ },
+ "TableName": "customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
+ "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where 1 != 1 group by cust_nation, weight_string(cust_nation)",
+ "Query": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where n2.n_nationkey = :c_nationkey and (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') group by cust_nation, weight_string(cust_nation)",
+ "Table": "nation",
+ "Values": [
+ ":c_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.nation",
+ "main.orders",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 8",
+ "query": "select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "TPC-H query 9",
+ "query": "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": "VT12001: unsupported: aggregation on columns from different sources"
+ },
+ {
+ "comment": "TPC-H query 10",
+ "query": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(20)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 DESC",
+ "ResultColumns": 8,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(2) AS revenue",
+ "GroupBy": "(0|14), (1|13), (3|12), (6|11), (4|10), (5|9), (7|8)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as c_custkey",
+ "[COLUMN 1] as c_name",
+ "(([COLUMN 14] * COALESCE([COLUMN 15], INT64(1))) * COALESCE([COLUMN 16], INT64(1))) * COALESCE([COLUMN 17], INT64(1)) as revenue",
+ "[COLUMN 2] as c_acctbal",
+ "[COLUMN 4] as n_name",
+ "[COLUMN 5] as c_address",
+ "[COLUMN 3] as c_phone",
+ "[COLUMN 6] as c_comment",
+ "[COLUMN 13]",
+ "[COLUMN 12]",
+ "[COLUMN 11]",
+ "[COLUMN 10]",
+ "[COLUMN 9]",
+ "[COLUMN 8]",
+ "[COLUMN 7]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|7) ASC, (1|8) ASC, (2|9) ASC, (3|10) ASC, (4|11) ASC, (5|12) ASC, (6|13) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,R:2,R:3,R:4,R:5,R:6,R:7,R:8,R:9,R:10,R:11,R:12,R:13,L:3,L:4,R:14,R:15",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_lineitem_customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
+ "Query": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where 1 != 1 group by 1",
+ "Query": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where l_returnflag = 'R' and l_orderkey = :o_orderkey group by 1",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:5,L:7,L:9,R:1,L:11,L:13,L:4,L:6,L:8,L:10,R:2,L:12,L:14,L:1,R:0",
+ "JoinVars": {
+ "c_nationkey": 0
+ },
+ "TableName": "customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
+ "Query": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), n_name, weight_string(n_name) from nation where 1 != 1 group by n_name, weight_string(n_name)",
+ "Query": "select count(*), n_name, weight_string(n_name) from nation where n_nationkey = :c_nationkey group by n_name, weight_string(n_name)",
+ "Table": "nation",
+ "Values": [
+ ":c_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.nation",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 11",
+ "query": "select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.00001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' ) order by value desc",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "TPC-H query 12",
+ "query": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS high_line_count, sum(2) AS low_line_count",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as l_shipmode",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as high_line_count",
+ "[COLUMN 4] * COALESCE([COLUMN 5], INT64(1)) as low_line_count",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey)",
+ "Query": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders group by o_orderkey, weight_string(o_orderkey)",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1 group by l_shipmode, weight_string(l_shipmode)",
+ "Query": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey group by l_shipmode, weight_string(l_shipmode)",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 13",
+ "query": "select c_count, count(*) as custdist from ( select c_custkey, count(o_orderkey) from customer left outer join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' group by c_custkey ) as c_orders(c_custkey, c_count) group by c_count order by custdist desc, c_count desc",
+ "plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.orderedAggregate plan"
+ },
+ {
+ "comment": "TPC-H query 14",
+ "query": "select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "TPC-H query 15 view\n#\"with revenue0(supplier_no, total_revenue) as (select l_suppkey, sum(l_extendedprice * (1 - l_discount)) from lineitem where l_shipdate >= date('1996-01-01') and l_shipdate < date('1996-01-01') + interval '3' month group by l_suppkey )\"\n#\"syntax error at position 236\"\n#Gen4 plan same as above\n# TPC-H query 15",
+ "query": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
+ "Query": "select max(total_revenue) from revenue0",
+ "Table": "revenue0"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,R:0",
+ "JoinVars": {
+ "s_suppkey": 0
+ },
+ "TableName": "supplier_revenue0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier where 1 != 1",
+ "OrderBy": "(0|4) ASC",
+ "Query": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier order by s_suppkey asc",
+ "ResultColumns": 4,
+ "Table": "supplier"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select total_revenue from revenue0 where 1 != 1",
+ "Query": "select total_revenue from revenue0 where supplier_no = :s_suppkey and total_revenue = :__sq1",
+ "Table": "revenue0",
+ "Values": [
+ ":s_suppkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0) AS max(total_revenue)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
+ "Query": "select max(total_revenue) from revenue0",
+ "Table": "revenue0"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where 1 != 1",
+ "OrderBy": "(0|5) ASC",
+ "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where total_revenue = :__sq1 and s_suppkey = supplier_no order by s_suppkey asc",
+ "ResultColumns": 5,
+ "Table": "revenue0, supplier"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.revenue0",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 16",
+ "query": "select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand <> 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.pulloutSubquery plan"
+ },
+ {
+ "comment": "TPC-H query 17",
+ "query": "select sum(l_extendedprice) / 7.0 as avg_yearly from lineitem, part where p_partkey = l_partkey and p_brand = 'Brand#23' and p_container = 'MED BOX' and l_quantity < ( select 0.2 * avg(l_quantity) from lineitem where l_partkey = p_partkey )",
+ "v3-plan": "VT03019: symbol p_partkey not found",
+ "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "TPC-H query 18",
+ "query": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(5) AS sum(l_quantity)",
+ "GroupBy": "(4|10), (3|9), (0|8), (1|7), (2|6)",
+ "ResultColumns": 6,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] as c_name",
+ "[COLUMN 3] as c_custkey",
+ "[COLUMN 4] as o_orderkey",
+ "[COLUMN 1] as o_orderdate",
+ "[COLUMN 0] as o_totalprice",
+ "([COLUMN 10] * COALESCE([COLUMN 11], INT64(1))) * COALESCE([COLUMN 12], INT64(1)) as sum(l_quantity)",
+ "[COLUMN 9]",
+ "[COLUMN 8]",
+ "[COLUMN 7]",
+ "[COLUMN 6]",
+ "[COLUMN 5]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:2,L:3,L:4,L:5,L:6,L:8,L:9,L:10,L:11,L:12,L:13,L:14,R:1",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_customer_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(2|8) DESC, (3|9) ASC, (4|10) ASC, (5|11) ASC, (0|7) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:1,L:5,L:7,R:1,R:3,L:1,L:4,L:6,L:8,R:2,R:4,L:4,L:2,R:0",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
+ "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where :o_orderkey in (select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300) group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
+ "Table": "orders",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where 1 != 1 group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
+ "Query": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where c_custkey = :o_custkey group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, sum(l_quantity) from lineitem where 1 != 1 group by 1",
+ "Query": "select 1, sum(l_quantity) from lineitem where l_orderkey = :o_orderkey group by 1",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 19",
+ "query": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS revenue",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as revenue"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:4,R:1",
+ "JoinVars": {
+ "l_partkey": 0,
+ "l_quantity": 1,
+ "l_shipinstruct": 3,
+ "l_shipmode": 2
+ },
+ "TableName": "lineitem_part",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem where 1 != 1 group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
+ "Query": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from part where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from part where p_partkey = :l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and :l_quantity >= 1 and :l_quantity <= 1 + 10 and p_size between 1 and 5 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and :l_quantity >= 10 and :l_quantity <= 10 + 10 and p_size between 1 and 10 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and :l_quantity >= 20 and :l_quantity <= 20 + 10 and p_size between 1 and 15 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' group by 1",
+ "Table": "part"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.part"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 20",
+ "query": "select s_name, s_address from supplier, nation where s_suppkey in ( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty > ( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year ) ) and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name",
+ "v3-plan": "VT03019: symbol ps_partkey not found",
+ "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "TPC-H query 21",
+ "query": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 DESC, (0|2) ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS numwait",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as s_name",
+ "(([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1))) * COALESCE([COLUMN 5], INT64(1)) as numwait",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:3,L:4,R:2,R:3",
+ "JoinVars": {
+ "l1_l_suppkey": 0
+ },
+ "TableName": "lineitem_orders_supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
+ "JoinVars": {
+ "l1_l_orderkey": 0
+ },
+ "TableName": "lineitem_orders",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where 1 != 1 group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
+ "Query": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where l1.l_receiptdate > l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) as numwait from orders where 1 != 1 group by 1",
+ "Query": "select 1, count(*) as numwait from orders where o_orderstatus = 'F' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and o_orderkey = :l1_l_orderkey group by 1",
+ "Table": "orders",
+ "Values": [
+ ":l1_l_orderkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
+ "JoinVars": {
+ "s_nationkey": 0
+ },
+ "TableName": "supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
+ "Query": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and s_suppkey = :l1_l_suppkey group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
+ "Table": "supplier",
+ "Values": [
+ ":l1_l_suppkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) as numwait from nation where 1 != 1 group by 1",
+ "Query": "select 1, count(*) as numwait from nation where n_name = 'SAUDI ARABIA' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and n_nationkey = :s_nationkey group by 1",
+ "Table": "nation",
+ "Values": [
+ ":s_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.nation",
+ "main.orders",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 22",
+ "query": "select cntrycode, count(*) as numcust, sum(c_acctbal) as totacctbal from ( select substring(c_phone from 1 for 2) as cntrycode, c_acctbal from customer where substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') and c_acctbal > ( select avg(c_acctbal) from customer where c_acctbal > 0.00 and substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') ) and not exists ( select * from orders where o_custkey = c_custkey ) ) as custsale group by cntrycode order by cntrycode",
+ "v3-plan": "VT03019: symbol c_custkey not found",
+ "gen4-plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause"
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.txt b/go/vt/vtgate/planbuilder/testdata/tpch_cases.txt
deleted file mode 100644
index b63c58a6c12..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/tpch_cases.txt
+++ /dev/null
@@ -1,1475 +0,0 @@
-# TPC-H query 1
-"select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate \u003c= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus"
-"unsupported: in scatter query: complex aggregate expression"
-Gen4 error: unsupported: in scatter query: aggregation function 'avg'
-
-# TPC-H query 2
-"select s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment from part, supplier, partsupp, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and p_size = 15 and p_type like '%BRASS' and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' and ps_supplycost = ( select min(ps_supplycost) from partsupp, supplier, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' ) order by s_acctbal desc, n_name, s_name, p_partkey limit 10"
-"symbol p_partkey not found"
-Gen4 error: unsupported: cross-shard correlated subquery
-
-# TPC-H query 3
-"select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003c date('1995-03-15') and l_shipdate \u003e date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003c date('1995-03-15') and l_shipdate \u003e date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 DESC, (2|5) ASC",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS revenue",
- "GroupBy": "(0|6), (2|5), (3|4)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as l_orderkey",
- "([COLUMN 6] * [COLUMN 7]) * [COLUMN 8] as revenue",
- "[COLUMN 1] as o_orderdate",
- "[COLUMN 2] as o_shippriority",
- "[COLUMN 5]",
- "[COLUMN 4]",
- "[COLUMN 3]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,R:1,L:2,R:2,R:3,L:1,R:4,R:5",
- "JoinVars": {
- "l_orderkey": 0
- },
- "TableName": "lineitem_orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey)",
- "Query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where l_shipdate \u003e date('1995-03-15') group by l_orderkey, weight_string(l_orderkey)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:5,L:4,L:6,L:1,R:1",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
- "Query": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where o_orderdate \u003c date('1995-03-15') and o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
- "Table": "orders",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from customer where 1 != 1 group by 1",
- "Query": "select 1, count(*) from customer where c_mktsegment = 'BUILDING' and c_custkey = :o_custkey group by 1",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 4
-"select o_orderpriority, count(*) as order_count from orders where o_orderdate \u003e= date('1993-07-01') and o_orderdate \u003c date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate \u003c l_receiptdate ) group by o_orderpriority order by o_orderpriority"
-"symbol o_orderkey not found in table or subquery"
-{
- "QueryType": "SELECT",
- "Original": "select o_orderpriority, count(*) as order_count from orders where o_orderdate \u003e= date('1993-07-01') and o_orderdate \u003c date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate \u003c l_receiptdate ) group by o_orderpriority order by o_orderpriority",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS order_count",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 1] as o_orderpriority",
- "[COLUMN 2] as order_count",
- "[COLUMN 3]"
- ],
- "Inputs": [
- {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_lineitem",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where 1 != 1 group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey)",
- "OrderBy": "(1|3) ASC",
- "Query": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where o_orderdate \u003e= date('1993-07-01') and o_orderdate \u003c date('1993-07-01') + interval '3' month group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey) order by o_orderpriority asc",
- "Table": "orders"
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1 from lineitem where 1 != 1",
- "Query": "select 1 from lineitem where l_commitdate \u003c l_receiptdate and l_orderkey = :o_orderkey",
- "Table": "lineitem"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 5 - Gen4 produces plan but the plan output is flaky
-"select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate \u003e= date('1994-01-01') and o_orderdate \u003c date('1994-01-01') + interval '1' year group by n_name order by revenue desc"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate \u003e= date('1994-01-01') and o_orderdate \u003c date('1994-01-01') + interval '1' year group by n_name order by revenue desc",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 DESC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS revenue",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as n_name",
- "(((([COLUMN 2] * [COLUMN 3]) * [COLUMN 4]) * [COLUMN 5]) * [COLUMN 6]) * [COLUMN 7] as revenue",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:3,L:4,L:5,L:6,R:2,R:3",
- "JoinVars": {
- "s_nationkey": 0
- },
- "TableName": "orders_customer_lineitem_supplier_nation_region",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,R:2,L:6,L:7,R:3,R:4",
- "JoinVars": {
- "c_nationkey": 1,
- "o_orderkey": 0
- },
- "TableName": "orders_customer_lineitem_supplier",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,L:1,R:0,L:4,R:2,L:2,R:1",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
- "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where o_orderdate \u003e= date('1994-01-01') and o_orderdate \u003c date('1994-01-01') + interval '1' year group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
- "Table": "orders"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
- "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1",
- "JoinVars": {
- "l_suppkey": 0
- },
- "TableName": "lineitem_supplier",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where 1 != 1 group by l_suppkey, weight_string(l_suppkey)",
- "Query": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where l_orderkey = :o_orderkey group by l_suppkey, weight_string(l_suppkey)",
- "Table": "lineitem"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
- "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey and s_nationkey = :c_nationkey group by s_nationkey, weight_string(s_nationkey)",
- "Table": "supplier",
- "Values": [
- ":l_suppkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
- "JoinVars": {
- "n_regionkey": 0
- },
- "TableName": "nation_region",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where 1 != 1 group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
- "Query": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where n_nationkey = :s_nationkey group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
- "Table": "nation",
- "Values": [
- ":s_nationkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from region where 1 != 1 group by 1",
- "Query": "select 1, count(*) from region where r_name = 'ASIA' and r_regionkey = :n_regionkey group by 1",
- "Table": "region",
- "Values": [
- ":n_regionkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.nation",
- "main.orders",
- "main.region",
- "main.supplier"
- ]
-}
-
-# TPC-H query 6
-"select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24"
-{
- "QueryType": "SELECT",
- "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
- "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Table": "lineitem"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS revenue",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
- "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Table": "lineitem"
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem"
- ]
-}
-
-# TPC-H query 7
-"select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(3) AS revenue",
- "GroupBy": "(0|6), (1|5), (2|4)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 4] as supp_nation",
- "[COLUMN 5] as cust_nation",
- "[COLUMN 6] as l_year",
- "(((([COLUMN 10] * [COLUMN 11]) * [COLUMN 12]) * [COLUMN 13]) * [COLUMN 14]) * [COLUMN 15] as revenue",
- "[COLUMN 9]",
- "[COLUMN 8]",
- "[COLUMN 7]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|16) ASC, (1|17) ASC, (2|18) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,R:0,L:4,L:5,L:9,R:1,L:10,L:14,R:2,L:15,L:16,L:17,L:18,L:19,R:3,R:4,L:20,R:5,L:21",
- "JoinVars": {
- "n1_n_name": 2,
- "o_custkey": 0
- },
- "TableName": "lineitem_orders_supplier_nation_customer_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,R:1,R:2,L:2,L:3,L:5,R:3,R:4,R:5,L:6,L:8,R:6,R:7,R:8,L:9,L:10,L:11,R:9,R:10,R:11,L:12",
- "JoinVars": {
- "l_suppkey": 0
- },
- "TableName": "lineitem_orders_supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,L:2,L:3,L:1,R:0,L:2,L:6,R:2,L:7,L:4,R:1,L:8",
- "JoinVars": {
- "l_orderkey": 0
- },
- "TableName": "lineitem_orders",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
- "Query": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31') group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey)",
- "Query": "select o_custkey, count(*), weight_string(o_custkey) from orders where o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey)",
- "Table": "orders",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "hash"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,R:1,R:0,R:0,R:1,R:3,R:3,R:4,L:1,R:2,R:5",
- "JoinVars": {
- "s_nationkey": 0
- },
- "TableName": "supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
- "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey group by s_nationkey, weight_string(s_nationkey)",
- "Table": "supplier",
- "Values": [
- ":l_suppkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where 1 != 1 group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
- "Query": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where n1.n_nationkey = :s_nationkey group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
- "Table": "nation",
- "Values": [
- ":s_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1,R:3",
- "JoinVars": {
- "c_nationkey": 0
- },
- "TableName": "customer_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
- "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where 1 != 1 group by cust_nation, weight_string(cust_nation)",
- "Query": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where n2.n_nationkey = :c_nationkey and (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') group by cust_nation, weight_string(cust_nation)",
- "Table": "nation",
- "Values": [
- ":c_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.nation",
- "main.orders",
- "main.supplier"
- ]
-}
-
-# TPC-H query 8
-"select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: complex aggregate expression
-
-# TPC-H query 9
-"select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: aggregation on columns from different sources not supported yet
-
-# TPC-H query 10
-"select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003e= date('1993-10-01') and o_orderdate \u003c date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003e= date('1993-10-01') and o_orderdate \u003c date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(20)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 DESC",
- "ResultColumns": 8,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(2) AS revenue",
- "GroupBy": "(0|14), (1|13), (3|12), (6|11), (4|10), (5|9), (7|8)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as c_custkey",
- "[COLUMN 1] as c_name",
- "(([COLUMN 14] * [COLUMN 15]) * [COLUMN 16]) * [COLUMN 17] as revenue",
- "[COLUMN 2] as c_acctbal",
- "[COLUMN 4] as n_name",
- "[COLUMN 5] as c_address",
- "[COLUMN 3] as c_phone",
- "[COLUMN 6] as c_comment",
- "[COLUMN 13]",
- "[COLUMN 12]",
- "[COLUMN 11]",
- "[COLUMN 10]",
- "[COLUMN 9]",
- "[COLUMN 8]",
- "[COLUMN 7]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|7) ASC, (1|8) ASC, (2|9) ASC, (3|10) ASC, (4|11) ASC, (5|12) ASC, (6|13) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,R:2,R:3,R:4,R:5,R:6,R:7,R:8,R:9,R:10,R:11,R:12,R:13,L:3,L:4,R:14,R:15",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_lineitem_customer_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_lineitem",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
- "Query": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where o_orderdate \u003e= date('1993-10-01') and o_orderdate \u003c date('1993-10-01') + interval '3' month group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
- "Table": "orders"
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where 1 != 1 group by 1",
- "Query": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where l_returnflag = 'R' and l_orderkey = :o_orderkey group by 1",
- "Table": "lineitem"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:5,L:7,L:9,R:1,L:11,L:13,L:4,L:6,L:8,L:10,R:2,L:12,L:14,L:1,R:0",
- "JoinVars": {
- "c_nationkey": 0
- },
- "TableName": "customer_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
- "Query": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*), n_name, weight_string(n_name) from nation where 1 != 1 group by n_name, weight_string(n_name)",
- "Query": "select count(*), n_name, weight_string(n_name) from nation where n_nationkey = :c_nationkey group by n_name, weight_string(n_name)",
- "Table": "nation",
- "Values": [
- ":c_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.nation",
- "main.orders"
- ]
-}
-
-# TPC-H query 11
-"select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' group by ps_partkey having sum(ps_supplycost * ps_availqty) \u003e ( select sum(ps_supplycost * ps_availqty) * 0.00001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' ) order by value desc"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: complex aggregate expression
-
-# TPC-H query 12
-"select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority \u003c\u003e '1-URGENT' and o_orderpriority \u003c\u003e '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate \u003c l_receiptdate and l_shipdate \u003c l_commitdate and l_receiptdate \u003e= date('1994-01-01') and l_receiptdate \u003c date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority \u003c\u003e '1-URGENT' and o_orderpriority \u003c\u003e '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate \u003c l_receiptdate and l_shipdate \u003c l_commitdate and l_receiptdate \u003e= date('1994-01-01') and l_receiptdate \u003c date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS high_line_count, sum(2) AS low_line_count",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as l_shipmode",
- "[COLUMN 2] * [COLUMN 3] as high_line_count",
- "[COLUMN 4] * [COLUMN 5] as low_line_count",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_lineitem",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey)",
- "Query": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders group by o_orderkey, weight_string(o_orderkey)",
- "Table": "orders"
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1 group by l_shipmode, weight_string(l_shipmode)",
- "Query": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate \u003c l_receiptdate and l_shipdate \u003c l_commitdate and l_receiptdate \u003e= date('1994-01-01') and l_receiptdate \u003c date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey group by l_shipmode, weight_string(l_shipmode)",
- "Table": "lineitem"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 13
-"select c_count, count(*) as custdist from ( select c_custkey, count(o_orderkey) from customer left outer join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' group by c_custkey ) as c_orders(c_custkey, c_count) group by c_count order by custdist desc, c_count desc"
-"using aggregation on top of a *planbuilder.orderedAggregate plan is not yet supported"
-Gen4 plan same as above
-
-# TPC-H query 14
-"select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate \u003e= date('1995-09-01') and l_shipdate \u003c date('1995-09-01') + interval '1' month"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: complex aggregate expression
-
-# TPC-H query 15 view
-#"with revenue0(supplier_no, total_revenue) as (select l_suppkey, sum(l_extendedprice * (1 - l_discount)) from lineitem where l_shipdate >= date('1996-01-01') and l_shipdate < date('1996-01-01') + interval '3' month group by l_suppkey )"
-#"syntax error at position 236"
-#Gen4 plan same as above
-# TPC-H query 15
-"select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey"
-{
- "QueryType": "SELECT",
- "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
- "Query": "select max(total_revenue) from revenue0",
- "Table": "revenue0"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,R:0",
- "JoinVars": {
- "s_suppkey": 0
- },
- "TableName": "supplier_revenue0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier where 1 != 1",
- "OrderBy": "(0|4) ASC",
- "Query": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier order by s_suppkey asc",
- "ResultColumns": 4,
- "Table": "supplier"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select total_revenue from revenue0 where 1 != 1",
- "Query": "select total_revenue from revenue0 where supplier_no = :s_suppkey and total_revenue = :__sq1",
- "Table": "revenue0",
- "Values": [
- ":s_suppkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0) AS max(total_revenue)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
- "Query": "select max(total_revenue) from revenue0",
- "Table": "revenue0"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where 1 != 1",
- "OrderBy": "(0|5) ASC",
- "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where total_revenue = :__sq1 and s_suppkey = supplier_no order by s_suppkey asc",
- "ResultColumns": 5,
- "Table": "revenue0, supplier"
- }
- ]
- },
- "TablesUsed": [
- "main.revenue0",
- "main.supplier"
- ]
-}
-
-# TPC-H query 16
-"select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand \u003c\u003e 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: using aggregation on top of a *planbuilder.pulloutSubquery plan is not yet supported
-
-# TPC-H query 17
-"select sum(l_extendedprice) / 7.0 as avg_yearly from lineitem, part where p_partkey = l_partkey and p_brand = 'Brand#23' and p_container = 'MED BOX' and l_quantity \u003c ( select 0.2 * avg(l_quantity) from lineitem where l_partkey = p_partkey )"
-"symbol p_partkey not found in table or subquery"
-Gen4 error: unsupported: cross-shard correlated subquery
-
-# TPC-H query 18
-"select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) \u003e 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) \u003e 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(5) AS sum(l_quantity)",
- "GroupBy": "(4|10), (3|9), (0|8), (1|7), (2|6)",
- "ResultColumns": 6,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] as c_name",
- "[COLUMN 3] as c_custkey",
- "[COLUMN 4] as o_orderkey",
- "[COLUMN 1] as o_orderdate",
- "[COLUMN 0] as o_totalprice",
- "([COLUMN 10] * [COLUMN 11]) * [COLUMN 12] as sum(l_quantity)",
- "[COLUMN 9]",
- "[COLUMN 8]",
- "[COLUMN 7]",
- "[COLUMN 6]",
- "[COLUMN 5]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:2,L:3,L:4,L:5,L:6,L:8,L:9,L:10,L:11,L:12,L:13,L:14,R:1",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_customer_lineitem",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(2|8) DESC, (3|9) ASC, (4|10) ASC, (5|11) ASC, (0|7) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:1,L:5,L:7,R:1,R:3,L:1,L:4,L:6,L:8,R:2,R:4,L:4,L:2,R:0",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
- "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where :o_orderkey in (select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) \u003e 300) group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
- "Table": "orders",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where 1 != 1 group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
- "Query": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where c_custkey = :o_custkey group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, sum(l_quantity) from lineitem where 1 != 1 group by 1",
- "Query": "select 1, sum(l_quantity) from lineitem where l_orderkey = :o_orderkey group by 1",
- "Table": "lineitem"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 19
-"select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity \u003e= 1 and l_quantity \u003c= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity \u003e= 10 and l_quantity \u003c= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity \u003e= 20 and l_quantity \u003c= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity \u003e= 1 and l_quantity \u003c= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity \u003e= 10 and l_quantity \u003c= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity \u003e= 20 and l_quantity \u003c= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS revenue",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] * [COLUMN 1] as revenue"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:4,R:1",
- "JoinVars": {
- "l_partkey": 0,
- "l_quantity": 1,
- "l_shipinstruct": 3,
- "l_shipmode": 2
- },
- "TableName": "lineitem_part",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem where 1 != 1 group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
- "Query": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from part where 1 != 1 group by 1",
- "Query": "select 1, count(*) from part where p_partkey = :l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and :l_quantity \u003e= 1 and :l_quantity \u003c= 1 + 10 and p_size between 1 and 5 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and :l_quantity \u003e= 10 and :l_quantity \u003c= 10 + 10 and p_size between 1 and 10 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and :l_quantity \u003e= 20 and :l_quantity \u003c= 20 + 10 and p_size between 1 and 15 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' group by 1",
- "Table": "part"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.part"
- ]
-}
-
-# TPC-H query 20
-"select s_name, s_address from supplier, nation where s_suppkey in ( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty \u003e ( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year ) ) and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name"
-"symbol ps_partkey not found in table or subquery"
-Gen4 error: unsupported: cross-shard correlated subquery
-
-# TPC-H query 21
-"select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate \u003e l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey \u003c\u003e l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey \u003c\u003e l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate \u003e l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey \u003c\u003e l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey \u003c\u003e l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 DESC, (0|2) ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS numwait",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as s_name",
- "(([COLUMN 2] * [COLUMN 3]) * [COLUMN 4]) * [COLUMN 5] as numwait",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:3,L:4,R:2,R:3",
- "JoinVars": {
- "l1_l_suppkey": 0
- },
- "TableName": "lineitem_orders_supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
- "JoinVars": {
- "l1_l_orderkey": 0
- },
- "TableName": "lineitem_orders",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where 1 != 1 group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
- "Query": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where l1.l_receiptdate \u003e l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) as numwait from orders where 1 != 1 group by 1",
- "Query": "select 1, count(*) as numwait from orders where o_orderstatus = 'F' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) and o_orderkey = :l1_l_orderkey group by 1",
- "Table": "orders",
- "Values": [
- ":l1_l_orderkey"
- ],
- "Vindex": "hash"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
- "JoinVars": {
- "s_nationkey": 0
- },
- "TableName": "supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
- "Query": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) and s_suppkey = :l1_l_suppkey group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
- "Table": "supplier",
- "Values": [
- ":l1_l_suppkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) as numwait from nation where 1 != 1 group by 1",
- "Query": "select 1, count(*) as numwait from nation where n_name = 'SAUDI ARABIA' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) and n_nationkey = :s_nationkey group by 1",
- "Table": "nation",
- "Values": [
- ":s_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.nation",
- "main.orders",
- "main.supplier"
- ]
-}
-
-# TPC-H query 22
-"select cntrycode, count(*) as numcust, sum(c_acctbal) as totacctbal from ( select substring(c_phone from 1 for 2) as cntrycode, c_acctbal from customer where substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') and c_acctbal \u003e ( select avg(c_acctbal) from customer where c_acctbal \u003e 0.00 and substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') ) and not exists ( select * from orders where o_custkey = c_custkey ) ) as custsale group by cntrycode order by cntrycode"
-"symbol c_custkey not found in table or subquery"
-Gen4 error: exists sub-queries are only supported with AND clause
diff --git a/go/vt/vtgate/planbuilder/testdata/transaction_cases.json b/go/vt/vtgate/planbuilder/testdata/transaction_cases.json
new file mode 100644
index 00000000000..eb27ad51b43
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/transaction_cases.json
@@ -0,0 +1,58 @@
+[
+ {
+ "comment": "Begin",
+ "query": "begin",
+ "plan": {
+ "QueryType": "BEGIN",
+ "Original": "begin"
+ }
+ },
+ {
+ "comment": "Start Transaction",
+ "query": "start transaction",
+ "plan": {
+ "QueryType": "BEGIN",
+ "Original": "start transaction"
+ }
+ },
+ {
+ "comment": "Commit",
+ "query": "commit",
+ "plan": {
+ "QueryType": "COMMIT",
+ "Original": "commit"
+ }
+ },
+ {
+ "comment": "Rollback",
+ "query": "rollback",
+ "plan": {
+ "QueryType": "ROLLBACK",
+ "Original": "rollback"
+ }
+ },
+ {
+ "comment": "Savepoint",
+ "query": "savepoint a",
+ "plan": {
+ "QueryType": "SAVEPOINT",
+ "Original": "savepoint a"
+ }
+ },
+ {
+ "comment": "Savepoint rollback",
+ "query": "rollback work to savepoint a",
+ "plan": {
+ "QueryType": "SAVEPOINT_ROLLBACK",
+ "Original": "rollback work to savepoint a"
+ }
+ },
+ {
+ "comment": "Savepoint release",
+ "query": "release savepoint a",
+ "plan": {
+ "QueryType": "RELEASE",
+ "Original": "release savepoint a"
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt b/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt
deleted file mode 100644
index 68be3ff6d8e..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-# Begin
-"begin"
-{
- "QueryType": "BEGIN",
- "Original": "begin"
-}
-Gen4 plan same as above
-
-# Start Transaction
-"start transaction"
-{
- "QueryType": "BEGIN",
- "Original": "start transaction"
-}
-Gen4 plan same as above
-
-# Commit
-"commit"
-{
- "QueryType": "COMMIT",
- "Original": "commit"
-}
-Gen4 plan same as above
-
-# Rollback
-"rollback"
-{
- "QueryType": "ROLLBACK",
- "Original": "rollback"
-}
-Gen4 plan same as above
-
-# Savepoint
-"savepoint a"
-{
- "QueryType": "SAVEPOINT",
- "Original": "savepoint a"
-}
-Gen4 plan same as above
-
-# Savepoint rollback
-"rollback work to savepoint a"
-{
- "QueryType": "SAVEPOINT_ROLLBACK",
- "Original": "rollback work to savepoint a"
-}
-Gen4 plan same as above
-
-# Savepoint release
-"release savepoint a"
-{
- "QueryType": "RELEASE",
- "Original": "release savepoint a"
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json b/go/vt/vtgate/planbuilder/testdata/union_cases.json
new file mode 100644
index 00000000000..1acfa7b82dd
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json
@@ -0,0 +1,2582 @@
+[
+ {
+ "comment": "union all between two scatter selects",
+ "query": "select id from user union all select id from music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union all select id from music",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
+ "Query": "select id from `user` union all select id from music",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union all select id from music",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
+ "Query": "select id from `user` union all select id from music",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union distinct between two scatter selects",
+ "query": "select id from user union select id from music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select id from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select id from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union all between two SelectEqualUnique",
+ "query": "select id from user where id = 1 union all select id from user where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union all select id from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union all select id from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "almost dereks query - two queries with order by and limit being scattered to two different sets of tablets",
+ "query": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Union all",
+ "query": "select col1, col2 from user union all select col1, col2 from user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
+ "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
+ "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union operations in subqueries (FROM)",
+ "query": "select * from (select * from user union all select * from user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select * from user union all select * from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select * from (select * from `user` union all select * from user_extra) as t",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select * from user union all select * from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select * from (select * from `user` union all select * from user_extra) as t",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union operations in derived table, without star expression (FROM)¡",
+ "query": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union all between two scatter selects, with order by",
+ "query": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union all on scatter and single route",
+ "query": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union of information_schema with normal table",
+ "query": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select user_name from unsharded where 1 != 1",
+ "Query": "select user_name from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CHARACTER_SET_NAME, weight_string(CHARACTER_SET_NAME) from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select distinct CHARACTER_SET_NAME, weight_string(CHARACTER_SET_NAME) from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select user_name, weight_string(user_name) from unsharded where 1 != 1",
+ "Query": "select distinct user_name, weight_string(user_name) from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "union of information_schema with normal table",
+ "query": "select * from unsharded union select * from information_schema.CHARACTER_SETS",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded union select * from information_schema.CHARACTER_SETS",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select * from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded union select * from information_schema.CHARACTER_SETS",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select distinct * from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CHARACTER_SET_NAME, DEFAULT_COLLATE_NAME, DESCRIPTION, MAXLEN from information_schema.CHARACTER_SETS where 1 != 1",
+ "Query": "select distinct CHARACTER_SET_NAME, DEFAULT_COLLATE_NAME, DESCRIPTION, MAXLEN from information_schema.CHARACTER_SETS",
+ "Table": "information_schema.CHARACTER_SETS"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "multi-shard union",
+ "query": "(select id from user union select id from music) union select 1 from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user union select id from music) union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user union select id from music) union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1, weight_string(1) from dual where 1 != 1",
+ "Query": "select distinct 1, weight_string(1) from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multi-shard union",
+ "query": "select 1 from music union (select id from user union all select name from unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music union (select id from user union all select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `name` from unsharded where 1 != 1",
+ "Query": "select `name` from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side"
+ },
+ {
+ "comment": "multi-shard union",
+ "query": "select 1 from music union (select id from user union select name from unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music union (select id from user union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `name` from unsharded where 1 != 1",
+ "Query": "select `name` from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side"
+ },
+ {
+ "comment": "union with the same target shard because of vindex",
+ "query": "select * from music where id = 1 union select * from user where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select distinct * from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select distinct * from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union with different target shards",
+ "query": "select 1 from music where id = 1 union select 1 from music where id = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music where id = 2",
+ "Table": "music",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "0: binary"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select distinct 1 from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select distinct 1 from music where id = 2",
+ "Table": "music",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "multiple select statement have inner order by with union - TODO (systay) no need to send down ORDER BY if we are going to loose it with UNION DISTINCT",
+ "query": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by 1 desc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by 1 asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "(select id, weight_string(id) from `user` where 1 != 1) union (select id, weight_string(id) from `user` where 1 != 1)",
+ "OrderBy": "(0|1) DESC",
+ "Query": "(select id, weight_string(id) from `user` order by id desc) union (select id, weight_string(id) from `user` order by id asc)",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple unions",
+ "query": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
+ "Query": "select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 2.0 from `user` where 1 != 1",
+ "Query": "select 2.0 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "0: binary"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1 union all select null from dual where 1 != 1 union all select 1.0 from dual where 1 != 1 union all select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
+ "Query": "select 1 from dual union all select null from dual union all select 1.0 from dual union all select '1' from dual union select 2 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 2.0 from `user` where 1 != 1",
+ "Query": "select distinct 2.0 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union distinct between a scatter query and a join (other side)",
+ "query": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name` from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
+ "Query": "select 'b', 'c' from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:2)",
+ "(1:3)"
+ ],
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
+ "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union distinct between a scatter query and a join (other side)",
+ "query": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
+ "Query": "select 'b', 'c' from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name` from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:2)",
+ "(1:3)"
+ ],
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
+ "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unmergable because we are using aggregation",
+ "query": "select count(*) as s from user union select count(*) as s from music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) as s from user union select count(*) as s from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from `user` where 1 != 1",
+ "Query": "select count(*) as s from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from music where 1 != 1",
+ "Query": "select count(*) as s from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) as s from user union select count(*) as s from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "0: binary"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from `user` where 1 != 1",
+ "Query": "select count(*) as s from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from music where 1 != 1",
+ "Query": "select count(*) as s from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Union in derived table with first SELECT being an UNION",
+ "query": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id + 1 from `user` where 1 != 1",
+ "Query": "select id + 1 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union all select id + 1, weight_string(id + 1) from `user` where 1 != 1 union select user_id, weight_string(user_id) from user_extra where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union all select id + 1, weight_string(id + 1) from `user` union select user_id, weight_string(user_id) from user_extra",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "gen4 optimises away ORDER BY when it's safe to do",
+ "query": "(select id from user union select id from music order by id) union select 1 from unsharded",
+ "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user union select id from music order by id) union select 1 from unsharded",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1, weight_string(1) from unsharded where 1 != 1",
+ "Query": "select distinct 1, weight_string(1) from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "push down the ::upper_limit to the sources, since we are doing DISTINCT on them, it's safe",
+ "query": "select id from user union select 3 limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select 3 limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 3 from dual where 1 != 1",
+ "Query": "select 3 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select 3 limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "Query": "select distinct id, weight_string(id) from `user` limit :__upper_limit",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 3, weight_string(3) from dual where 1 != 1",
+ "Query": "select distinct 3, weight_string(3) from dual limit :__upper_limit",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "silly query that should be collapsed into a single unsharded UNION route",
+ "query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
+ "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
+ "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "UNION that needs to be reordered to be merged more aggressively. Gen4 is able to get it down to 2 routes",
+ "query": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col2 from unsharded where 1 != 1",
+ "Query": "select col2 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from user_extra where 1 != 1",
+ "Query": "select col from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col, weight_string(col) from unsharded where 1 != 1 union select col2, weight_string(col2) from unsharded where 1 != 1",
+ "Query": "select col, weight_string(col) from unsharded union select col2, weight_string(col2) from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select col, weight_string(col) from user_extra where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select col, weight_string(col) from user_extra",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with union",
+ "query": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "tbl1_id": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
+ "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
+ "Table": "`user`",
+ "Values": [
+ ":tbl1_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "tbl1_id": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
+ "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
+ "Table": "`user`",
+ "Values": [
+ ":tbl1_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous LIMIT",
+ "query": "select id from user limit 1 union all select id from music limit 1",
+ "plan": "syntax error at position 34 near 'union'"
+ },
+ {
+ "comment": "ambiguous ORDER BY",
+ "query": "select id from user order by id union all select id from music order by id desc",
+ "plan": "syntax error at position 38 near 'union'"
+ },
+ {
+ "comment": "different number of columns",
+ "query": "select id, 42 from user where id = 1 union all select id from user where id = 5",
+ "v3-plan": "The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from `user` where id = 1 union all select id from `user` where id = 5",
+ "gen4-plan": "The used SELECT statements have a different number of columns"
+ },
+ {
+ "comment": "union with invalid order by clause with table qualifier",
+ "query": "select id from user union select 3 order by user.id",
+ "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION",
+ "gen4-plan": "Table `user` from one of the SELECTs cannot be used in global ORDER clause"
+ },
+ {
+ "comment": "union with invalid order by clause with table qualifier",
+ "query": "select id from user union select 3 order by id",
+ "plan": "VT12001: unsupported: ORDER BY on top of UNION"
+ },
+ {
+ "comment": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
+ "query": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
+ "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id + 42 as foo, weight_string(id + 42), 1 from `user` where 1 != 1",
+ "Query": "select distinct id + 42 as foo, weight_string(id + 42), 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 + id as foo, weight_string(1 + id), 1 from unsharded where 1 != 1",
+ "Query": "select distinct 1 + id as foo, weight_string(1 + id), 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "systable union query in derived table with constraint on outside (without star projection)",
+ "query": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.COLUMN_NAME from (select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1 union select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1",
+ "Query": "select kcu.COLUMN_NAME from (select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name and kcu.COLUMN_NAME = 'primary' union select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name1 and kcu.COLUMN_NAME = 'primary') as kcu",
+ "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.key_column_usage"
+ }
+ }
+ },
+ {
+ "comment": "pushes predicate on both sides of UNION",
+ "query": "select * from (select name, id as foo from user union select 'extra', user_id from user_extra) X where X.foo = 3",
+ "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select name, id as foo from user union select 'extra', user_id from user_extra) X where X.foo = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select X.`name`, X.foo from (select `name`, id as foo from `user` where 1 != 1 union select 'extra', user_id from user_extra where 1 != 1) as X where 1 != 1",
+ "Query": "select X.`name`, X.foo from (select `name`, id as foo from `user` where id = 3 union select 'extra', user_id from user_extra where user_id = 3) as X",
+ "Table": "`user`",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "systable union query in derived table with constraint on outside (star projection)",
+ "query": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'",
+ "v3-plan": "VT03019: symbol constraint_name not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from (select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1 union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1",
+ "Query": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from (select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name and kcu.CONSTRAINT_NAME = 'primary' union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name1 and kcu.CONSTRAINT_NAME = 'primary') as kcu",
+ "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.key_column_usage"
+ }
+ }
+ },
+ {
+ "comment": "unknown columns are OK as long as the whole query is unsharded",
+ "query": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select * from unsharded where 1 != 1) as last_failed where 1 != 1 union all select * from (select * from unsharded where 1 != 1) as last_succeeded where 1 != 1",
+ "Query": "select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'FAILED' order by buildNumber desc limit 1) as last_failed union all select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'SUCCEEDED' order by buildNumber desc limit 1) as last_succeeded order by buildNumber desc limit 1",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select * from unsharded where 1 != 1) as last_failed where 1 != 1 union all select * from (select * from unsharded where 1 != 1) as last_succeeded where 1 != 1",
+ "Query": "select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'FAILED' order by buildNumber desc limit 1) as last_failed union all select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'SUCCEEDED' order by buildNumber desc limit 1) as last_succeeded order by buildNumber desc limit 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "union of unsharded route with sharded join with involvement of weight string",
+ "query": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, foo, bar from unsharded where 1 != 1",
+ "Query": "select id, foo, bar from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "TableName": "`user`_authoritative",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.intcol, `user`.textcol2 from `user` where 1 != 1",
+ "Query": "select `user`.intcol, `user`.textcol2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select authoritative.col2 from authoritative where 1 != 1",
+ "Query": "select authoritative.col2 from authoritative",
+ "Table": "authoritative"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:3)",
+ "(1:4)",
+ "(2:5)"
+ ],
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded where 1 != 1",
+ "Query": "select distinct id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,L:3,R:1",
+ "TableName": "`user`_authoritative",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user` where 1 != 1",
+ "Query": "select `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select authoritative.col2, weight_string(authoritative.col2) from authoritative where 1 != 1",
+ "Query": "select authoritative.col2, weight_string(authoritative.col2) from authoritative",
+ "Table": "authoritative"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.authoritative",
+ "user.user"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.txt b/go/vt/vtgate/planbuilder/testdata/union_cases.txt
deleted file mode 100644
index abf1f20f7b9..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/union_cases.txt
+++ /dev/null
@@ -1,2310 +0,0 @@
-# union all between two scatter selects
-"select id from user union all select id from music"
-{
- "QueryType": "SELECT",
- "Original": "select id from user union all select id from music",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
- "Query": "select id from `user` union all select id from music",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user union all select id from music",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
- "Query": "select id from `user` union all select id from music",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union distinct between two scatter selects
-"select id from user union select id from music"
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select id from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select id from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union all between two SelectEqualUnique
-"select id from user where id = 1 union all select id from user where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union all select id from user where id = 5",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union all select id from user where id = 5",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#almost dereks query - two queries with order by and limit being scattered to two different sets of tablets
-"(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)"
-{
- "QueryType": "SELECT",
- "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Union all
-"select col1, col2 from user union all select col1, col2 from user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
- "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
- "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union operations in subqueries (FROM)
-"select * from (select * from user union all select * from user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select * from user union all select * from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select * from (select * from `user` union all select * from user_extra) as t",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select * from user union all select * from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select * from (select * from `user` union all select * from user_extra) as t",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union operations in derived table, without star expression (FROM)¡
-"select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union all between two scatter selects, with order by
-"(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union all on scatter and single route
-"select id from user where id = 1 union select id from user where id = 1 union all select id from user"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# union of information_schema with normal table
-"select * from information_schema.a union select * from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a union select * from unsharded",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a union select * from unsharded",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select distinct * from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select distinct * from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# union of information_schema with normal table
-"select * from unsharded union select * from information_schema.a"
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded union select * from information_schema.a",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded union select * from information_schema.a",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select distinct * from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select distinct * from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# multi-shard union
-"(select id from user union select id from music) union select 1 from dual"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user union select id from music) union select 1 from dual",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music",
- "Table": "music"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual",
- "Table": "dual"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user union select id from music) union select 1 from dual",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1, weight_string(1) from dual where 1 != 1",
- "Query": "select distinct 1, weight_string(1) from dual",
- "Table": "dual"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.music",
- "user.user"
- ]
-}
-
-# multi-shard union
-"select 1 from music union (select id from user union all select name from unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music union (select id from user union all select name from unsharded)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- },
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select `name` from unsharded where 1 != 1",
- "Query": "select `name` from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-Gen4 error: nesting of unions at the right-hand side is not yet supported
-
-# multi-shard union
-"select 1 from music union (select id from user union select name from unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music union (select id from user union select name from unsharded)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- },
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select `name` from unsharded where 1 != 1",
- "Query": "select `name` from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
-}
-Gen4 error: nesting of unions at the right-hand side is not yet supported
-
-# union with the same target shard because of vindex
-"select * from music where id = 1 union select * from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from music where id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from music where id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select distinct * from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select distinct * from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union with different target shards
-"select 1 from music where id = 1 union select 1 from music where id = 2"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music where id = 2",
- "Table": "music",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "0: binary"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select distinct 1 from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select distinct 1 from music where id = 2",
- "Table": "music",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# multiple select statement have inner order by with union - TODO (systay) no need to send down ORDER BY if we are going to loose it with UNION DISTINCT
-"(select id from user order by 1 desc) union (select id from user order by 1 asc)"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by 1 desc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by 1 asc",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "(select id, weight_string(id) from `user` where 1 != 1) union (select id, weight_string(id) from `user` where 1 != 1)",
- "OrderBy": "(0|1) DESC",
- "Query": "(select id, weight_string(id) from `user` order by id desc) union (select id, weight_string(id) from `user` order by id asc)",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple unions
-"select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user"
-{
- "QueryType": "SELECT",
- "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
- "Query": "select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 2.0 from `user` where 1 != 1",
- "Query": "select 2.0 from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "0: binary"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1 union all select null from dual where 1 != 1 union all select 1.0 from dual where 1 != 1 union all select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
- "Query": "select 1 from dual union all select null from dual union all select 1.0 from dual union all select '1' from dual union select 2 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 2.0 from `user` where 1 != 1",
- "Query": "select distinct 2.0 from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# union distinct between a scatter query and a join (other side)
-"(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user"
-{
- "QueryType": "SELECT",
- "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name` from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
- "Query": "select 'b', 'c' from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:2)",
- "(1:3)"
- ],
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
- "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union distinct between a scatter query and a join (other side)
-"select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')"
-{
- "QueryType": "SELECT",
- "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
- "Query": "select 'b', 'c' from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name` from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:2)",
- "(1:3)"
- ],
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
- "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# unmergable because we are using aggregation
-"select count(*) as s from user union select count(*) as s from music"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) as s from user union select count(*) as s from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from `user` where 1 != 1",
- "Query": "select count(*) as s from `user`",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from music where 1 != 1",
- "Query": "select count(*) as s from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) as s from user union select count(*) as s from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "0: binary"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from `user` where 1 != 1",
- "Query": "select count(*) as s from `user`",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from music where 1 != 1",
- "Query": "select count(*) as s from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Union in derived table with first SELECT being an UNION
-"select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id + 1 from `user` where 1 != 1",
- "Query": "select id + 1 from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union all select id + 1, weight_string(id + 1) from `user` where 1 != 1 union select user_id, weight_string(user_id) from user_extra where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union all select id + 1, weight_string(id + 1) from `user` union select user_id, weight_string(user_id) from user_extra",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# gen4 optimises away ORDER BY when it's safe to do
-"(select id from user union select id from music order by id) union select 1 from unsharded"
-"can't do ORDER BY on top of UNION"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user union select id from music order by id) union select 1 from unsharded",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1, weight_string(1) from unsharded where 1 != 1",
- "Query": "select distinct 1, weight_string(1) from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.music",
- "user.user"
- ]
-}
-
-# push down the ::upper_limit to the sources, since we are doing DISTINCT on them, it's safe
-"select id from user union select 3 limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select 3 limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 3 from dual where 1 != 1",
- "Query": "select 3 from dual",
- "Table": "dual"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select 3 limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "Query": "select distinct id, weight_string(id) from `user` limit :__upper_limit",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 3, weight_string(3) from dual where 1 != 1",
- "Query": "select distinct 3, weight_string(3) from dual limit :__upper_limit",
- "Table": "dual"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# silly query that should be collapsed into a single unsharded UNION route
-"(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1"
-{
- "QueryType": "SELECT",
- "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
- "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
- "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# UNION that needs to be reordered to be merged more aggressively. Gen4 is able to get it down to 2 routes
-"select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col2 from unsharded where 1 != 1",
- "Query": "select col2 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from user_extra where 1 != 1",
- "Query": "select col from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col, weight_string(col) from unsharded where 1 != 1 union select col2, weight_string(col2) from unsharded where 1 != 1",
- "Query": "select col, weight_string(col) from unsharded union select col2, weight_string(col2) from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select col, weight_string(col) from user_extra where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select col, weight_string(col) from user_extra",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with union
-"select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id"
-{
- "QueryType": "SELECT",
- "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "tbl1_id": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
- "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
- "Table": "`user`",
- "Values": [
- ":tbl1_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "tbl1_id": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
- "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
- "Table": "`user`",
- "Values": [
- ":tbl1_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ambiguous LIMIT
-"select id from user limit 1 union all select id from music limit 1"
-"syntax error at position 34 near 'union'"
-Gen4 plan same as above
-
-# ambiguous ORDER BY
-"select id from user order by id union all select id from music order by id desc"
-"syntax error at position 38 near 'union'"
-Gen4 plan same as above
-
-# different number of columns
-"select id, 42 from user where id = 1 union all select id from user where id = 5"
-"The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from `user` where id = 1 union all select id from `user` where id = 5"
-Gen4 error: The used SELECT statements have a different number of columns
-
-# union with invalid order by clause with table qualifier
-"select id from user union select 3 order by user.id"
-"can't do ORDER BY on top of UNION"
-Gen4 error: Table 'user' from one of the SELECTs cannot be used in global ORDER clause
-
-# union with invalid order by clause with table qualifier
-"select id from user union select 3 order by id"
-"can't do ORDER BY on top of UNION"
-Gen4 plan same as above
-
-"select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t"
-"unsupported: expression on results of a cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id + 42 as foo, weight_string(id + 42), 1 from `user` where 1 != 1",
- "Query": "select distinct id + 42 as foo, weight_string(id + 42), 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 + id as foo, weight_string(1 + id), 1 from unsharded where 1 != 1",
- "Query": "select distinct 1 + id as foo, weight_string(1 + id), 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
new file mode 100644
index 00000000000..5a2c92451d4
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
@@ -0,0 +1,489 @@
+[
+ {
+ "comment": "union operations in subqueries (expressions)",
+ "query": "select * from user where id in (select * from user union select * from user_extra)",
+ "plan": "VT12001: unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "TODO: Implement support for select with a target destination",
+ "query": "select * from `user[-]`.user_metadata",
+ "plan": "VT12001: unsupported: SELECT with a target destination"
+ },
+ {
+ "comment": "Unsupported INSERT statement with a target destination",
+ "query": "insert into `user[-]`.user_metadata (a, b) values (1,2)",
+ "plan": "VT12001: unsupported: INSERT with a target destination"
+ },
+ {
+ "comment": "Unsupported delete statement with a replica target destination",
+ "query": "DELETE FROM `user[-]@replica`.user_metadata limit 1",
+ "plan": "VT09002: delete statement with a replica target"
+ },
+ {
+ "comment": "Unsupported update statement with a replica target destination",
+ "query": "update `user[-]@replica`.user_metadata set id=2",
+ "plan": "VT09002: update statement with a replica target"
+ },
+ {
+ "comment": "scatter order by with * expression",
+ "query": "select * from user order by id",
+ "v3-plan": "VT12001: unsupported: in scatter query: ORDER BY must reference a column in the SELECT list: id asc",
+ "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "order by rand on a cross-shard subquery",
+ "query": "select id from (select user.id, user.col from user join user_extra) as t order by rand()",
+ "v3-plan": "VT12001: unsupported: memory sort: complex ORDER BY expression: rand()",
+ "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: rand()"
+ },
+ {
+ "comment": "natural join",
+ "query": "select * from user natural join user_extra",
+ "plan": "VT12001: unsupported: natural join"
+ },
+ {
+ "comment": "join with USING construct",
+ "query": "select * from user join user_extra using(id)",
+ "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries",
+ "gen4-plan": "can't handle JOIN USING without authoritative tables"
+ },
+ {
+ "comment": "join with USING construct with 3 tables",
+ "query": "select user.id from user join user_extra using(id) join music using(id2)",
+ "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries",
+ "gen4-plan": "can't handle JOIN USING without authoritative tables"
+ },
+ {
+ "comment": "natural left join",
+ "query": "select * from user natural left join user_extra",
+ "plan": "VT12001: unsupported: natural left join"
+ },
+ {
+ "comment": "natural right join",
+ "query": "select * from user natural right join user_extra",
+ "plan": "VT12001: unsupported: natural right join"
+ },
+ {
+ "comment": "* expresson not allowed for cross-shard joins",
+ "query": "select * from user join user_extra",
+ "plan": "VT12001: unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "Group by column number, used with non-aliased expression (duplicated code)",
+ "query": "select * from user group by 1",
+ "v3-plan": "VT12001: unsupported: '*' expression in cross-shard query",
+ "gen4-plan": "cannot use column offsets in group statement when using `*`"
+ },
+ {
+ "comment": "complex group by expression",
+ "query": "select a from user group by a+1",
+ "v3-plan": "VT12001: unsupported: in scatter query: only simple references are allowed",
+ "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: a + 1"
+ },
+ {
+ "comment": "Complex aggregate expression on scatter",
+ "query": "select 1+count(*) from user",
+ "plan": "VT12001: unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "Multi-value aggregates not supported",
+ "query": "select count(a,b) from user",
+ "v3-plan": "VT12001: unsupported: only one expression is allowed inside aggregates: count(a, b)",
+ "gen4-plan": "VT03001: aggregate functions take a single argument 'count(a, b)'"
+ },
+ {
+ "comment": "scatter aggregate complex order by",
+ "query": "select id from user group by id order by id+1",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: id + 1",
+ "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: id + 1"
+ },
+ {
+ "comment": "Scatter order by is complex with aggregates in select",
+ "query": "select col, count(*) from user group by col order by col+1",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: col + 1",
+ "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: col + 1"
+ },
+ {
+ "comment": "Aggregate detection (group_concat)",
+ "query": "select group_concat(user.a) from user join user_extra",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'group_concat'"
+ },
+ {
+ "comment": "subqueries not supported in group by",
+ "query": "select id from user group by id, (select id from user_extra)",
+ "v3-plan": "VT12001: unsupported: subqueries disallowed in sqlparser.GroupBy",
+ "gen4-plan": "VT12001: unsupported: subqueries in GROUP BY"
+ },
+ {
+ "comment": "Order by uses cross-shard expression",
+ "query": "select id from user order by id+1",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: id + 1",
+ "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: id + 1"
+ },
+ {
+ "comment": "Order by column number with collate",
+ "query": "select user.col1 as a from user order by 1 collate utf8_general_ci",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: 1 collate utf8_general_ci",
+ "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: a collate utf8_general_ci"
+ },
+ {
+ "comment": "subqueries in delete",
+ "query": "delete from user where col = (select id from unsharded)",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": "VT12001: unsupported: subqueries in DML"
+ },
+ {
+ "comment": "sharded subqueries in unsharded delete",
+ "query": "delete from unsharded where col = (select id from user)",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": "VT12001: unsupported: subqueries in DML"
+ },
+ {
+ "comment": "sharded delete with limit clasue",
+ "query": "delete from user_extra limit 10",
+ "v3-plan": "VT12001: unsupported: multi-shard delete with LIMIT",
+ "gen4-plan": "VT12001: unsupported: multi shard DELETE with LIMIT"
+ },
+ {
+ "comment": "sharded subquery in unsharded subquery in unsharded delete",
+ "query": "delete from unsharded where col = (select id from unsharded where id = (select id from user))",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": "VT12001: unsupported: subqueries in DML"
+ },
+ {
+ "comment": "sharded join unsharded subqueries in unsharded delete",
+ "query": "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": "VT12001: unsupported: subqueries in DML"
+ },
+ {
+ "comment": "scatter update with limit clause",
+ "query": "update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1",
+ "v3-plan": "VT12001: unsupported: multi-shard update with LIMIT",
+ "gen4-plan": "VT12001: unsupported: multi shard UPDATE with LIMIT"
+ },
+ {
+ "comment": "multi delete multi table",
+ "query": "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'",
+ "plan": "VT12001: unsupported: multi-shard or vindex write statement"
+ },
+ {
+ "comment": "update changes primary vindex column",
+ "query": "update user set id = 1 where id = 1",
+ "v3-plan": "VT12001: unsupported: you cannot update primary vindex columns; invalid update on vindex: user_index",
+ "gen4-plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: user_index"
+ },
+ {
+ "comment": "update change in multicol vindex column",
+ "query": "update multicol_tbl set colc = 5, colb = 4 where cola = 1 and colb = 2",
+ "v3-plan": "VT12001: unsupported: you cannot update primary vindex columns; invalid update on vindex: multicolIdx",
+ "gen4-plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: multicolIdx"
+ },
+ {
+ "comment": "update changes non lookup vindex column",
+ "query": "update user_metadata set md5 = 1 where user_id = 1",
+ "v3-plan": "VT12001: unsupported: you can only update lookup vindexes; invalid update on vindex: user_md5_index",
+ "gen4-plan": "VT12001: unsupported: you can only UPDATE lookup vindexes; invalid update on vindex: user_md5_index"
+ },
+ {
+ "comment": "update with complex set clause",
+ "query": "update music set id = id + 1 where id = 1",
+ "v3-plan": "VT12001: unsupported: only values are supported: invalid update on column: `id` with expr: [id + 1]",
+ "gen4-plan": "VT12001: unsupported: only values are supported; invalid update on column: `id` with expr: [id + 1]"
+ },
+ {
+ "comment": "update by primary keyspace id, changing one vindex column, limit without order clause",
+ "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10",
+ "v3-plan": "VT12001: unsupported: need to provide ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map",
+ "gen4-plan": "VT12001: unsupported: you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map"
+ },
+ {
+ "comment": "update with derived table",
+ "query": "update (select id from user) as u set id = 4",
+ "v3-plan": "VT12001: unsupported: sharded subqueries in DML",
+ "gen4-plan": "The target table u of the UPDATE is not updatable"
+ },
+ {
+ "comment": "join in update tables",
+ "query": "update user join user_extra on user.id = user_extra.id set user.name = 'foo'",
+ "v3-plan": "VT12001: unsupported: multi-shard or vindex write statement",
+ "gen4-plan": "VT12001: unsupported: multiple tables in update"
+ },
+ {
+ "comment": "multiple tables in update",
+ "query": "update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id",
+ "v3-plan": "VT12001: unsupported: multi-shard or vindex write statement",
+ "gen4-plan": "VT12001: unsupported: multiple tables in update"
+ },
+ {
+ "comment": "unsharded insert, unqualified names and auto-inc combined",
+ "query": "insert into unsharded_auto select col from unsharded",
+ "plan": "VT12001: unsupported: auto-increment and SELECT in INSERT"
+ },
+ {
+ "comment": "unsharded insert, no col list with auto-inc",
+ "query": "insert into unsharded_auto values(1,1)",
+ "plan": "VT13001: [BUG] column list required for tables with auto-inc columns"
+ },
+ {
+ "comment": "unsharded insert, col list does not match values",
+ "query": "insert into unsharded_auto(id, val) values(1)",
+ "plan": "VT13001: [BUG] column list does not match values"
+ },
+ {
+ "comment": "sharded upsert can't change vindex",
+ "query": "insert into user(id) values(1) on duplicate key update id = 3",
+ "plan": "VT12001: unsupported: DML cannot update vindex column"
+ },
+ {
+ "comment": "sharded upsert can't change vindex using values function",
+ "query": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(id)",
+ "plan": "VT12001: unsupported: DML cannot update vindex column"
+ },
+ {
+ "comment": "sharded replace no vindex",
+ "query": "replace into user(val) values(1, 'foo')",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "sharded replace with vindex",
+ "query": "replace into user(id, name) values(1, 'foo')",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "replace no column list",
+ "query": "replace into user values(1, 2, 3)",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "replace with mimatched column list",
+ "query": "replace into user(id) values (1, 2)",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "replace with one vindex",
+ "query": "replace into user(id) values (1)",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "replace with non vindex on vindex-enabled table",
+ "query": "replace into user(nonid) values (2)",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "replace with all vindexes supplied",
+ "query": "replace into user(nonid, name, id) values (2, 'foo', 1)",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "replace for non-vindex autoinc",
+ "query": "replace into user_extra(nonid) values (2)",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "replace with multiple rows",
+ "query": "replace into user(id) values (1), (2)",
+ "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace"
+ },
+ {
+ "comment": "select keyspace_id from user_index where id = 1 and id = 2",
+ "query": "select keyspace_id from user_index where id = 1 and id = 2",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (multiple filters)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where func(id)",
+ "query": "select keyspace_id from user_index where func(id)",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (not a comparison)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where id > 1",
+ "query": "select keyspace_id from user_index where id > 1",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (not equality)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where 1 = id",
+ "query": "select keyspace_id from user_index where 1 = id",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (lhs is not a column)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where keyspace_id = 1",
+ "query": "select keyspace_id from user_index where keyspace_id = 1",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (lhs is not id)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where id = id+1",
+ "query": "select keyspace_id from user_index where id = id+1",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (rhs is not a value)"
+ },
+ {
+ "comment": "vindex func without where condition",
+ "query": "select keyspace_id from user_index",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (where clause missing)"
+ },
+ {
+ "comment": "vindex func in subquery without where",
+ "query": "select id from user where exists(select keyspace_id from user_index)",
+ "plan": "VT12001: unsupported: WHERE clause for vindex function must be of the form id = or id in(,...) (where clause missing)"
+ },
+ {
+ "comment": "select func(keyspace_id) from user_index where id = :id",
+ "query": "select func(keyspace_id) from user_index where id = :id",
+ "plan": "VT12001: unsupported: expression on results of a vindex function"
+ },
+ {
+ "comment": "delete with multi-table targets",
+ "query": "delete music,user from music inner join user where music.id = user.id",
+ "plan": "VT12001: unsupported: multi-shard or vindex write statement"
+ },
+ {
+ "comment": "select get_lock with non-dual table",
+ "query": "select get_lock('xyz', 10) from user",
+ "v3-plan": "VT12001: unsupported: get_lock('xyz', 10) is allowed only with dual",
+ "gen4-plan": "get_lock('xyz', 10) allowed only with dual"
+ },
+ {
+ "comment": "select is_free_lock with non-dual table",
+ "query": "select is_free_lock('xyz') from user",
+ "v3-plan": "VT12001: unsupported: is_free_lock('xyz') is allowed only with dual",
+ "gen4-plan": "is_free_lock('xyz') allowed only with dual"
+ },
+ {
+ "comment": "union with SQL_CALC_FOUND_ROWS",
+ "query": "(select sql_calc_found_rows id from user where id = 1 limit 1) union select id from user where id = 1",
+ "v3-plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with UNION",
+ "gen4-plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with union"
+ },
+ {
+ "comment": "set with DEFAULT - vitess aware",
+ "query": "set workload = default",
+ "plan": "VT12001: unsupported: DEFAULT for @@workload"
+ },
+ {
+ "comment": "set with DEFAULT - reserved connection",
+ "query": "set sql_mode = default",
+ "plan": "VT12001: unsupported: DEFAULT for @@%s%!(EXTRA sqlparser.IdentifierCI=sql_mode)"
+ },
+ {
+ "comment": "Multi shard query using into outfile s3",
+ "query": "select * from user into outfile s3 'out_file_name'",
+ "plan": "VT12001: unsupported: INTO on sharded keyspace"
+ },
+ {
+ "comment": "create view with Cannot auto-resolve for cross-shard joins",
+ "query": "create view user.view_a as select col from user join user_extra",
+ "v3-plan": "VT03019: symbol col not found",
+ "gen4-plan": "Column 'col' in field list is ambiguous"
+ },
+ {
+ "comment": "create view with join that cannot be served in each shard separately",
+ "query": "create view user.view_a as select user_extra.id from user join user_extra",
+ "plan": "VT12001: unsupported: Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with sharded limit",
+ "query": "create view user.view_a as select id from user order by id limit 10",
+ "plan": "VT12001: unsupported: Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with top level subquery in select",
+ "query": "create view user.view_a as select a, (select col from user) from unsharded",
+ "plan": "VT12001: unsupported: Select query does not belong to the same keyspace as the view statement"
+ },
+ {
+ "comment": "create view with sql_calc_found_rows with limit",
+ "query": "create view user.view_a as select sql_calc_found_rows * from music limit 100",
+ "plan": "VT12001: unsupported: Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with sql_calc_found_rows with group by and having",
+ "query": "create view user.view_a as select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "plan": "VT12001: unsupported: Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with incompatible keyspaces",
+ "query": "create view main.view_a as select * from user.user_extra",
+ "plan": "VT12001: unsupported: Select query does not belong to the same keyspace as the view statement"
+ },
+ {
+ "comment": "avg function on scatter query",
+ "query": "select avg(id) from user",
+ "v3-plan": "VT12001: unsupported: in scatter query: complex aggregate expression",
+ "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'avg'"
+ },
+ {
+ "comment": "scatter aggregate with ambiguous aliases",
+ "query": "select distinct a, b as a from user",
+ "v3-plan": "generating ORDER BY clause: VT03021: ambiguous symbol reference: a",
+ "gen4-plan": "VT13001: [BUG] generating ORDER BY clause: ambiguous symbol reference: a"
+ },
+ {
+ "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# This query will never work as the inner derived table is only selecting one of the column",
+ "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select id from user_extra where user_id = 5) uu where uu.user_id = uu.id))",
+ "plan": "VT12001: unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# changed to project all the columns from the derived tables.",
+ "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select col, id, user_id from user_extra where user_id = 5) uu where uu.user_id = uu.id))",
+ "plan": "VT12001: unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "Gen4 does a rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.",
+ "query": "select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 2",
+ "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION",
+ "gen4-plan": "Column 'id' in field list is ambiguous"
+ },
+ {
+ "comment": "unsupported with clause in delete statement",
+ "query": "with x as (select * from user) delete from x",
+ "plan": "VT12001: unsupported: WITH expression in DELETE statement"
+ },
+ {
+ "comment": "unsupported with clause in update statement",
+ "query": "with x as (select * from user) update x set name = 'f'",
+ "plan": "VT12001: unsupported: WITH expression in UPDATE statement"
+ },
+ {
+ "comment": "unsupported with clause in select statement",
+ "query": "with x as (select * from user) select * from x",
+ "plan": "VT12001: unsupported: WITH expression in SELECT statement"
+ },
+ {
+ "comment": "unsupported with clause in union statement",
+ "query": "with x as (select * from user) select * from x union select * from x",
+ "plan": "VT12001: unsupported: WITH expression in UNION statement"
+ },
+ {
+ "comment": "scatter aggregate with complex select list (can't build order by)",
+ "query": "select distinct a+1 from user",
+ "v3-plan": "generating ORDER BY clause: VT12001: unsupported: reference a complex expression",
+ "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: a + 1"
+ },
+ {
+ "comment": "aggregation on union",
+ "query": "select sum(col) from (select col from user union all select col from unsharded) t",
+ "v3-plan": "VT12001: unsupported: cross-shard query with aggregates",
+ "gen4-plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.concatenateGen4 plan"
+ },
+ {
+ "comment": "insert having subquery in row values",
+ "query": "insert into user(id, name) values ((select 1 from user where id = 1), 'A')",
+ "plan": "expr cannot be translated, not supported: (select 1 from `user` where id = 1)"
+ },
+ {
+ "comment": "lateral derived tables",
+ "query": "select * from user, lateral (select * from user_extra where user_id = user.id) t",
+ "plan": "VT12001: unsupported: lateral derived tables"
+ },
+ {
+ "comment": "json_table expressions",
+ "query": "SELECT * FROM JSON_TABLE('[ {\"c1\": null} ]','$[*]' COLUMNS( c1 INT PATH '$.c1' ERROR ON ERROR )) as jt",
+ "v3-plan": "VT12001: unsupported: JSON_TABLE expressions",
+ "gen4-plan": "VT12001: unsupported: json_table expressions"
+ },
+ {
+ "comment": "mix lock with other expr",
+ "query": "select get_lock('xyz', 10), 1 from dual",
+ "plan": "VT12001: unsupported: LOCK function and other expression: [1] in same select query"
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt
deleted file mode 100644
index 81627343248..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt
+++ /dev/null
@@ -1,458 +0,0 @@
-# union operations in subqueries (expressions)
-"select * from user where id in (select * from user union select * from user_extra)"
-"unsupported: '*' expression in cross-shard query"
-Gen4 plan same as above
-
-# TODO: Implement support for select with a target destination
-"select * from `user[-]`.user_metadata"
-"unsupported: SELECT with a target destination"
-Gen4 plan same as above
-
-# Unsupported INSERT statement with a target destination
-"insert into `user[-]`.user_metadata (a, b) values (1,2)"
-"unsupported: INSERT with a target destination"
-Gen4 plan same as above
-
-# Unsupported delete statement with a replica target destination
-"DELETE FROM `user[-]@replica`.user_metadata limit 1"
-"unsupported: delete statement with a replica target"
-Gen4 plan same as above
-
-# Unsupported update statement with a replica target destination
-"update `user[-]@replica`.user_metadata set id=2"
-"unsupported: update statement with a replica target"
-Gen4 plan same as above
-
-# scatter order by with * expression
-"select * from user order by id"
-"unsupported: in scatter query: order by must reference a column in the select list: id asc"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# order by rand on a cross-shard subquery
-"select id from (select user.id, user.col from user join user_extra) as t order by rand()"
-"unsupported: memory sort: complex order by expression: rand()"
-Gen4 error: unsupported: in scatter query: complex order by expression: rand()
-
-# natural join
-"select * from user natural join user_extra"
-"unsupported: natural join"
-Gen4 plan same as above
-
-# join with USING construct
-"select * from user join user_extra using(id)"
-"unsupported: join with USING(column_list) clause for complex queries"
-Gen4 error: can't handle JOIN USING without authoritative tables
-
-# join with USING construct with 3 tables
-"select user.id from user join user_extra using(id) join music using(id2)"
-"unsupported: join with USING(column_list) clause for complex queries"
-Gen4 error: can't handle JOIN USING without authoritative tables
-
-# natural left join
-"select * from user natural left join user_extra"
-"unsupported: natural left join"
-Gen4 plan same as above
-
-# natural right join
-"select * from user natural right join user_extra"
-"unsupported: natural right join"
-Gen4 plan same as above
-
-# * expresson not allowed for cross-shard joins
-"select * from user join user_extra"
-"unsupported: '*' expression in cross-shard query"
-Gen4 plan same as above
-
-# Group by column number, used with non-aliased expression (duplicated code)
-"select * from user group by 1"
-"unsupported: '*' expression in cross-shard query"
-Gen4 error: cannot use column offsets in group statement when using `*`
-
-# complex group by expression
-"select a from user group by a+1"
-"unsupported: in scatter query: only simple references allowed"
-Gen4 error: unsupported: in scatter query: complex order by expression: a + 1
-
-# Complex aggregate expression on scatter
-"select 1+count(*) from user"
-"unsupported: in scatter query: complex aggregate expression"
-Gen4 plan same as above
-
-# Multi-value aggregates not supported
-"select count(a,b) from user"
-"unsupported: only one expression allowed inside aggregates: count(a, b)"
-Gen4 error: aggregate functions take a single argument 'count(a, b)'
-
-# scatter aggregate complex order by
-"select id from user group by id order by id+1"
-"unsupported: in scatter query: complex order by expression: id + 1"
-Gen4 plan same as above
-
-# Scatter order by is complex with aggregates in select
-"select col, count(*) from user group by col order by col+1"
-"unsupported: in scatter query: complex order by expression: col + 1"
-Gen4 plan same as above
-
-# Aggregate detection (group_concat)
-"select group_concat(user.a) from user join user_extra"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: aggregation function 'group_concat'
-
-# subqueries not supported in group by
-"select id from user group by id, (select id from user_extra)"
-"unsupported: subqueries disallowed in GROUP or ORDER BY"
-Gen4 error: unsupported: subqueries disallowed in GROUP BY
-
-# Order by uses cross-shard expression
-"select id from user order by id+1"
-"unsupported: in scatter query: complex order by expression: id + 1"
-Gen4 plan same as above
-
-# Order by column number with collate
-"select user.col1 as a from user order by 1 collate utf8_general_ci"
-"unsupported: in scatter query: complex order by expression: 1 collate utf8_general_ci"
-Gen4 error: unsupported: in scatter query: complex order by expression: a collate utf8_general_ci
-
-# subqueries in delete
-"delete from user where col = (select id from unsharded)"
-"unsupported: subqueries in sharded DML"
-Gen4 plan same as above
-
-# sharded subqueries in unsharded delete
-"delete from unsharded where col = (select id from user)"
-"unsupported: sharded subqueries in DML"
-Gen4 plan same as above
-
-# sharded delete with limit clasue
-"delete from user_extra limit 10"
-"multi shard delete with limit is not supported"
-Gen4 plan same as above
-
-# sharded subquery in unsharded subquery in unsharded delete
-"delete from unsharded where col = (select id from unsharded where id = (select id from user))"
-"unsupported: sharded subqueries in DML"
-Gen4 plan same as above
-
-# sharded join unsharded subqueries in unsharded delete
-"delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)"
-"unsupported: sharded subqueries in DML"
-Gen4 plan same as above
-
-# scatter update with limit clause
-"update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1"
-"multi shard update with limit is not supported"
-Gen4 plan same as above
-
-# multi delete multi table
-"delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'"
-"unsupported: multi-shard or vindex write statement"
-Gen4 plan same as above
-
-# update changes primary vindex column
-"update user set id = 1 where id = 1"
-"unsupported: You can't update primary vindex columns. Invalid update on vindex: user_index"
-Gen4 plan same as above
-
-# update change in multicol vindex column
-"update multicol_tbl set colc = 5, colb = 4 where cola = 1 and colb = 2"
-"unsupported: You can't update primary vindex columns. Invalid update on vindex: multicolIdx"
-Gen4 plan same as above
-
-# update changes non lookup vindex column
-"update user_metadata set md5 = 1 where user_id = 1"
-"unsupported: You can only update lookup vindexes. Invalid update on vindex: user_md5_index"
-Gen4 plan same as above
-
-# update with complex set clause
-"update music set id = id + 1 where id = 1"
-"unsupported: Only values are supported. Invalid update on column: `id` with expr: [id + 1]"
-Gen4 plan same as above
-
-# update by primary keyspace id, changing one vindex column, limit without order clause
-"update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10"
-"unsupported: Need to provide order by clause when using limit. Invalid update on vindex: email_user_map"
-Gen4 plan same as above
-
-# update with derived table
-"update (select id from user) as u set id = 4"
-"unsupported: subqueries in sharded DML"
-Gen4 error: The target table u of the UPDATE is not updatable
-
-# join in update tables
-"update user join user_extra on user.id = user_extra.id set user.name = 'foo'"
-"unsupported: multi-shard or vindex write statement"
-Gen4 error: unsupported: multiple tables in update
-
-# multiple tables in update
-"update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id"
-"unsupported: multi-shard or vindex write statement"
-Gen4 error: unsupported: multiple tables in update
-
-# unsharded insert, unqualified names and auto-inc combined
-"insert into unsharded_auto select col from unsharded"
-"unsupported: auto-inc and select in insert"
-Gen4 plan same as above
-
-# unsharded insert, no col list with auto-inc
-"insert into unsharded_auto values(1,1)"
-"column list required for tables with auto-inc columns"
-Gen4 plan same as above
-
-# unsharded insert, col list does not match values
-"insert into unsharded_auto(id, val) values(1)"
-"column list doesn't match values"
-Gen4 plan same as above
-
-# sharded upsert can't change vindex
-"insert into user(id) values(1) on duplicate key update id = 3"
-"unsupported: DML cannot change vindex column"
-Gen4 plan same as above
-
-# sharded upsert can't change vindex using values function
-"insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(id)"
-"unsupported: DML cannot change vindex column"
-Gen4 plan same as above
-
-# sharded replace no vindex
-"replace into user(val) values(1, 'foo')"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# sharded replace with vindex
-"replace into user(id, name) values(1, 'foo')"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace no column list
-"replace into user values(1, 2, 3)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with mimatched column list
-"replace into user(id) values (1, 2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with one vindex
-"replace into user(id) values (1)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with non vindex on vindex-enabled table
-"replace into user(nonid) values (2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with all vindexes supplied
-"replace into user(nonid, name, id) values (2, 'foo', 1)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace for non-vindex autoinc
-"replace into user_extra(nonid) values (2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with multiple rows
-"replace into user(id) values (1), (2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where id = 1 and id = 2"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (multiple filters)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where func(id)"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (not a comparison)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where id > 1"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (not equality)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where 1 = id"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (lhs is not a column)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where keyspace_id = 1"
-"unsupported: where clause for vindex function must be of the form id = | | | | |